/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2012 RisingTide Systems LLC.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

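/* Map a generic se_device back to the rd_dev that embeds it. */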
static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
        return container_of(dev, struct rd_dev, dev);
}

/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct rd_host *rd_host;

        rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
        if (!rd_host) {
                pr_err("Unable to allocate memory for struct rd_host\n");
                return -ENOMEM;
        }

        rd_host->rd_host_id = host_id;

        hba->hba_ptr = rd_host;

        pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);

        return 0;
}

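/* Free the rd_host allocated at attach time and clear the HBA back-pointer. */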
static void rd_detach_hba(struct se_hba *hba)
{
        struct rd_host *rd_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
                " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

        kfree(rd_host);
        hba->hba_ptr = NULL;
}

/*	rd_release_device_space():
 *
 *
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
        u32 i, j, page_count = 0, sg_per_table;
        struct rd_dev_sg_table *sg_table;
        struct page *pg;
        struct scatterlist *sg;

        if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
                return;

        sg_table = rd_dev->sg_table_array;

        for (i = 0; i < rd_dev->sg_table_count; i++) {
                sg = sg_table[i].sg_table;
                sg_per_table = sg_table[i].rd_sg_count;

                for (j = 0; j < sg_per_table; j++) {
                        pg = sg_page(&sg[j]);
                        if (pg) {
                                __free_page(pg);
                                page_count++;
                        }
                }

                kfree(sg);
        }

        pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
                " Device ID: %u, pages %u in %u tables total bytes %lu\n",
                rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
                rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

        kfree(sg_table);
        rd_dev->sg_table_array = NULL;
        rd_dev->sg_table_count = 0;
}

/*	rd_build_device_space():
 *
 *
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
        u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
        u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));
        struct rd_dev_sg_table *sg_table;
        struct page *pg;
        struct scatterlist *sg;

        if (rd_dev->rd_page_count <= 0) {
                pr_err("Illegal page count: %u for Ramdisk device\n",
                        rd_dev->rd_page_count);
                return -EINVAL;
        }

        /* Don't need backing pages for NULLIO */
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;

        total_sg_needed = rd_dev->rd_page_count;

        sg_tables = (total_sg_needed / max_sg_per_table) + 1;

        sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
        if (!sg_table) {
                pr_err("Unable to allocate memory for Ramdisk"
                        " scatterlist tables\n");
                return -ENOMEM;
        }

        rd_dev->sg_table_array = sg_table;
        rd_dev->sg_table_count = sg_tables;

        while (total_sg_needed) {
                sg_per_table = (total_sg_needed > max_sg_per_table) ?
                        max_sg_per_table : total_sg_needed;

                sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
                                GFP_KERNEL);
                if (!sg) {
                        pr_err("Unable to allocate scatterlist array"
                                " for struct rd_dev\n");
                        return -ENOMEM;
                }

                sg_init_table(sg, sg_per_table);

                sg_table[i].sg_table = sg;
                sg_table[i].rd_sg_count = sg_per_table;
                sg_table[i].page_start_offset = page_offset;
                sg_table[i++].page_end_offset = (page_offset + sg_per_table)
                                                - 1;

                for (j = 0; j < sg_per_table; j++) {
                        pg = alloc_pages(GFP_KERNEL, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
                                return -ENOMEM;
                        }
                        sg_assign_page(&sg[j], pg);
                        sg[j].length = PAGE_SIZE;
                }

                page_offset += sg_per_table;
                total_sg_needed -= sg_per_table;
        }

        pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
                " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
                rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count);

        return 0;
}

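/* Allocate an rd_dev and hand its embedded se_device back to the core. */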
static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
        struct rd_dev *rd_dev;
        struct rd_host *rd_host = hba->hba_ptr;

        rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
        if (!rd_dev) {
                pr_err("Unable to allocate memory for struct rd_dev\n");
                return NULL;
        }

        rd_dev->rd_host = rd_host;

        return &rd_dev->dev;
}

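/*
 * Validate the user-supplied rd_pages= count, build the backing page
 * tables, and publish the hardware attributes for this device.
 */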
static int rd_configure_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        struct rd_host *rd_host = dev->se_hba->hba_ptr;
        int ret;

        if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
                pr_debug("Missing rd_pages= parameter\n");
                return -EINVAL;
        }

        ret = rd_build_device_space(rd_dev);
        if (ret < 0)
                goto fail;

        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;

        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

        pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
                " %u pages in %u tables, %lu total bytes\n",
                rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
                rd_dev->sg_table_count,
                (unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

        return 0;

fail:
        rd_release_device_space(rd_dev);
        return ret;
}

static void rd_free_device(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        rd_release_device_space(rd_dev);
        kfree(rd_dev);
}

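/*
 * Look up the sg table covering a given backing page index; each table
 * spans at most max_sg_per_table pages, as laid out in
 * rd_build_device_space().
 */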
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
        struct rd_dev_sg_table *sg_table;
        u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
                                sizeof(struct scatterlist));

        i = page / sg_per_table;
        if (i < rd_dev->sg_table_count) {
                sg_table = &rd_dev->sg_table_array[i];
                if ((sg_table->page_start_offset <= page) &&
                    (sg_table->page_end_offset >= page))
                        return sg_table;
        }

        pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
                        page);

        return NULL;
}

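/*
 * Copy between the command's data scatterlist and the ramdisk backing
 * pages.  The starting byte offset is t_task_lba * block_size; dividing
 * it by PAGE_SIZE yields the backing page index (rd_page) and the offset
 * within that page (rd_offset).
 */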
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd)
{
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *se_dev = cmd->se_dev;
        struct rd_dev *dev = RD_DEV(se_dev);
        struct rd_dev_sg_table *table;
        struct scatterlist *rd_sg;
        struct sg_mapping_iter m;
        u32 rd_offset;
        u32 rd_size;
        u32 rd_page;
        u32 src_len;
        u64 tmp;

        if (dev->rd_flags & RDF_NULLIO) {
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }

        tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
        rd_offset = do_div(tmp, PAGE_SIZE);
        rd_page = tmp;
        rd_size = cmd->data_length;

        table = rd_get_sg_table(dev, rd_page);
        if (!table)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

        rd_sg = &table->sg_table[rd_page - table->page_start_offset];

        pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
                        dev->rd_dev_id,
                        data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
                        cmd->t_task_lba, rd_size, rd_page, rd_offset);

        src_len = PAGE_SIZE - rd_offset;
        sg_miter_start(&m, sgl, sgl_nents,
                        data_direction == DMA_FROM_DEVICE ?
                        SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (rd_size) {
                u32 len;
                void *rd_addr;

                sg_miter_next(&m);
                if (!(u32)m.length) {
                        pr_debug("RD[%u]: invalid sgl %p len %zu\n",
                                 dev->rd_dev_id, m.addr, m.length);
                        sg_miter_stop(&m);
                        return TCM_INCORRECT_AMOUNT_OF_DATA;
                }
                len = min((u32)m.length, src_len);
                if (len > rd_size) {
                        pr_debug("RD[%u]: size underrun page %d offset %d "
                                 "size %d\n", dev->rd_dev_id,
                                 rd_page, rd_offset, rd_size);
                        len = rd_size;
                }
                m.length = len;

                rd_addr = sg_virt(rd_sg) + rd_offset;

                if (data_direction == DMA_FROM_DEVICE)
                        memcpy(m.addr, rd_addr, len);
                else
                        memcpy(rd_addr, m.addr, len);

                rd_size -= len;
                if (!rd_size)
                        continue;

                src_len -= len;
                if (src_len) {
                        rd_offset += len;
                        continue;
                }

                /* rd page completed, next one please */
                rd_page++;
                rd_offset = 0;
                src_len = PAGE_SIZE;
                if (rd_page <= table->page_end_offset) {
                        rd_sg++;
                        continue;
                }

                table = rd_get_sg_table(dev, rd_page);
                if (!table) {
                        sg_miter_stop(&m);
                        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }

                /* since we increment, the first sg entry is correct */
                rd_sg = table->sg_table;
        }
        sg_miter_stop(&m);

        target_complete_cmd(cmd, SAM_STAT_GOOD);
        return 0;
}

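/*
 * configfs device parameters: rd_pages= sets the backing page count,
 * rd_nullio=1 makes the device discard all I/O without backing pages.
 */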
enum {
        Opt_rd_pages, Opt_rd_nullio, Opt_err
};

static match_table_t tokens = {
        {Opt_rd_pages, "rd_pages=%d"},
        {Opt_rd_nullio, "rd_nullio=%d"},
        {Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct rd_dev *rd_dev = RD_DEV(dev);
        char *orig, *ptr, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_rd_pages:
                        match_int(args, &arg);
                        rd_dev->rd_page_count = arg;
                        pr_debug("RAMDISK: Referencing Page"
                                " Count: %u\n", rd_dev->rd_page_count);
                        rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
                        break;
                case Opt_rd_nullio:
                        match_int(args, &arg);
                        if (arg != 1)
                                break;

                        pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
                        rd_dev->rd_flags |= RDF_NULLIO;
                        break;
                default:
                        break;
                }
        }

        kfree(orig);
        return (!ret) ? count : ret;
}

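/* Report the current ramdisk configuration back through configfs. */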
static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
                        rd_dev->rd_dev_id);
        bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
                        " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
                        PAGE_SIZE, rd_dev->sg_table_count,
                        !!(rd_dev->rd_flags & RDF_NULLIO));
        return bl;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
        struct rd_dev *rd_dev = RD_DEV(dev);

        unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
                        dev->dev_attrib.block_size) - 1;

        return blocks_long;
}

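/* SBC (block) command dispatch: READ/WRITE CDBs land in rd_execute_rw(). */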
static struct sbc_ops rd_sbc_ops = {
        .execute_rw             = rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

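/* Backend template registered with the target core in rd_module_init(). */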
static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
        .configure_device       = rd_configure_device,
        .free_device            = rd_free_device,
        .parse_cdb              = rd_parse_cdb,
        .set_configfs_dev_params = rd_set_configfs_dev_params,
        .show_configfs_dev_params = rd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = rd_get_blocks,
};

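/* Module entry/exit: register and release the rd_mcp backend. */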
int __init rd_module_init(void)
{
        int ret;

        ret = transport_subsystem_register(&rd_mcp_template);
        if (ret < 0)
                return ret;

        return 0;
}

void rd_module_exit(void)
{
        transport_subsystem_release(&rd_mcp_template);
}