/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"
static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */
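
/*
 * This plugin exports two ramdisk backstore personalities: rd_dr, which hands
 * the ramdisk's own scatterlist pages directly to the target core as
 * struct se_mem mappings, and rd_mcp, which memcpy()s data between the
 * ramdisk pages and each task's scatterlist.  Both are registered as
 * struct se_subsystem_api templates at the bottom of this file.
 */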
/*	rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!(rd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
		RD_MAX_SECTORS);

	return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}
/*	rd_release_device_space():
 *
 *
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if ((pg)) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}
/*	rd_build_device_space():
 *
 *
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -EINVAL;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!(sg_table)) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -ENOMEM;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!(sg)) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -ENOMEM;
		}

		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!(pg)) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}
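
/*
 * The backing store is thus an array of rd_dev_sg_table chunks, each holding
 * at most max_sg_per_table single-page scatterlist entries.  A table's
 * [page_start_offset, page_end_offset] range records which ramdisk pages it
 * covers, which is what rd_get_sg_table() below searches on.
 */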
static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!(rd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}
/*	rd_create_virtdevice():
 *
 *
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0, ret;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
			&dev_limits, prod, rev);
	if (!(dev)) {
		/*
		 * Set an error code here; otherwise ret would still be 0
		 * from rd_build_device_space() and fail: would return
		 * ERR_PTR(0) to the caller.
		 */
		ret = -ENOMEM;
		goto fail;
	}

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return ERR_PTR(ret);
}
static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}
/*	rd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}
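
/*
 * Recover the rd_request that embeds a given se_task; rd_alloc_task() below
 * hands the core the embedded rd_task member, so container_of() gets us back
 * to the surrounding request state.
 */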
static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

	return &rd_req->rd_task;
}
/*	rd_get_sg_table():
 *
 *
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}
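
/*
 * The MEMCPY read/write paths below walk two scatterlists in lockstep: i
 * indexes the task's SG (the destination on read, the source on write) and
 * j indexes the ramdisk's internal SG.  Each iteration copies the smaller
 * of the two remaining segment lengths, advances whichever side was
 * exhausted, and fetches the next rd_dev_sg_table once the current table's
 * page range runs out.
 */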
/*	rd_MEMCPY_read():
 *
 *
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}
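
/*
 * rd_MEMCPY_write() is the mirror image of rd_MEMCPY_read(): the ramdisk
 * tables become the destination (sg_d) and the task's scatterlist the
 * source (sg_s), with the roles of the i/j walk otherwise unchanged.
 */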
/*	rd_MEMCPY_write():
 *
 *
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}
/*	rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
			   DEV_ATTRIB(dev)->block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
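
/*
 * The LBA math above locates the starting ramdisk page and the byte offset
 * within it.  For example, with a 512-byte block_size and a 4096-byte
 * PAGE_SIZE, LBA 9 gives rd_page = (9 * 512) / 4096 = 1 and
 * rd_offset = (9 % 8) * 512 = 512.
 */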
/*	rd_DIRECT_with_offset():
 *
 *
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!(req->rd_size))
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}
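
/*
 * rd_DIRECT_without_offset() handles the page-aligned case: each se_mem maps
 * a whole internal scatterlist entry (or the remaining tail of the request),
 * so no per-page offset bookkeeping is needed.
 */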
/*	rd_DIRECT_without_offset():
 *
 *
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!(req->rd_size))
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk("page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
			*se_mem_cnt);
#endif
	return 0;
}
/*	rd_DIRECT_do_se_mem_map():
 *
 *
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			   DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -1;
	}
	/*
	 * Special case for if task_sg_chaining is enabled, then
	 * we setup struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!(transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset)))
		return -1;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}
/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	rd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}
enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};
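
/*
 * Only one configfs device parameter is accepted, rd_pages=<count>, which
 * sizes the ramdisk in PAGE_SIZE units.  For example (the exact configfs
 * path below is illustrative; it depends on how the target core hierarchy
 * is laid out on a given system):
 *
 *	echo rd_pages=65536 > $CONFIGFS/target/core/rd_mcp_0/ramdisk0/control
 */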
static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			printk(KERN_INFO "RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -1;
	}

	return 0;
}
static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}
/*	rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}
static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
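
/*
 * Report capacity as the highest addressable LBA (total blocks minus one),
 * which is the convention the target core's READ CAPACITY handling expects.
 */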
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			DEV_ATTRIB(dev)->block_size) - 1;

	return blocks_long;
}
static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};
static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};
int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}