/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct iblock_hba *ib_host;

        ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
        if (!ib_host) {
                pr_err("Unable to allocate memory for"
                                " struct iblock_hba\n");
                return -ENOMEM;
        }

        ib_host->iblock_host_id = host_id;

        hba->hba_ptr = ib_host;

        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

        pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
                hba->hba_id, ib_host->iblock_host_id);

        return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
        struct iblock_hba *ib_host = hba->hba_ptr;

        pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
                " Target Core\n", hba->hba_id, ib_host->iblock_host_id);

        kfree(ib_host);
        hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;
        struct iblock_hba *ib_host = hba->hba_ptr;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }
        ib_dev->ibd_host = ib_host;

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return ib_dev;
}
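/*
 * Note that device setup is split across two se_subsystem_api callbacks:
 * iblock_allocate_virtdevice() only allocates the iblock_dev container when
 * the configfs group is created, while iblock_create_virtdevice() below is
 * what actually claims the backing struct block_device once udev_path has
 * been configured and the device is enabled.
 */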
static struct se_device *iblock_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        struct iblock_dev *ib_dev = p;
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct block_device *bd = NULL;
        struct request_queue *q;
        struct queue_limits *limits;
        u32 dev_flags = 0;
        int ret = -EINVAL;

        if (!ib_dev) {
                pr_err("Unable to locate struct iblock_dev parameter\n");
                return ERR_PTR(ret);
        }
        memset(&dev_limits, 0, sizeof(struct se_dev_limits));
        /*
         * These settings need to be made tunable..
         */
        ib_dev->ibd_bio_set = bioset_create(32, 64);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset()\n");
                return ERR_PTR(-ENOMEM);
        }
        pr_debug("IBLOCK: Created bio_set()\n");
        /*
         * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
         * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
         */
        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
                                FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto failed;
        }
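        /*
         * The FMODE_EXCL open with ib_dev as holder claims the backing block
         * device exclusively for this se_device; other exclusive openers (for
         * example a filesystem mount) are refused until iblock_free_device()
         * drops the reference with blkdev_put().
         */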
        /*
         * Setup the local scope queue_limits from struct request_queue->limits
         * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
         */
        q = bdev_get_queue(bd);
        limits = &dev_limits.limits;
        limits->logical_block_size = bdev_logical_block_size(bd);
        limits->max_hw_sectors = queue_max_hw_sectors(q);
        limits->max_sectors = queue_max_sectors(q);
        dev_limits.hw_queue_depth = q->nr_requests;
        dev_limits.queue_depth = q->nr_requests;

        ib_dev->ibd_bd = bd;

        dev = transport_add_device_to_core_hba(hba,
                        &iblock_template, se_dev, dev_flags, ib_dev,
                        &dev_limits, "IBLOCK", IBLOCK_VERSION);
        if (!dev)
                goto failed;
        /*
         * Check if the underlying struct block_device request_queue supports
         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
         * in ATA and we need to set TPE=1
         */
        if (blk_queue_discard(q)) {
                dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
                dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity =
                                q->limits.discard_granularity;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;

                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }

        if (blk_queue_nonrot(q))
                dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

        return dev;
failed:
        if (ib_dev->ibd_bio_set) {
                bioset_free(ib_dev->ibd_bio_set);
                ib_dev->ibd_bio_set = NULL;
        }
        ib_dev->ibd_bd = NULL;
        return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
        struct iblock_dev *ib_dev = p;

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
        return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
        struct iblock_req *ib_req;

        ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ib_req) {
                pr_err("Unable to allocate memory for struct iblock_req\n");
                return NULL;
        }

        atomic_set(&ib_req->ib_bio_cnt, 0);
        return &ib_req->ib_task;
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                }
                break;
        case 2048:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                }
                break;
        case 1024:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                }
                break;
        case 512:
                switch (dev->se_sub_dev->se_dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                }
                break;
        }

        return blocks_long;
}
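/*
 * Example of the scaling above: a backing device with 8192 512-byte logical
 * blocks (4 MiB) has blocks_long = 8191.  If the se_device is configured with
 * a 4096-byte block_size, the 512 -> 4096 case shifts this right by 3 and
 * reports a last LBA of 1023, i.e. 1024 exported 4 KiB blocks.
 */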
/*
 * Emulate SYNCHRONIZE_CACHE_*
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
        int immed = (cmd->t_task_cdb[1] & 0x2);
        sector_t error_sector;
        int ret;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op
         */
        if (immed)
                transport_complete_sync_cache(cmd, 1);

        /*
         * blkdev_issue_flush() does not support specifying a range, so
         * we have to flush the entire cache.
         */
        ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
        if (ret != 0) {
                pr_err("IBLOCK: block_issue_flush() failed: %d "
                        " error_sector: %llu\n", ret,
                        (unsigned long long)error_sector);
        }

        if (!immed)
                transport_complete_sync_cache(cmd, ret == 0);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
        return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
        return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
        return 0;
}
static int iblock_do_task(struct se_task *task)
{
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct iblock_req *req = IBLOCK_REQ(task);
        struct bio *bio = req->ib_bio, *nbio = NULL;
        struct blk_plug plug;
        int rw;

        if (task->task_data_direction == DMA_TO_DEVICE) {
                /*
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                     task->task_se_cmd->t_tasks_fua))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
        } else {
                rw = READ;
        }

        blk_start_plug(&plug);
        while (bio) {
                nbio = bio->bi_next;
                bio->bi_next = NULL;
                pr_debug("Calling submit_bio() task: %p bio: %p"
                        " bio->bi_sector: %llu\n", task, bio,
                        (unsigned long long)bio->bi_sector);

                submit_bio(rw, bio);
                bio = nbio;
        }
        blk_finish_plug(&plug);

        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
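/*
 * iblock_do_task() only walks the bi_next chain that iblock_map_data_SG()
 * built and submits each bio under a single blk_plug, so the block layer gets
 * a chance to merge adjacent requests before they reach the backing device.
 */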
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        int barrier = 0;

        return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
        struct iblock_req *req = IBLOCK_REQ(task);
        struct bio *bio, *hbio = req->ib_bio;

        /*
         * We only release the bio(s) here if iblock_bio_done() has not called
         * bio_put() -> iblock_bio_destructor().
         */
        while (hbio != NULL) {
                bio = hbio;
                hbio = hbio->bi_next;
                bio->bi_next = NULL;
                bio_put(bio);
        }

        kfree(req);
}
enum {
        Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
                                               struct se_subsystem_dev *se_dev,
                                               const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
                                        "%s", arg_p);
                        kfree(arg_p);
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}
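/*
 * The parameters above are written as a comma separated "key=value" string
 * into the device's configfs control attribute before enabling it, for
 * example (paths are illustrative):
 *
 *   echo "udev_path=/dev/sdb" > $CONFIGFS/target/core/$HBA/$DEV/control
 *   echo 1 > $CONFIGFS/target/core/$HBA/$DEV/enable
 */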
static ssize_t iblock_check_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

        if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameters for IBLOCK\n");
                return -EINVAL;
        }

        return 0;
}
static ssize_t iblock_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
        struct block_device *bd = ibd->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
                bl += sprintf(b + bl, "  UDEV PATH: %s\n",
                                ibd->ibd_udev_path);
        } else
                bl += sprintf(b + bl, "\n");

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
        struct se_task *task = bio->bi_private;
        struct iblock_dev *ib_dev = task->se_dev->dev_ptr;

        bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *iblock_get_bio(
        struct se_task *task,
        struct iblock_req *ib_req,
        struct iblock_dev *ib_dev,
        int *ret,
        sector_t lba,
        u32 sg_num)
{
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
                return NULL;
        }

        pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
                " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
        pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = task;
        bio->bi_destructor = iblock_bio_destructor;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        atomic_inc(&ib_req->ib_bio_cnt);

        pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
        pr_debug("Set ib_req->ib_bio_cnt: %d\n",
                        atomic_read(&ib_req->ib_bio_cnt));
        return bio;
}
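/*
 * Each bio allocated here bumps ib_req->ib_bio_cnt, and iblock_bio_done()
 * drops that count as the bios complete; only when the last one finishes is
 * transport_complete_task() called for the owning se_task.
 */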
static int iblock_map_data_SG(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
        struct iblock_req *ib_req = IBLOCK_REQ(task);
        struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
        struct scatterlist *sg;
        int ret = 0;
        u32 i, sg_num = task->task_sg_nents;
        sector_t block_lba;
        /*
         * Do starting conversion up from non 512-byte blocksize with
         * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
         */
        if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
                block_lba = (task->task_lba << 3);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
                block_lba = (task->task_lba << 2);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
                block_lba = (task->task_lba << 1);
        else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
                block_lba = task->task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
                return PYX_TRANSPORT_LU_COMM_FAILURE;
        }
        bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
        if (!bio)
                return ret;

        ib_req->ib_bio = bio;
        hbio = tbio = bio;
        /*
         * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
         * from task->task_sg -> struct scatterlist memory.
         */
        for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
                pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
                        " %p len: %u offset: %u\n", task, bio, sg_page(sg),
                                sg->length, sg->offset);
again:
                ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
                if (ret != sg->length) {
                        pr_debug("*** Set bio->bi_sector: %llu\n",
                                (unsigned long long)bio->bi_sector);
                        pr_debug("** task->task_size: %u\n",
                                        task->task_size);
                        pr_debug("*** bio->bi_max_vecs: %u\n",
                                        bio->bi_max_vecs);
                        pr_debug("*** bio->bi_vcnt: %u\n",
                                        bio->bi_vcnt);

                        bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
                                                block_lba, sg_num);
                        if (!bio)
                                goto fail;

                        tbio = tbio->bi_next = bio;
                        pr_debug("-----------------> Added +1 bio: %p to"
                                " list, going to add the page again\n", bio);
                        goto again;
                }
                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
                pr_debug("task: %p bio_add_page() passed!, decremented"
                        " sg_num to %u\n", task, sg_num);
                pr_debug("task: %p bio_add_page() passed!, increased lba"
                        " to %llu\n", task, (unsigned long long)block_lba);
                pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
                        " %u\n", task, bio->bi_vcnt);
        }

        return 0;
fail:
        while (hbio) {
                bio = hbio;
                hbio = hbio->bi_next;
                bio->bi_next = NULL;
                bio_put(bio);
        }
        return ret;
}
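/*
 * The head of the bio chain built above is kept in ib_req->ib_bio: it is what
 * iblock_do_task() walks when submitting I/O, and what iblock_free_task()
 * releases if the bios were never handed to the block layer.
 */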
static unsigned char *iblock_get_cdb(struct se_task *task)
{
        return IBLOCK_REQ(task)->ib_scsi_cdb;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ibd = dev->dev_ptr;
        struct block_device *bd = ibd->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_task *task = bio->bi_private;
        struct iblock_req *ibr = IBLOCK_REQ(task);

        /*
         * Set -EIO if !BIO_UPTODATE and the passed err is still zero
         */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                err = -EIO;

        if (err != 0) {
                pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic_inc();
                bio_put(bio);
                /*
                 * Wait to complete the task until the last bio has completed.
                 */
                if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
                        return;

                ibr->ib_bio = NULL;
                transport_complete_task(task, 0);
                return;
        }
        pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
                task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
        /*
         * bio_put() will call iblock_bio_destructor() to release the bio back
         * to ibr->ib_bio_set.
         */
        bio_put(bio);
        /*
         * Wait to complete the task until the last bio has completed.
         */
        if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
                return;
        /*
         * Return GOOD status for task if zero ib_bio_err_cnt exists.
         */
        ibr->ib_bio = NULL;
        transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .map_data_SG            = iblock_map_data_SG,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .allocate_virtdevice    = iblock_allocate_virtdevice,
        .create_virtdevice      = iblock_create_virtdevice,
        .free_device            = iblock_free_device,
        .dpo_emulated           = iblock_emulated_dpo,
        .fua_write_emulated     = iblock_emulated_fua_write,
        .fua_read_emulated      = iblock_emulated_fua_read,
        .write_cache_emulated   = iblock_emulated_write_cache,
        .alloc_task             = iblock_alloc_task,
        .do_task                = iblock_do_task,
        .do_discard             = iblock_do_discard,
        .do_sync_cache          = iblock_emulate_sync_cache,
        .free_task              = iblock_free_task,
        .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_cdb                = iblock_get_cdb,
        .get_device_rev         = iblock_get_device_rev,
        .get_device_type        = iblock_get_device_type,
        .get_blocks             = iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
        return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);