/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"
#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = UINT_MAX;
	limits->max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}
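/*
 * For context (not part of the original source, paths illustrative): the
 * configfs sequence that exercises the setup path above usually looks like:
 *
 *	mkdir -p /sys/kernel/config/target/core/iblock_0/mydev
 *	echo udev_path=/dev/sdb > /sys/kernel/config/target/core/iblock_0/mydev/control
 *	echo 1 > /sys/kernel/config/target/core/iblock_0/mydev/enable
 *
 * The udev_path= token is parsed by iblock_set_configfs_dev_params(),
 * verified by iblock_check_configfs_dev_params(), and consumed here by
 * blkdev_get_by_path() when the device is enabled.
 */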
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048: blocks_long <<= 1; break;
		case 1024: blocks_long <<= 2; break;
		case 512:  blocks_long <<= 3; break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 1; break;
		case 1024: blocks_long <<= 1; break;
		case 512:  blocks_long <<= 2; break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 2; break;
		case 2048: blocks_long >>= 1; break;
		case 512:  blocks_long <<= 1; break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096: blocks_long >>= 3; break;
		case 2048: blocks_long >>= 2; break;
		case 1024: blocks_long >>= 1; break;
		}
		break;
	}

	return blocks_long;
}
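/*
 * Illustrative sketch, not part of the original driver: for power-of-two
 * block sizes in the 512..4096 range, the nested switch above is equivalent
 * to a single rescaling expression.  For example, a 1 GiB device with
 * 512-byte logical blocks gives blocks_long = (1073741824 / 512) - 1 =
 * 2097151; re-exported with a 4096-byte block_size this becomes
 * 2097151 >> 3 = 262143.
 */
static inline unsigned long long __maybe_unused
iblock_example_rescale_blocks(unsigned long long blocks, u32 from, u32 to)
{
	/* (blocks * from) / to, e.g. from = 512, to = 4096 -> blocks >> 3 */
	return div_u64(blocks * from, to);
}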
static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}
/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}
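/*
 * CDB example (illustrative, not from the original source): SYNCHRONIZE
 * CACHE (10) is opcode 0x35 and carries the IMMED bit in bit 1 of byte 1,
 * which is what (cmd->t_task_cdb[1] & 0x2) tests above:
 *
 *	35 02 00 ...  -> immed != 0, GOOD status queued before the flush runs
 *	35 00 00 ...  -> immed == 0, iblock_end_io_flush() completes the
 *	                 command only after the flush bio finishes
 */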
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}
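/*
 * Completion accounting sketch (explanatory, not from the original source):
 * ibr->pending starts at 2 in iblock_execute_cmd() -- one reference held by
 * the submission path and one for the first bio -- and is bumped once per
 * additional bio.  With three bios total, pending reaches 4; the three
 * iblock_bio_done() calls plus the final iblock_complete_cmd() call from
 * the submission path provide exactly four decrements, so whichever caller
 * drops it to zero reports the SCSI status.
 */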
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;

	return bio;
}
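/*
 * Sizing note (assumption: BIO_MAX_PAGES == 256, its historical value): a
 * request mapped through a 1024-entry scatterlist is clamped above and
 * therefore needs at least four bios, allocated one at a time by the retry
 * loop in iblock_execute_cmd().
 */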
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}
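/*
 * Design note: blk_start_plug()/blk_finish_plug() batch the bios popped off
 * the list so the block layer can merge them and hand them to the driver in
 * one flush when the plug is released, rather than dispatching each
 * submit_bio() call individually.
 */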
static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_nents, enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
			" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -ENOMEM;
}
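/*
 * Worked example (illustrative): with an advertised block_size of 4096, a
 * command at t_task_lba = 16 maps to block_lba = 16 << 3 = 128 in the
 * 512-byte units used by the block layer; byte offset 128 * 512 = 65536
 * equals 16 * 4096, so both views address the same location.
 */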
static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still zero.
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.execute_cmd		= iblock_execute_cmd,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);