/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_iblock.h"
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
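/*
 * Note: flipping the "#if 0" above to "#if 1" compiles in the verbose
 * DEBUG_IBLOCK() printk() tracing used throughout this file.
 */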
static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);
/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct iblock_hba *ib_host;

	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
	if (!(ib_host)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct iblock_hba\n");
		return -ENOMEM;
	}

	ib_host->iblock_host_id = host_id;

	atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) ib_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);

	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
		" Target Core TCQ Depth: %d\n", hba->hba_id,
		ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));

	return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
	struct iblock_hba *ib_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);

	kfree(ib_host);
	hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;
	struct iblock_hba *ib_host = hba->hba_ptr;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!(ib_dev)) {
		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
		return NULL;
	}
	ib_dev->ibd_host = ib_host;

	printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}
static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!(ib_dev)) {
		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
	/*
	 * These settings need to be made tunable..
	 */
	ib_dev->ibd_bio_set = bioset_create(32, 64);
	if (!(ib_dev->ibd_bio_set)) {
		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
	 */
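	/*
	 * Illustrative configfs flow from userspace (device path and
	 * directory names are examples only):
	 *
	 *   echo "udev_path=/dev/sdb" > $HBA/$DEV/control
	 *   echo 1 > $HBA/$DEV/enable
	 */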
	printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = queue_max_hw_sectors(q);
	limits->max_sectors = queue_max_sectors(q);
	dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;

	ib_dev->ibd_major = MAJOR(bd->bd_dev);
	ib_dev->ibd_minor = MINOR(bd->bd_dev);
	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, (void *)ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!(dev))
		goto failed;

	ib_dev->ibd_depth = dev->queue_depth;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		DEV_ATTRIB(dev)->max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
		DEV_ATTRIB(dev)->unmap_granularity =
				q->limits.discard_granularity;
		DEV_ATTRIB(dev)->unmap_granularity_alignment =
				q->limits.discard_alignment;

		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	ib_dev->ibd_major = 0;
	ib_dev->ibd_minor = 0;
	return ERR_PTR(ret);
}
static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!(ib_req)) {
		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
	atomic_set(&ib_req->ib_bio_cnt, 0);
	return &ib_req->ib_task;
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == DEV_ATTRIB(dev)->block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
		default:
			break;
		}
		break;
	case 1024:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
		default:
			break;
		}
		break;
	case 512:
		switch (DEV_ATTRIB(dev)->block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}
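/*
 * Worked example for the scaling above: a 1 GiB backing device with
 * 512-byte logical blocks spans 2097152 sectors. Exported with a
 * 4096-byte se_device block size it covers 262144 LBAs, so the
 * returned last-LBA value is shifted right by 3 (divided by 8).
 */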
/*
 * Emulate SYNCHRONIZE_CACHE_*
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
	sector_t error_sector;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * blkdev_issue_flush() does not support specifying a range, so
	 * we have to flush the entire cache.
	 */
	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
	if (ret != 0) {
		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
			" error_sector: %llu\n", ret,
			(unsigned long long)error_sector);
	}

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}
/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int iblock_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_dpo(struct se_device *dev)
{
	return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int iblock_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int iblock_emulated_fua_read(struct se_device *dev)
{
	return 0;
}
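/*
 * Note on the four helpers above: returning 1 advertises a capability
 * (WriteCache and FUA WRITE emulation) to the TCM core, while DPO and
 * FUA READ stay disabled. How the core surfaces these to initiators
 * (e.g. via caching mode page bits) is handled outside this file.
 */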
static int iblock_do_task(struct se_task *task)
{
	struct se_device *dev = task->task_se_cmd->se_dev;
	struct iblock_req *req = IBLOCK_REQ(task);
	struct bio *bio = req->ib_bio, *nbio = NULL;
	struct blk_plug plug;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
		    (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
		     T_TASK(task->task_se_cmd)->t_tasks_fua))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}
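	/*
	 * Submit the chained bios inside an on-stack blk_start_plug()/
	 * blk_finish_plug() pair so the block layer can batch them before
	 * the unplug kicks off the I/O; completion is then signalled
	 * per-bio through iblock_bio_done().
	 */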
	blk_start_plug(&plug);
	while (bio) {
		nbio = bio->bi_next;
		bio->bi_next = NULL;
		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);

		submit_bio(rw, bio);
		bio = nbio;
	}
	blk_finish_plug(&plug);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
	struct iblock_req *req = IBLOCK_REQ(task);
	struct bio *bio, *hbio = req->ib_bio;
	/*
	 * We only release the bio(s) here if iblock_bio_done() has not called
	 * bio_put() -> iblock_bio_destructor().
	 */
	while (hbio != NULL) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}

	kfree(req);
}
enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				printk(KERN_ERR "Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			match_int(args, &arg);
			ib_dev->ibd_force = arg;
			printk(KERN_INFO "IBLOCK: Set force=%d\n",
				ib_dev->ibd_force);
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}
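/*
 * The control string parsed above is a comma-separated option list,
 * e.g. (values illustrative):
 *
 *   udev_path=/dev/sdb,force=1
 */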
static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
		return -1;
	}

	return 0;
}
static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
			"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: %d Minor: %d\n",
			ibd->ibd_major, ibd->ibd_minor);
	}

	return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *iblock_get_bio(
	struct se_task *task,
	struct iblock_req *ib_req,
	struct iblock_dev *ib_dev,
	int *ret,
	sector_t lba,
	u32 sg_num)
{
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!(bio)) {
		printk(KERN_ERR "Unable to allocate memory for bio\n");
		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
		return NULL;
	}

	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = (void *) task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->ib_bio_cnt);

	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
			atomic_read(&ib_req->ib_bio_cnt));
	return bio;
}
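/*
 * Each bio allocated above bumps ib_req->ib_bio_cnt; iblock_bio_done()
 * decrements it per completion, so the owning task is only completed
 * once the final bio in the chain has finished.
 */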
static int iblock_map_task_SG(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = SE_DEV(cmd);
	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct scatterlist *sg;
	int ret = 0;
	u32 i, sg_num = task->task_sg_num;
	sector_t block_lba;
	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (DEV_ATTRIB(dev)->block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (DEV_ATTRIB(dev)->block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (DEV_ATTRIB(dev)->block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (DEV_ATTRIB(dev)->block_size == 512)
		block_lba = task->task_lba;
	else {
		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", DEV_ATTRIB(dev)->block_size);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
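	/*
	 * Worked example for the shift above: with a 4096-byte se_device
	 * block size, SCSI LBA 100 becomes 512-byte block LBA 800
	 * (100 << 3), since each 4k block spans eight 512-byte sectors.
	 */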
	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
	if (!(bio))
		return ret;

	ib_req->ib_bio = bio;
	hbio = tbio = bio;
	/*
	 * Use fs/bio.c:bio_add_page() to setup the bio_vec maplist
	 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
	 */
	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
				sg->length, sg->offset);
again:
		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
		if (ret != sg->length) {
			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
					bio->bi_sector);
			DEBUG_IBLOCK("** task->task_size: %u\n",
					task->task_size);
			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
					bio->bi_max_vecs);
			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
					bio->bi_vcnt);

			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
						block_lba, sg_num);
			if (!(bio))
				goto fail;

			tbio = tbio->bi_next = bio;
			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
				" list, Going to again\n", bio);
			goto again;
		}
		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, decremented"
			" sg_num to %u\n", task, sg_num);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
				" to %llu\n", task, block_lba);
		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
				" %u\n", task, bio->bi_vcnt);
	}

	return 0;
fail:
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_put(bio);
	}
	return ret;
}
static unsigned char *iblock_get_cdb(struct se_task *task)
{
	return IBLOCK_REQ(task)->ib_scsi_cdb;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
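/*
 * iblock_get_blocks() backs the core's READ_CAPACITY handling, deferring
 * to iblock_emulate_read_cap_with_block_size() above so the reported
 * last LBA honors any block_size remapping.
 */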
static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);
	/*
	 * Set -EIO if !BIO_UPTODATE and the passed in err is still 0
	 */
	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
		err = -EIO;

	if (err != 0) {
		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
		bio_put(bio);
		/*
		 * Wait to complete the task until the last bio has completed.
		 */
		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
			return;

		ibr->ib_bio = NULL;
		transport_complete_task(task, 0);
		return;
	}
	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		task, bio, task->task_lba, bio->bi_sector, err);
	/*
	 * bio_put() will call iblock_bio_destructor() to release the bio back
	 * to ibr->ib_bio_set.
	 */
	bio_put(bio);
	/*
	 * Wait to complete the task until the last bio has completed.
	 */
	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
		return;
	/*
	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
	 */
	ibr->ib_bio = NULL;
	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.map_task_SG		= iblock_map_task_SG,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.dpo_emulated		= iblock_emulated_dpo,
	.fua_write_emulated	= iblock_emulated_fua_write,
	.fua_read_emulated	= iblock_emulated_fua_read,
	.write_cache_emulated	= iblock_emulated_write_cache,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_cdb		= iblock_get_cdb,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};
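/*
 * Registering the template below hooks the IBLOCK backend into the TCM
 * core; transport_subsystem_register()/transport_subsystem_release()
 * pair with module load and unload.
 */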
static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);