/*
 * bsg.c - block layer implementation of the sg v3 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 */
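/*
 * For orientation, this is roughly how userspace drives the interface
 * implemented below (a minimal sketch, not part of this driver; it assumes
 * a transport has called bsg_register_queue() so that a /dev/bsg0-style
 * node exists, and uses a 6-byte TEST UNIT READY CDB as the example):
 *
 *	struct sg_io_v4 hdr;
 *	unsigned char cdb[6] = { 0, };		// TEST UNIT READY
 *	unsigned char sense[32];
 *	int fd = open("/dev/bsg0", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';			// required, see bsg_validate_sgv4_hdr()
 *	hdr.request = (unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.response = (unsigned long) sense;
 *	hdr.max_response_len = sizeof(sense);
 *
 *	write(fd, &hdr, sizeof(hdr));		// queue the command
 *	read(fd, &hdr, sizeof(hdr));		// reap the completion
 */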
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
static char bsg_version[] = "block layer sg (bsg) 0.4";
struct bsg_device {
	request_queue_t *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 0,
	BSG_F_WRITE_PERM	= 1,
};
#define BSG_DEFAULT_CMDS	64

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

#define BSG_MAJOR	(240)
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr;

#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;
/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	int err;
	struct sg_io_v4 hdr;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		goto out;
	}

	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}
static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
/*
 * get a new free command, blocking if needed and specified
 */
static struct bsg_command *bsg_get_command(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = __bsg_alloc_command(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	return bc;
}
static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;
	if (blk_verify_command(rq->cmd, has_write_perm))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}
/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	/* not supported currently */
	if (hdr->protocol || hdr->subprotocol)
		return -EINVAL;

	/*
	 * looks sane, if no data then it should be fine from our POV
	 */
	if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
		return 0;

	/* not supported currently */
	if (hdr->dout_xfer_len && hdr->din_xfer_len)
		return -EINVAL;

	*rw = hdr->dout_xfer_len ? WRITE : READ;

	return 0;
}
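/*
 * Illustrative numbers for the size check above (not from the original
 * source): with 512-byte sectors and q->max_sectors = 1024, the largest
 * accepted transfer is 1024 << 9 = 512 KiB per direction. A header with
 * both dout_xfer_len and din_xfer_len non-zero is rejected, since
 * bidirectional transfers are not supported yet.
 */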
/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	request_queue_t *q = bd->queue;
	struct request *rq;
	int ret, rw = 0; /* shut up gcc */
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret) {
			dprintk("failed map at %d\n", ret);
			blk_put_request(rq);
			rq = ERR_PTR(ret);
		}
	}

	return rq;
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}
static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);

	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}

	spin_unlock_irq(&bd->lock);

	return bc;
}
/*
 * Get a finished command from the done list
 */
static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, state);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
static struct bsg_command *
bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
{
	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
}

static struct bsg_command *
bsg_get_done_cmd_nosignals(struct bsg_device *bd)
{
	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
}
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min((unsigned int) hdr->max_response_len,
			      rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		bc = bsg_get_done_cmd_nosignals(bd);

		/*
		 * we _must_ complete before restarting, because
		 * bsg_release can't handle this failing.
		 */
		if (PTR_ERR(bc) == -ERESTARTSYS)
			continue;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);
static ssize_t
__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = get_bc(bd, iov);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}
static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bsg_get_done_cmd,
			 bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;

		bc = bsg_get_command(bd);
		if (!bc)
			break;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_read);
	return bytes_read;
}
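/*
 * Illustrative userspace view of the semantics above (not from the original
 * source): a write() of N * sizeof(struct sg_io_v4) bytes may queue fewer
 * than N commands. If at least one command was queued before a non-fatal
 * error (-ENOSPC, -ENODATA, -EAGAIN), the bytes consumed so far are
 * returned and the caller simply retries the remainder:
 *
 *	ssize_t ret = write(fd, hdrs, n * sizeof(struct sg_io_v4));
 *	if (ret > 0)
 *		queued = ret / sizeof(struct sg_io_v4);
 */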
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd = NULL;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;	/* room to queue more commands */
	spin_unlock_irq(&bd->lock);

	return mask;
}
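/*
 * A minimal sketch of the intended poll() usage from userspace (assumes a
 * bsg node opened with O_NONBLOCK): POLLIN signals completed commands
 * ready to reap with read(), POLLOUT that more commands can be queued.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)
 *			read(fd, &hdr, sizeof(hdr));	// reap a completion
 *		if (pfd.revents & POLLOUT)
 *			write(fd, &hdr, sizeof(hdr));	// queue another
 *	}
 */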
static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	if (!bd)
		return -ENXIO;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return 0;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
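/*
 * SG_IO above is the synchronous path: a single sg_io_v4 in, executed via
 * blk_execute_rq(), results copied straight back. A minimal sketch of the
 * matching userspace call (hdr filled in as for write()):
 *
 *	if (ioctl(fd, SG_IO, &hdr) == 0 && !(hdr.info & SG_INFO_CHECK))
 *		;	// command completed without device/transport/driver error
 */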
static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.ioctl		=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	mutex_unlock(&bsg_mutex);
}
int bsg_register_queue(struct request_queue *q, char *name)
{
	struct bsg_class_device *bcd;
	dev_t dev;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	dev = MKDEV(BSG_MAJOR, bsg_device_nr);
	bcd->minor = bsg_device_nr;
	bsg_device_nr++;
	bcd->queue = q;
	bcd->class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
	if (!bcd->class_dev)
		goto err;
	list_add_tail(&bcd->list, &bsg_class_list);
	if (sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"))
		goto err;
	mutex_unlock(&bsg_mutex);
	return 0;
err:
	bsg_device_nr--;
	list_del_init(&bcd->list);
	if (bcd->class_dev)
		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
	return -ENOMEM;
}
static int __init bsg_init(void)
{
	int ret, i;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		kmem_cache_destroy(bsg_cmd_cachep);
		return PTR_ERR(bsg_class);
	}

	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

subsys_initcall(bsg_init);