/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"
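/*
 * Per-open device state. ->lock protects queued_cmds, done_cmds and the
 * busy/done command lists; bsg_rq_end_io() wakes wq_done when a command
 * completes, and bsg_free_command() wakes wq_free when one is reaped.
 */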
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};
enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif
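/*
 * Change the #undef above to "#define BSG_DEBUG" to enable the
 * dprintk() tracing used throughout this file.
 */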
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
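/*
 * Command lifecycle: allocated from bsg_cmd_cachep by bsg_alloc_command(),
 * queued on ->busy_list by bsg_add_command(), moved to ->done_list by
 * bsg_rq_end_io() on completion, then reaped and freed again via
 * bsg_get_done_cmd() and bsg_free_command().
 */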
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
							&bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}
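/*
 * Note: a bidirectional command (both dout_xfer_len and din_xfer_len
 * set) is mapped above as two requests -- the din side rides on
 * rq->next_rq -- and is only allowed when the queue advertises
 * QUEUE_FLAG_BIDI.
 */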
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
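/*
 * The read(2)/write(2) protocol on a bsg fd: userspace writes whole
 * struct sg_io_v4 headers to submit commands and reads whole headers
 * back to reap completions, so count must be a multiple of
 * sizeof(struct sg_io_v4) in both directions.
 */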
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free)
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
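/*
 * Illustrative userspace sketch (not part of this file): issuing a
 * 6-byte TEST UNIT READY CDB through the synchronous SG_IO ioctl on an
 * open /dev/bsg node, assuming only the sg_io_v4 layout from
 * <linux/bsg.h>.  A minimal hedged example, not a complete program:
 *
 *	unsigned char cdb[6] = { 0 };		// TEST UNIT READY
 *	unsigned char sense[32];
 *	struct sg_io_v4 hdr = { 0 };
 *
 *	hdr.guard = 'Q';			// checked by bsg_validate_sgv4_hdr()
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(unsigned long)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.response = (__u64)(unsigned long)sense;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.timeout = 10000;			// milliseconds
 *
 *	if (ioctl(fd, SG_IO, &hdr) < 0)
 *		perror("SG_IO");
 *
 * On return, device_status/transport_status/driver_status are filled in,
 * SG_INFO_CHECK is set in hdr.info if any of them are non-zero, and
 * sense[] holds up to hdr.response_len bytes of sense data.
 */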
static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = parent->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
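/*
 * Hedged usage note: a transport typically registers its request queue
 * at device-add time and calls bsg_unregister_queue() on remove; the
 * SCSI midlayer does so roughly like this (illustrative call, not part
 * of this file):
 *
 *	bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
 *			   NULL, NULL);
 */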
static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);