/*
 * SCSI target lib functions
 *
 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tgt.h>
#include <../drivers/md/dm-bio-list.h>

#include "scsi_tgt_priv.h"
static struct workqueue_struct *scsi_tgtd;
static struct kmem_cache *scsi_tgt_cmd_cache;
/*
 * TODO: this struct will be killed when the block layer supports large bios
 * and James's work struct code is in
 */
struct scsi_tgt_cmd {
	/* TODO replace work with James b's code */
	struct work_struct work;
	/* TODO replace the lists with a large bio */
	struct bio_list xfer_done_list;
	struct bio_list xfer_list;

	struct list_head hash_list;
	struct request *rq;
	u64 tag;

	void *buffer;
	unsigned bufflen;
};
#define TGT_HASH_ORDER	4
#define cmd_hashfn(tag)	hash_long((unsigned long) (tag), TGT_HASH_ORDER)
struct scsi_tgt_queuedata {
	struct Scsi_Host *shost;
	struct list_head cmd_hash[1 << TGT_HASH_ORDER];
	spinlock_t cmd_hash_lock;
};
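
/*
 * Illustrative sketch (not part of the original file): with
 * TGT_HASH_ORDER set to 4 there are 1 << 4 = 16 hash buckets, and
 * cmd_hashfn() folds a 64-bit command tag down to a bucket index via
 * hash_long().  A lookup therefore starts from
 *
 *	struct list_head *head = &qdata->cmd_hash[cmd_hashfn(tag)];
 *
 * and walks that bucket comparing tcmd->tag against the tag, as
 * tgt_cmd_hash_lookup() below does.
 */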
/*
 * Function:	scsi_host_get_command()
 *
 * Purpose:	Allocate and setup a scsi command block and blk request
 *
 * Arguments:	shost	- scsi host
 *		data_dir - dma data dir
 *		gfp_mask - allocator flags
 *
 * Returns:	The allocated scsi command structure.
 *
 * This should be called by target LLDs to get a command.
 */
struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
					enum dma_data_direction data_dir,
					gfp_t gfp_mask)
{
	int write = (data_dir == DMA_TO_DEVICE);
	struct request *rq;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_cmd *tcmd;

	/* Bail if we can't get a reference to the device */
	if (!get_device(&shost->shost_gendev))
		return NULL;

	tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
	if (!tcmd)
		goto put_dev;

	rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
	if (!rq)
		goto free_tcmd;

	cmd = __scsi_get_command(shost, gfp_mask);
	if (!cmd)
		goto release_rq;

	memset(cmd, 0, sizeof(*cmd));
	cmd->sc_data_direction = data_dir;
	cmd->jiffies_at_alloc = jiffies;
	cmd->request = rq;

	rq->special = cmd;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
	rq->end_io_data = tcmd;

	bio_list_init(&tcmd->xfer_list);
	bio_list_init(&tcmd->xfer_done_list);
	tcmd->rq = rq;

	return cmd;

release_rq:
	blk_put_request(rq);
free_tcmd:
	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
put_dev:
	put_device(&shost->shost_gendev);
	return NULL;
}
EXPORT_SYMBOL_GPL(scsi_host_get_command);
/*
 * Function:	scsi_host_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	shost	- scsi host
 *		cmd	- command block to free
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct request_queue *q = shost->uspace_req_q;
	struct request *rq = cmd->request;
	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
	unsigned long flags;

	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	__scsi_put_command(shost, cmd, &shost->shost_gendev);
}
EXPORT_SYMBOL_GPL(scsi_host_put_command);
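
/*
 * Hypothetical usage sketch (not part of the original file): a target
 * LLD would typically pair the two calls above, allocating a command
 * for each CDB it receives and freeing it once the response has gone
 * out, e.g.:
 *
 *	struct scsi_cmnd *cmd;
 *
 *	cmd = scsi_host_get_command(shost, DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	...
 *	scsi_host_put_command(shost, cmd);
 */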
static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
{
	struct bio *bio;

	/* must call bio_endio in case bio was bounced */
	while ((bio = bio_list_pop(&tcmd->xfer_done_list))) {
		bio_endio(bio, bio->bi_size, 0);
		bio_unmap_user(bio);
	}

	while ((bio = bio_list_pop(&tcmd->xfer_list))) {
		bio_endio(bio, bio->bi_size, 0);
		bio_unmap_user(bio);
	}
}
static void cmd_hashlist_del(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->request->q;
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	unsigned long flags;
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	list_del(&tcmd->hash_list);
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}
static void scsi_tgt_cmd_destroy(struct work_struct *work)
{
	struct scsi_tgt_cmd *tcmd =
		container_of(work, struct scsi_tgt_cmd, work);
	struct scsi_cmnd *cmd = tcmd->rq->special;

	dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
		rq_data_dir(cmd->request));
	/*
	 * We fix rq->cmd_flags here: because we told bio_map_user to
	 * write to the vm for WRITE commands, blk_rq_bio_prep set the
	 * rq_data_dir flag to READ.
	 */
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		cmd->request->cmd_flags |= REQ_RW;
	else
		cmd->request->cmd_flags &= ~REQ_RW;

	scsi_unmap_user_pages(tcmd);
	scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
}
static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
			      u64 tag)
{
	struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
	unsigned long flags;
	struct list_head *head;

	tcmd->tag = tag;
	INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	head = &qdata->cmd_hash[cmd_hashfn(tag)];
	list_add(&tcmd->hash_list, head);
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
}
/*
 * scsi_tgt_alloc_queue - setup queue used for message passing
 * @shost: scsi host
 *
 * This should be called by the LLD after host allocation.
 * The queue will be released when the host is released.
 */
int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
{
	struct scsi_tgt_queuedata *queuedata;
	struct request_queue *q;
	int err, i;

	/*
	 * Do we need to send a netlink event or should uspace
	 * just respond to the hotplug event?
	 */
	q = __scsi_alloc_queue(shost, NULL);
	if (!q)
		return -ENOMEM;

	queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
	if (!queuedata) {
		err = -ENOMEM;
		goto cleanup_queue;
	}
	queuedata->shost = shost;
	q->queuedata = queuedata;

	/*
	 * this is a silly hack. We should probably just queue as many
	 * commands as are received from userspace. uspace can then make
	 * sure we do not overload the HBA
	 */
	q->nr_requests = shost->hostt->can_queue;
	/*
	 * We currently only support software LLDs so this does
	 * not matter for now. Do we need this for the cards we support?
	 * If so we should make it a host template value.
	 */
	blk_queue_dma_alignment(q, 0);
	shost->uspace_req_q = q;

	for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
		INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
	spin_lock_init(&queuedata->cmd_hash_lock);

	return 0;

cleanup_queue:
	blk_cleanup_queue(q);
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
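
/*
 * Hypothetical lifecycle sketch (not part of the original file): an
 * LLD is expected to set up the message-passing queue right after
 * allocating its host and tear it down on release, e.g.:
 *
 *	shost = scsi_host_alloc(&my_tgt_template, sizeof(*priv));
 *	...
 *	err = scsi_tgt_alloc_queue(shost);
 *	if (err)
 *		goto put_host;
 *	...
 *	scsi_tgt_free_queue(shost);	/ * on host release * /
 *
 * my_tgt_template and priv above are placeholders for the LLD's own
 * host template and private data.
 */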
void scsi_tgt_free_queue(struct Scsi_Host *shost)
{
	int i;
	unsigned long flags;
	struct request_queue *q = shost->uspace_req_q;
	struct scsi_cmnd *cmd;
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct scsi_tgt_cmd *tcmd, *n;
	LIST_HEAD(cmds);

	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);

	for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
		list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
					 hash_list) {
			list_del(&tcmd->hash_list);
			list_add(&tcmd->hash_list, &cmds);
		}
	}

	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

	while (!list_empty(&cmds)) {
		tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
		list_del(&tcmd->hash_list);
		cmd = tcmd->rq->special;

		shost->hostt->eh_abort_handler(cmd);
		scsi_tgt_cmd_destroy(&tcmd->work);
	}
}
EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
	return queue->shost;
}
EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
/*
 * scsi_tgt_queue_command - queue command for userspace processing
 * @cmd:	scsi command
 * @scsilun:	scsi lun
 * @tag:	unique value to identify this command for tmf
 */
int scsi_tgt_queue_command(struct scsi_cmnd *cmd, struct scsi_lun *scsilun,
			   u64 tag)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	int err;

	init_scsi_tgt_cmd(cmd->request, tcmd, tag);
	err = scsi_tgt_uspace_send_cmd(cmd, scsilun, tag);
	if (err)
		cmd_hashlist_del(cmd);

	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
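
/*
 * Hypothetical flow sketch (not part of the original file): when a CDB
 * arrives from the fabric, an LLD would allocate a command, copy the
 * CDB in, and hand the command to userspace, e.g.:
 *
 *	cmd = scsi_host_get_command(shost, dir, GFP_ATOMIC);
 *	if (!cmd)
 *		return -ENOMEM;
 *	memcpy(cmd->cmnd, cdb, MAX_COMMAND_SIZE);
 *	err = scsi_tgt_queue_command(cmd, &lun, tag);
 *	if (err)
 *		scsi_host_put_command(shost, cmd);
 *
 * where cdb, dir, lun, and tag come from the transport and are the
 * LLD's responsibility.
 */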
/*
 * This is normally run from an interrupt handler, and the unmap
 * needs process context, so we must queue the work.
 */
static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));

	scsi_tgt_uspace_send_status(cmd, tcmd->tag);
	queue_work(scsi_tgtd, &tcmd->work);
}
static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
	int err;

	dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));

	err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
	switch (err) {
	case SCSI_MLQUEUE_HOST_BUSY:
	case SCSI_MLQUEUE_DEVICE_BUSY:
		return -EAGAIN;
	}

	return 0;
}
static void scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	int err;

	err = __scsi_tgt_transfer_response(cmd);
	if (!err)
		return;

	cmd->result = DID_BUS_BUSY << 16;
	err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
	if (err <= 0)
		/* the eh will have to pick this up */
		printk(KERN_ERR "Could not send cmd %p status\n", cmd);
}
static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;
	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
	int count;

	cmd->use_sg = rq->nr_phys_segments;
	cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
	if (!cmd->request_buffer)
		return -ENOMEM;

	cmd->request_bufflen = rq->data_len;

	dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
		rq_data_dir(rq));
	count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	eprintk("cmd %p addr %p cnt %d\n", cmd, tcmd->buffer, cmd->use_sg);
	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	return -EINVAL;
}
/* TODO: test this crap and replace bio_map_user with new interface maybe */
static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
			       int rw)
{
	struct request_queue *q = cmd->request->q;
	struct request *rq = cmd->request;
	void *uaddr = tcmd->buffer;
	unsigned int len = tcmd->bufflen;
	struct bio *bio;
	int err;

	while (len > 0) {
		dprintk("%lx %u\n", (unsigned long) uaddr, len);
		bio = bio_map_user(q, NULL, (unsigned long) uaddr, len, rw);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			dprintk("fail to map %lx %u %d %x\n",
				(unsigned long) uaddr, len, err, cmd->cmnd[0]);
			goto unmap_bios;
		}

		uaddr += bio->bi_size;
		len -= bio->bi_size;

		/*
		 * The first bio is added and merged. We could probably
		 * try to add others using scsi_merge_bio() but for now
		 * we keep it simple. The first bio should be pretty large
		 * (either hitting the 1 MB bio pages limit or a queue limit)
		 * already but for really large IO we may want to try and
		 * merge these.
		 */
		if (!rq->bio) {
			blk_rq_bio_prep(q, rq, bio);
			rq->data_len = bio->bi_size;
		} else
			/* put list of bios to transfer in next go around */
			bio_list_add(&tcmd->xfer_list, bio);
	}

	cmd->offset = 0;
	err = scsi_tgt_init_cmd(cmd, GFP_KERNEL);
	if (err)
		goto unmap_bios;

	return 0;

unmap_bios:
	if (rq->bio) {
		bio_unmap_user(rq->bio);
		while ((bio = bio_list_pop(&tcmd->xfer_list)))
			bio_unmap_user(bio);
	}

	return err;
}
static int scsi_tgt_transfer_data(struct scsi_cmnd *);
static void scsi_tgt_data_transfer_done(struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
	struct bio *bio;
	int err;

	/* should we free resources here on error ? */
	if (cmd->result) {
send_uspace_err:
		err = scsi_tgt_uspace_send_status(cmd, tcmd->tag);
		if (err <= 0)
			/* the tgt uspace eh will have to pick this up */
			printk(KERN_ERR "Could not send cmd %p status\n", cmd);
		return;
	}

	dprintk("cmd %p request_bufflen %u bufflen %u\n",
		cmd, cmd->request_bufflen, tcmd->bufflen);

	scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	bio_list_add(&tcmd->xfer_done_list, cmd->request->bio);

	tcmd->buffer += cmd->request_bufflen;
	cmd->offset += cmd->request_bufflen;

	if (!tcmd->xfer_list.head) {
		scsi_tgt_transfer_response(cmd);
		return;
	}

	dprintk("cmd2 %p request_bufflen %u bufflen %u\n",
		cmd, cmd->request_bufflen, tcmd->bufflen);

	bio = bio_list_pop(&tcmd->xfer_list);
	BUG_ON(!bio);

	blk_rq_bio_prep(cmd->request->q, cmd->request, bio);
	cmd->request->data_len = bio->bi_size;
	err = scsi_tgt_init_cmd(cmd, GFP_ATOMIC);
	if (err) {
		cmd->result = DID_ERROR << 16;
		goto send_uspace_err;
	}

	if (scsi_tgt_transfer_data(cmd)) {
		cmd->result = DID_NO_CONNECT << 16;
		goto send_uspace_err;
	}
}
static int scsi_tgt_transfer_data(struct scsi_cmnd *cmd)
{
	int err;
	struct Scsi_Host *host = scsi_tgt_cmd_to_host(cmd);

	err = host->hostt->transfer_data(cmd, scsi_tgt_data_transfer_done);
	switch (err) {
	case SCSI_MLQUEUE_HOST_BUSY:
	case SCSI_MLQUEUE_DEVICE_BUSY:
		return -EAGAIN;
	default:
		return 0;
	}
}
static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
			       unsigned len)
{
	char __user *p = (char __user *) uaddr;

	if (copy_from_user(cmd->sense_buffer, p,
			   min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
		printk(KERN_ERR "Could not copy the sense buffer\n");
		return -EIO;
	}
	return 0;
}
static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct scsi_tgt_cmd *tcmd;
	int err;

	err = shost->hostt->eh_abort_handler(cmd);
	if (err)
		eprintk("fail to abort %p\n", cmd);

	tcmd = cmd->request->end_io_data;
	scsi_tgt_cmd_destroy(&tcmd->work);
	return err;
}
static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
{
	struct scsi_tgt_queuedata *qdata = q->queuedata;
	struct request *rq = NULL;
	struct list_head *head;
	struct scsi_tgt_cmd *tcmd;
	unsigned long flags;

	head = &qdata->cmd_hash[cmd_hashfn(tag)];
	spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
	list_for_each_entry(tcmd, head, hash_list) {
		if (tcmd->tag == tag) {
			rq = tcmd->rq;
			list_del(&tcmd->hash_list);
			break;
		}
	}
	spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);

	return rq;
}
int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
			 unsigned long uaddr, u8 rw)
{
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *rq;
	struct scsi_tgt_cmd *tcmd;
	int err = 0;

	dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
		result, len, uaddr, rw);

	/* TODO: replace with a O(1) alg */
	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return -EINVAL;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
	if (!rq) {
		printk(KERN_ERR "Could not find tag %llu\n",
		       (unsigned long long) tag);
		err = -EINVAL;
		goto done;
	}
	cmd = rq->special;

	dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
		result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);

	if (result == TASK_ABORTED) {
		scsi_tgt_abort_cmd(shost, cmd);
		goto done;
	}
	/*
	 * store the userspace values here, the working values are
	 * in the request_* values
	 */
	tcmd = cmd->request->end_io_data;
	tcmd->buffer = (void *)uaddr;
	tcmd->bufflen = len;
	cmd->result = result;

	if (!tcmd->bufflen || cmd->request_buffer) {
		err = __scsi_tgt_transfer_response(cmd);
		goto done;
	}

	/*
	 * TODO: Do we need to handle the case where the request does not
	 * align with the LLD?
	 */
	err = scsi_map_user_pages(rq->end_io_data, cmd, rw);
	if (err) {
		eprintk("%p %d\n", cmd, err);
		err = -EAGAIN;
		goto done;
	}

	/* userspace failure */
	if (cmd->result) {
		if (status_byte(cmd->result) == CHECK_CONDITION)
			scsi_tgt_copy_sense(cmd, uaddr, len);
		err = __scsi_tgt_transfer_response(cmd);
		goto done;
	}
	/* ask the target LLD to transfer the data to the buffer */
	err = scsi_tgt_transfer_data(cmd);

done:
	scsi_host_put(shost);
	return err;
}
int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, int function, u64 tag,
			      struct scsi_lun *scsilun, void *data)
{
	int err;

	/* TODO: need to retry if this fails. */
	err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, function,
					    tag, scsilun, data);
	if (err < 0)
		eprintk("The task management request was lost!\n");
	return err;
}
EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
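
/*
 * Hypothetical flow sketch (not part of the original file): an LLD
 * calls scsi_tgt_tsk_mgmt_request() when the initiator sends a task
 * management function, and userspace later answers through
 * scsi_tgt_kspace_tsk_mgmt() below, which routes the result to the
 * LLD's tsk_mgmt_response() callback, e.g.:
 *
 *	err = scsi_tgt_tsk_mgmt_request(shost, function, tag,
 *					&lun, mid_cookie);
 *
 * where function, tag, and lun come from the transport, and mid_cookie
 * is assumed here to be an LLD-chosen value echoed back as "mid".
 */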
int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 mid, int result)
{
	struct Scsi_Host *shost;
	int err = -EINVAL;

	dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);

	shost = scsi_host_lookup(host_no);
	if (IS_ERR(shost)) {
		printk(KERN_ERR "Could not find host no %d\n", host_no);
		return err;
	}

	if (!shost->uspace_req_q) {
		printk(KERN_ERR "Not target scsi host %d\n", host_no);
		goto done;
	}

	err = shost->hostt->tsk_mgmt_response(mid, result);
done:
	scsi_host_put(shost);
	return err;
}
static int __init scsi_tgt_init(void)
{
	int err;

	scsi_tgt_cmd_cache = kmem_cache_create("scsi_tgt_cmd",
					       sizeof(struct scsi_tgt_cmd),
					       0, 0, NULL, NULL);
	if (!scsi_tgt_cmd_cache)
		return -ENOMEM;

	scsi_tgtd = create_workqueue("scsi_tgtd");
	if (!scsi_tgtd) {
		err = -ENOMEM;
		goto free_kmemcache;
	}

	err = scsi_tgt_if_init();
	if (err)
		goto destroy_wq;

	return 0;

destroy_wq:
	destroy_workqueue(scsi_tgtd);
free_kmemcache:
	kmem_cache_destroy(scsi_tgt_cmd_cache);
	return err;
}
static void __exit scsi_tgt_exit(void)
{
	destroy_workqueue(scsi_tgtd);
	scsi_tgt_if_exit();
	kmem_cache_destroy(scsi_tgt_cmd_cache);
}
module_init(scsi_tgt_init);
module_exit(scsi_tgt_exit);

MODULE_DESCRIPTION("SCSI target core");
MODULE_LICENSE("GPL");