/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *         Copyright (C) 2002, 2003 Christoph Hellwig
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  <drew@colorado.edu>
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *  enhancements.
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *  (changed to kmod)
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
static void scsi_done(struct scsi_cmnd *cmd);
static int scsi_retry_command(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)        (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
                                COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
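/*
 * Example (illustrative only, not part of the original source): for
 * READ(10), opcode 0x28, the group is (0x28 >> 5) & 7 == 1, so CDB_SIZE()
 * uses COMMAND_SIZE() and yields a 10-byte CDB.  A vendor-unique group 6
 * opcode such as 0xc0 falls through to the cmd_len the submitter supplied:
 *
 *        cmd->cmnd[0] = 0x28;        READ(10), group 1
 *        len = CDB_SIZE(cmd);        == COMMAND_SIZE(0x28) == 10
 *
 *        cmd->cmnd[0] = 0xc0;        vendor unique, group 6
 *        cmd->cmd_len = 12;          caller must supply the length
 *        len = CDB_SIZE(cmd);        == 12
 */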
/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif

const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
        "Direct-Access    ",
        "Sequential-Access",
        "Printer          ",
        "Processor        ",
        "WORM             ",
        "CD-ROM           ",
        "Scanner          ",
        "Optical Device   ",
        "Medium Changer   ",
        "Communications   ",
        "Unknown          ",
        "Unknown          ",
        "RAID             ",
        "Enclosure        ",
        "Direct-Access-RBC",
};
EXPORT_SYMBOL(scsi_device_types);
/*
 * Function:    scsi_allocate_request
 *
 * Purpose:     Allocate a request descriptor.
 *
 * Arguments:   device    - device for which we want a request
 *              gfp_mask  - allocation flags passed to kmalloc
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Pointer to request block.
 */
struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
                                           gfp_t gfp_mask)
{
        const int offset = ALIGN(sizeof(struct scsi_request), 4);
        const int size = offset + sizeof(struct request);
        struct scsi_request *sreq;

        sreq = kmalloc(size, gfp_mask);
        if (likely(sreq != NULL)) {
                memset(sreq, 0, size);
                sreq->sr_request = (struct request *)(((char *)sreq) + offset);
                sreq->sr_device = sdev;
                sreq->sr_host = sdev->host;
                sreq->sr_magic = SCSI_REQ_MAGIC;
                sreq->sr_data_direction = DMA_BIDIRECTIONAL;
        }

        return sreq;
}
EXPORT_SYMBOL(scsi_allocate_request);
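/*
 * Illustrative sketch (not part of the original file): how an upper-level
 * caller such as a char-device ioctl path might use the request API.  The
 * submission helper named here (scsi_do_req()) is an assumption about what
 * is available in this tree; only the allocate/release pairing is shown
 * authoritatively.
 *
 *        struct scsi_request *sreq;
 *
 *        sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *        if (!sreq)
 *                return -ENOMEM;
 *        sreq->sr_data_direction = DMA_FROM_DEVICE;
 *        ... submit via a request-based helper, e.g. scsi_do_req() ...
 *        ... inspect sreq->sr_result / sreq->sr_sense_buffer ...
 *        scsi_release_request(sreq);
 */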
void __scsi_release_request(struct scsi_request *sreq)
{
        struct request *req = sreq->sr_request;

        /* unlikely because the tag was usually ended earlier by the
         * mid-layer. However, for layering reasons ULD's don't end
         * the tag of commands they generate. */
        if (unlikely(blk_rq_tagged(req))) {
                unsigned long flags;
                struct request_queue *q = req->q;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_queue_end_tag(q, req);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }

        if (likely(sreq->sr_command != NULL)) {
                struct scsi_cmnd *cmd = sreq->sr_command;

                sreq->sr_command = NULL;
                scsi_next_command(cmd);
        }
}

/*
 * Function:    scsi_release_request
 *
 * Purpose:     Release a request descriptor.
 *
 * Arguments:   sreq    - request to release
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
        __scsi_release_request(sreq);
        kfree(sreq);
}
EXPORT_SYMBOL(scsi_release_request);
struct scsi_host_cmd_pool {
        kmem_cache_t    *slab;
        unsigned int    users;
        char            *name;
        unsigned int    slab_flags;
        gfp_t           gfp_mask;
};

static struct scsi_host_cmd_pool scsi_cmd_pool = {
        .name           = "scsi_cmd_cache",
        .slab_flags     = SLAB_HWCACHE_ALIGN,
};

static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
        .name           = "scsi_cmd_cache(DMA)",
        .slab_flags     = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
        .gfp_mask       = __GFP_DMA,
};

static DECLARE_MUTEX(host_cmd_pool_mutex);

static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
                                            gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               gfp_mask | shost->cmd_pool->gfp_mask);

        if (unlikely(!cmd)) {
                unsigned long flags;

                spin_lock_irqsave(&shost->free_list_lock, flags);
                if (likely(!list_empty(&shost->free_list))) {
                        cmd = list_entry(shost->free_list.next,
                                         struct scsi_cmnd, list);
                        list_del_init(&cmd->list);
                }
                spin_unlock_irqrestore(&shost->free_list_lock, flags);
        }

        return cmd;
}
/*
 * Function:    scsi_get_command()
 *
 * Purpose:     Allocate and setup a scsi command block
 *
 * Arguments:   dev      - parent scsi device
 *              gfp_mask - allocator flags
 *
 * Returns:     The allocated scsi command structure.
 */
struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
{
        struct scsi_cmnd *cmd;

        /* Bail if we can't get a reference to the device */
        if (!get_device(&dev->sdev_gendev))
                return NULL;

        cmd = __scsi_get_command(dev->host, gfp_mask);

        if (likely(cmd != NULL)) {
                unsigned long flags;

                memset(cmd, 0, sizeof(*cmd));
                cmd->device = dev;
                init_timer(&cmd->eh_timeout);
                INIT_LIST_HEAD(&cmd->list);
                spin_lock_irqsave(&dev->list_lock, flags);
                list_add_tail(&cmd->list, &dev->cmd_list);
                spin_unlock_irqrestore(&dev->list_lock, flags);
                cmd->jiffies_at_alloc = jiffies;
        } else
                put_device(&dev->sdev_gendev);

        return cmd;
}
EXPORT_SYMBOL(scsi_get_command);
/*
 * Function:    scsi_put_command()
 *
 * Purpose:     Free a scsi command block
 *
 * Arguments:   cmd     - command block to free
 *
 * Returns:     Nothing.
 *
 * Notes:       The command must not belong to any lists.
 */
void scsi_put_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;

        /* serious error if the command hasn't come from a device list */
        spin_lock_irqsave(&cmd->device->list_lock, flags);
        BUG_ON(list_empty(&cmd->list));
        list_del_init(&cmd->list);
        spin_unlock(&cmd->device->list_lock);
        /* changing locks here, don't need to restore the irq state */
        spin_lock(&shost->free_list_lock);
        if (unlikely(list_empty(&shost->free_list))) {
                list_add(&cmd->list, &shost->free_list);
                cmd = NULL;
        }
        spin_unlock_irqrestore(&shost->free_list_lock, flags);

        if (likely(cmd != NULL))
                kmem_cache_free(shost->cmd_pool->slab, cmd);

        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_put_command);
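/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * of scsi_get_command()/scsi_put_command().  The error value returned on
 * allocation failure is a hypothetical caller policy.
 *
 *        struct scsi_cmnd *cmd = scsi_get_command(sdev, GFP_KERNEL);
 *        if (!cmd)
 *                return -ENOMEM;                 hypothetical caller policy
 *        ... fill in cmd->cmnd, cmd->sc_data_direction, etc. ...
 *        scsi_put_command(cmd);                  returns it to slab/freelist
 */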
/*
 * Function:    scsi_setup_command_freelist()
 *
 * Purpose:     Setup the command freelist for a scsi host.
 *
 * Arguments:   shost   - host to allocate the freelist for.
 *
 * Returns:     0 on success, -ENOMEM on allocation failure.
 */
int scsi_setup_command_freelist(struct Scsi_Host *shost)
{
        struct scsi_host_cmd_pool *pool;
        struct scsi_cmnd *cmd;

        spin_lock_init(&shost->free_list_lock);
        INIT_LIST_HEAD(&shost->free_list);

        /*
         * Select a command slab for this host and create it if not
         * yet existent.
         */
        down(&host_cmd_pool_mutex);
        pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
        if (!pool->users) {
                pool->slab = kmem_cache_create(pool->name,
                                sizeof(struct scsi_cmnd), 0,
                                pool->slab_flags, NULL, NULL);
                if (!pool->slab)
                        goto fail;
        }

        pool->users++;
        shost->cmd_pool = pool;
        up(&host_cmd_pool_mutex);

        /*
         * Get one backup command for this host.
         */
        cmd = kmem_cache_alloc(shost->cmd_pool->slab,
                               GFP_KERNEL | shost->cmd_pool->gfp_mask);
        if (!cmd)
                goto fail2;
        list_add(&cmd->list, &shost->free_list);
        return 0;

 fail2:
        if (!--pool->users)
                kmem_cache_destroy(pool->slab);
        return -ENOMEM;
 fail:
        up(&host_cmd_pool_mutex);
        return -ENOMEM;
}
/*
 * Function:    scsi_destroy_command_freelist()
 *
 * Purpose:     Release the command freelist for a scsi host.
 *
 * Arguments:   shost   - host whose freelist is going to be destroyed
 */
void scsi_destroy_command_freelist(struct Scsi_Host *shost)
{
        while (!list_empty(&shost->free_list)) {
                struct scsi_cmnd *cmd;

                cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
                list_del_init(&cmd->list);
                kmem_cache_free(shost->cmd_pool->slab, cmd);
        }

        down(&host_cmd_pool_mutex);
        if (!--shost->cmd_pool->users)
                kmem_cache_destroy(shost->cmd_pool->slab);
        up(&host_cmd_pool_mutex);
}
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML QUEUE log level is greater than or equal to:
         *
         * 1: nothing (match completion)
         *
         * 2: log opcode + command of all commands
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
                                       SCSI_LOG_MLQUEUE_BITS);
                if (level > 1) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "send ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * spaces to match disposition and cmd->result
                         * output in scsi_log_completion.
                         */
                        printk("                 ");
                        scsi_print_command(cmd);
                        if (level > 3) {
                                printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
                                       " done = 0x%p, queuecommand 0x%p\n",
                                        cmd->buffer, cmd->bufflen,
                                        cmd->done,
                                        sdev->host->hostt->queuecommand);
                        }
                }
        }
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
        unsigned int level;
        struct scsi_device *sdev;

        /*
         * If ML COMPLETE log level is greater than or equal to:
         *
         * 1: log disposition, result, opcode + command, and conditionally
         * sense data for failures or non SUCCESS dispositions.
         *
         * 2: same as 1 but for all command completions.
         *
         * 3: same as 2 plus dump cmd address
         *
         * 4: same as 3 plus dump extra junk
         */
        if (unlikely(scsi_logging_level)) {
                level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
                                       SCSI_LOG_MLCOMPLETE_BITS);
                if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
                    (level > 1)) {
                        sdev = cmd->device;
                        sdev_printk(KERN_INFO, sdev, "done ");
                        if (level > 2)
                                printk("0x%p ", cmd);
                        /*
                         * Dump truncated values, so we usually fit within
                         * 80 chars.
                         */
                        switch (disposition) {
                        case SUCCESS:
                                printk("SUCCESS");
                                break;
                        case NEEDS_RETRY:
                                printk("RETRY  ");
                                break;
                        case ADD_TO_MLQUEUE:
                                printk("MLQUEUE");
                                break;
                        case FAILED:
                                printk("FAILED ");
                                break;
                        case TIMEOUT_ERROR:
                                /*
                                 * If called via scsi_times_out.
                                 */
                                printk("TIMEOUT");
                                break;
                        default:
                                printk("UNKNOWN");
                        }
                        printk(" %8x ", cmd->result);
                        scsi_print_command(cmd);
                        if (status_byte(cmd->result) & CHECK_CONDITION) {
                                /*
                                 * XXX The scsi_print_sense formatting/prefix
                                 * doesn't match this function.
                                 */
                                scsi_print_sense("", cmd);
                        }
                        if (level > 3) {
                                printk(KERN_INFO "scsi host busy %d failed %d\n",
                                       sdev->host->host_busy,
                                       sdev->host->host_failed);
                        }
                }
        }
}
#endif
/*
 * Assign a serial number and pid to the request for error recovery
 * and debugging purposes.  Protected by the Host_Lock of host.
 */
static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
        cmd->serial_number = host->cmd_serial_number++;
        if (cmd->serial_number == 0)
                cmd->serial_number = host->cmd_serial_number++;

        cmd->pid = host->cmd_pid++;
        if (cmd->pid == 0)
                cmd->pid = host->cmd_pid++;
}
/*
 * Function:    scsi_dispatch_cmd
 *
 * Purpose:     Dispatch a command to the low-level driver.
 *
 * Arguments:   cmd - command block we are dispatching.
 */
int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        unsigned long flags = 0;
        unsigned long timeout;
        int rtn = 0;

        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
                 * returns an immediate error upwards, and signals
                 * that the device is no longer present */
                cmd->result = DID_NO_CONNECT << 16;
                atomic_inc(&cmd->device->iorequest_cnt);
                __scsi_done(cmd);
                /* return 0 (because the command has been processed) */
                goto out;
        }

        /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
        if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
                /*
                 * in SDEV_BLOCK, the command is just put back on the device
                 * queue.  The suspend state has already blocked the queue so
                 * future requests should not occur until the device
                 * transitions out of the suspend state.
                 */
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);

                SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked\n"));

                /*
                 * NOTE: rtn is still zero here because we don't need the
                 * queue to be plugged on return (it's already stopped)
                 */
                goto out;
        }

        /*
         * If SCSI-2 or lower, store the LUN value in cmnd.
         */
        if (cmd->device->scsi_level <= SCSI_2) {
                cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
                               (cmd->device->lun << 5 & 0xe0);
        }

        /*
         * We will wait MIN_RESET_DELAY clock ticks after the last reset so
         * we can avoid the drive not being ready.
         */
        timeout = host->last_reset + MIN_RESET_DELAY;

        if (host->resetting && time_before(jiffies, timeout)) {
                int ticks_remaining = timeout - jiffies;
                /*
                 * NOTE: This may be executed from within an interrupt
                 * handler!  This is bad, but for now, it'll do.  The irq
                 * level of the interrupt handler has been masked out by the
                 * platform dependent interrupt handling code already, so the
                 * sti() here will not cause another call to the SCSI host's
                 * interrupt handler (assuming there is one irq-level per
                 * host).
                 */
                while (--ticks_remaining >= 0)
                        mdelay(1 + 999 / HZ);
                host->resetting = 0;
        }

        /*
         * AK: unlikely race here: for some reason the timer could
         * expire before the serial number is set up below.
         */
        scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);

        scsi_log_send(cmd);

        /*
         * We will use a queued command if possible, otherwise we will
         * emulate the queuing and calling of completion function ourselves.
         */
        atomic_inc(&cmd->device->iorequest_cnt);

        /*
         * Before we queue this command, check if the command
         * length exceeds what the host adapter can handle.
         */
        if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
                SCSI_LOG_MLQUEUE(3,
                                printk("queuecommand : command too long.\n"));
                cmd->result = (DID_ABORT << 16);

                scsi_done(cmd);
                goto out;
        }

        spin_lock_irqsave(host->host_lock, flags);
        scsi_cmd_get_serial(host, cmd);

        if (unlikely(host->shost_state == SHOST_DEL)) {
                cmd->result = (DID_NO_CONNECT << 16);
                scsi_done(cmd);
        } else {
                rtn = host->hostt->queuecommand(cmd, scsi_done);
        }
        spin_unlock_irqrestore(host->host_lock, flags);
        if (rtn) {
                if (scsi_delete_timer(cmd)) {
                        atomic_inc(&cmd->device->iodone_cnt);
                        scsi_queue_insert(cmd,
                                          (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
                                          rtn : SCSI_MLQUEUE_HOST_BUSY);
                }
                SCSI_LOG_MLQUEUE(3,
                    printk("queuecommand : request rejected\n"));
        }

 out:
        SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmd()\n"));
        return rtn;
}
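/*
 * Illustrative sketch (not part of the original file): the host-template
 * queuecommand() method dispatched above takes the command plus a done
 * callback in this tree.  my_hw_full() and my_hw_submit() are hypothetical
 * driver internals; only the return-value contract is taken from the code
 * above.
 *
 *        static int my_queuecommand(struct scsi_cmnd *cmd,
 *                                   void (*done)(struct scsi_cmnd *))
 *        {
 *                if (my_hw_full())                 hypothetical
 *                        return SCSI_MLQUEUE_HOST_BUSY;
 *                cmd->scsi_done = done;            call it on completion
 *                my_hw_submit(cmd);                hypothetical
 *                return 0;
 *        }
 */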
/*
 * Function:    scsi_init_cmd_from_req
 *
 * Purpose:     Initialize a struct scsi_cmnd from a struct scsi_request
 *
 * Arguments:   cmd     - command descriptor.
 *              sreq    - Request from the queue.
 *
 * Lock status: None needed.
 *
 * Returns:     Nothing.
 *
 * Notes:       Mainly transfer data from the request structure to the
 *              command structure.  The request structure is allocated
 *              using the normal memory allocator, and requests can pile
 *              up to more or less any depth.  The command structure
 *              represents a consumable resource, as these are allocated
 *              into a pool when the SCSI subsystem initializes.  The
 *              preallocation is required so that in low-memory situations
 *              a disk I/O request won't cause the memory manager to try
 *              and write out a page.  The request structure is generally
 *              used by ioctls and character devices.
 */
void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
{
        sreq->sr_command = cmd;

        cmd->cmd_len = sreq->sr_cmd_len;
        cmd->use_sg = sreq->sr_use_sg;

        cmd->request = sreq->sr_request;
        memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
        cmd->serial_number = 0;
        cmd->bufflen = sreq->sr_bufflen;
        cmd->buffer = sreq->sr_buffer;
        cmd->retries = 0;
        cmd->allowed = sreq->sr_allowed;
        cmd->done = sreq->sr_done;
        cmd->timeout_per_command = sreq->sr_timeout_per_command;
        cmd->sc_data_direction = sreq->sr_data_direction;
        cmd->sglist_len = sreq->sr_sglist_len;
        cmd->underflow = sreq->sr_underflow;
        cmd->sc_request = sreq;
        memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd));

        /*
         * Zero the sense buffer.  Some host adapters automatically request
         * sense on error.  0 is not a valid sense code.
         */
        memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
        cmd->request_buffer = sreq->sr_buffer;
        cmd->request_bufflen = sreq->sr_bufflen;
        cmd->old_use_sg = cmd->use_sg;
        if (cmd->cmd_len == 0)
                cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
        cmd->old_cmd_len = cmd->cmd_len;
        cmd->sc_old_data_direction = cmd->sc_data_direction;
        cmd->old_underflow = cmd->underflow;

        /*
         * Clear any stale result before this (re)use of the command.
         */
        cmd->result = 0;

        SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
}
/*
 * Per-CPU I/O completion queue.
 */
static DEFINE_PER_CPU(struct list_head, scsi_done_q);

/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
        /*
         * We don't have to worry about this one timing out any more.
         * If we are unable to remove the timer, then the command
         * has already timed out.  In which case, we have no choice but to
         * let the timeout function run, as we have no idea where in fact
         * that function could really be.  It might be on another processor,
         * etc, etc.
         */
        if (!scsi_delete_timer(cmd))
                return;
        __scsi_done(cmd);
}

/* Private entry to scsi_done() to complete a command when the timer
 * isn't running --- used by scsi_times_out */
void __scsi_done(struct scsi_cmnd *cmd)
{
        unsigned long flags;

        /*
         * Set the serial numbers back to zero
         */
        cmd->serial_number = 0;

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        /*
         * Next, enqueue the command into the done queue.
         * It is a per-CPU queue, so we just disable local interrupts
         * and need no spinlock.
         */
        local_irq_save(flags);
        list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
        raise_softirq_irqoff(SCSI_SOFTIRQ);
        local_irq_restore(flags);
}
/**
 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
 *
 * This is the consumer of the done queue.
 *
 * This is called with all interrupts enabled.  This should reduce
 * interrupt latency, stack depth, and reentrancy of the low-level
 * drivers.
 */
static void scsi_softirq(struct softirq_action *h)
{
        int disposition;
        LIST_HEAD(local_q);

        local_irq_disable();
        list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
        local_irq_enable();

        while (!list_empty(&local_q)) {
                struct scsi_cmnd *cmd = list_entry(local_q.next,
                                                   struct scsi_cmnd, eh_entry);
                /* The longest time any command should be outstanding is the
                 * per command timeout multiplied by the number of retries.
                 *
                 * For a typical command, this is 2.5 minutes */
                unsigned long wait_for
                        = cmd->allowed * cmd->timeout_per_command;
                list_del_init(&cmd->eh_entry);

                disposition = scsi_decide_disposition(cmd);
                if (disposition != SUCCESS &&
                    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                        sdev_printk(KERN_ERR, cmd->device,
                                    "timing out command, waited %lus\n",
                                    wait_for/HZ);
                        disposition = SUCCESS;
                }

                scsi_log_completion(cmd, disposition);
                switch (disposition) {
                case SUCCESS:
                        scsi_finish_command(cmd);
                        break;
                case NEEDS_RETRY:
                        scsi_retry_command(cmd);
                        break;
                case ADD_TO_MLQUEUE:
                        scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                        break;
                default:
                        if (!scsi_eh_scmd_add(cmd, 0))
                                scsi_finish_command(cmd);
                }
        }
}
/*
 * Function:    scsi_retry_command
 *
 * Purpose:     Send a command back to the low level to be retried.
 *
 * Notes:       This command is always executed in the context of the
 *              bottom half handler, or the error handler thread. Low
 *              level drivers should not become re-entrant as a result of
 *              this.
 */
static int scsi_retry_command(struct scsi_cmnd *cmd)
{
        /*
         * Restore the SCSI command state.
         */
        scsi_setup_cmd_retry(cmd);

        /*
         * Zero the sense information from the last time we tried
         * this command.
         */
        memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

        return scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
}
/*
 * Function:    scsi_finish_command
 *
 * Purpose:     Pass command off to upper layer for finishing of I/O
 *              request, waking processes that are waiting on results,
 *              etc.
 */
void scsi_finish_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_request *sreq;

        scsi_device_unbusy(sdev);

        /*
         * Clear the flags which say that the device/host is no longer
         * capable of accepting new commands.  These are set in scsi_queue.c
         * for both the queue full condition on a device, and for a
         * host full condition on the host.
         *
         * XXX(hch): What about locking?
         */
        shost->host_blocked = 0;
        sdev->device_blocked = 0;

        /*
         * If we have valid sense information, then some kind of recovery
         * must have taken place.  Make a note of this.
         */
        if (SCSI_SENSE_VALID(cmd))
                cmd->result |= (DRIVER_SENSE << 24);

        SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
                                "Notifying upper driver of completion "
                                "(result %x)\n", cmd->result));

        /*
         * We can get here with use_sg=0, causing a panic in the upper level
         */
        cmd->use_sg = cmd->old_use_sg;

        /*
         * If there is an associated request structure, copy the data over
         * before we call the completion function.
         */
        sreq = cmd->sc_request;
        if (sreq) {
                sreq->sr_result = sreq->sr_command->result;
                if (sreq->sr_result) {
                        memcpy(sreq->sr_sense_buffer,
                               sreq->sr_command->sense_buffer,
                               sizeof(sreq->sr_sense_buffer));
                }
        }

        cmd->done(cmd);
}
EXPORT_SYMBOL(scsi_finish_command);
/*
 * Function:    scsi_adjust_queue_depth()
 *
 * Purpose:     Allow low level drivers to tell us to change the queue depth
 *              on a specific SCSI device
 *
 * Arguments:   sdev    - SCSI Device in question
 *              tagged  - Do we use tagged queueing (non-0) or do we treat
 *                        this device as an untagged device (0)
 *              tags    - Number of tags allowed if tagged queueing enabled,
 *                        or number of commands the low level driver can
 *                        queue up in non-tagged mode (as per cmd_per_lun).
 *
 * Returns:     Nothing
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              the right thing depending on whether or not the device is
 *              currently active and whether or not it even has the
 *              command blocks built yet.
 */
void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
{
        unsigned long flags;

        /*
         * refuse to set tagged depth to an unworkable size
         */
        if (tags <= 0)
                return;

        spin_lock_irqsave(sdev->request_queue->queue_lock, flags);

        /* Check to see if the queue is managed by the block layer
         * if it is, and we fail to adjust the depth, exit */
        if (blk_queue_tagged(sdev->request_queue) &&
            blk_queue_resize_tags(sdev->request_queue, tags) != 0)
                goto out;

        sdev->queue_depth = tags;
        switch (tagged) {
        case MSG_ORDERED_TAG:
                sdev->ordered_tags = 1;
                sdev->simple_tags = 1;
                break;
        case MSG_SIMPLE_TAG:
                sdev->ordered_tags = 0;
                sdev->simple_tags = 1;
                break;
        default:
                sdev_printk(KERN_WARNING, sdev,
                            "scsi_adjust_queue_depth, bad queue type, "
                            "disabled\n");
                /* fall through: disable tagged queueing */
        case 0:
                sdev->ordered_tags = sdev->simple_tags = 0;
                sdev->queue_depth = tags;
                break;
        }
 out:
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
EXPORT_SYMBOL(scsi_adjust_queue_depth);
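/*
 * Illustrative sketch (not part of the original file): a LLDD would
 * typically call scsi_adjust_queue_depth() from its slave_configure()
 * method once it knows the device supports tagged queueing.  The function
 * name and the depth of 64 are assumptions for the example.
 *
 *        static int my_slave_configure(struct scsi_device *sdev)
 *        {
 *                if (sdev->tagged_supported)
 *                        scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 64);
 *                else
 *                        scsi_adjust_queue_depth(sdev, 0,
 *                                                sdev->host->cmd_per_lun);
 *                return 0;
 *        }
 */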
/*
 * Function:    scsi_track_queue_full()
 *
 * Purpose:     This function will track successive QUEUE_FULL events on a
 *              specific SCSI device to determine if and when there is a
 *              need to adjust the queue depth on the device.
 *
 * Arguments:   sdev    - SCSI Device in question
 *              depth   - Current number of outstanding SCSI commands on
 *                        this device, not counting the one returned as
 *                        QUEUE_FULL.
 *
 * Returns:     0 - No change needed
 *              >0 - Adjust queue depth to this new depth
 *              -1 - Drop back to untagged operation using host->cmd_per_lun
 *                   as the untagged command depth
 *
 * Lock Status: None held on entry
 *
 * Notes:       Low level drivers may call this at any time and we will do
 *              "The Right Thing."  We are interrupt context safe.
 */
int scsi_track_queue_full(struct scsi_device *sdev, int depth)
{
        if ((jiffies >> 4) == sdev->last_queue_full_time)
                return 0;

        sdev->last_queue_full_time = (jiffies >> 4);
        if (sdev->last_queue_full_depth != depth) {
                sdev->last_queue_full_count = 1;
                sdev->last_queue_full_depth = depth;
        } else {
                sdev->last_queue_full_count++;
        }

        if (sdev->last_queue_full_count <= 10)
                return 0;
        if (sdev->last_queue_full_depth < 8) {
                /* Drop back to untagged */
                scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
                return -1;
        }

        if (sdev->ordered_tags)
                scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
        else
                scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
        return depth;
}
EXPORT_SYMBOL(scsi_track_queue_full);
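/*
 * Illustrative sketch (not part of the original file): a LLDD completion
 * path might feed QUEUE FULL statuses into scsi_track_queue_full() and let
 * it decide when to re-tune the depth.  my_current_depth() is a
 * hypothetical driver helper.
 *
 *        if (status_byte(cmd->result) == QUEUE_FULL) {
 *                int depth = scsi_track_queue_full(cmd->device,
 *                                        my_current_depth(cmd->device));
 *                if (depth > 0)
 *                        ... depth was adjusted to this new value ...
 *                else if (depth == -1)
 *                        ... device dropped back to untagged operation ...
 *        }
 */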
/**
 * scsi_device_get  -  get an additional reference to a scsi_device
 * @sdev:	device to get a reference to
 *
 * Gets a reference to the scsi_device and increments the use count
 * of the underlying LLDD module.  You must hold host_lock of the
 * parent Scsi_Host or already have a reference when calling this.
 */
int scsi_device_get(struct scsi_device *sdev)
{
        if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
        if (!try_module_get(sdev->host->hostt->module)) {
                put_device(&sdev->sdev_gendev);
                return -ENXIO;
        }
        return 0;
}
EXPORT_SYMBOL(scsi_device_get);

/**
 * scsi_device_put  -  release a reference to a scsi_device
 * @sdev:	device to release a reference on.
 *
 * Releases a reference to the scsi_device and decrements the use count
 * of the underlying LLDD module.  The device is freed once the last
 * user vanishes.
 */
void scsi_device_put(struct scsi_device *sdev)
{
        module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
/* helper for shost_for_each_device, thus not documented */
struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
                                           struct scsi_device *prev)
{
        struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
        struct scsi_device *next = NULL;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        while (list->next != &shost->__devices) {
                next = list_entry(list->next, struct scsi_device, siblings);
                /* skip devices that we can't get a reference to */
                if (!scsi_device_get(next))
                        break;
                next = NULL;
                list = list->next;
        }
        spin_unlock_irqrestore(shost->host_lock, flags);

        if (prev)
                scsi_device_put(prev);
        return next;
}
EXPORT_SYMBOL(__scsi_iterate_devices);

/**
 * starget_for_each_device  -  helper to walk all devices of a target
 * @starget:	target whose devices we want to iterate over.
 * @data:	opaque cookie passed to @fn for each device.
 * @fn:		callback invoked for each device of the target.
 *
 * This traverses over each device of @starget.  Each device is
 * referenced for the duration of the callback and released again by
 * the iterator (via scsi_device_put).
 */
void starget_for_each_device(struct scsi_target *starget, void *data,
                     void (*fn)(struct scsi_device *, void *))
{
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost) {
                if ((sdev->channel == starget->channel) &&
                    (sdev->id == starget->id))
                        fn(sdev, data);
        }
}
EXPORT_SYMBOL(starget_for_each_device);
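/*
 * Illustrative sketch (not part of the original file): a transport class
 * could use starget_for_each_device() to apply a setting to every LUN of a
 * target.  my_set_timeout() and the cookie cast are assumptions for the
 * example.
 *
 *        static void my_set_timeout(struct scsi_device *sdev, void *data)
 *        {
 *                sdev->timeout = *(int *)data;
 *        }
 *
 *        int timeout = 30 * HZ;
 *        starget_for_each_device(starget, &timeout, my_set_timeout);
 */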
/**
 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup_by_target instead.
 **/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
                                                   uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
                if (sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup_by_target);

/**
 * scsi_device_lookup_by_target - find a device given the target
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @lun for a given
 * @starget.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
                                                 uint lun)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup_by_target(starget, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup_by_target);
/**
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and any
 * access to the returned scsi_device.
 *
 * Note:  The only reason why drivers would want to use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
 **/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;

        list_for_each_entry(sdev, &shost->__devices, siblings) {
                if (sdev->channel == channel && sdev->id == id &&
                    sdev->lun == lun)
                        return sdev;
        }

        return NULL;
}
EXPORT_SYMBOL(__scsi_device_lookup);

/**
 * scsi_device_lookup - find a device given the host
 * @shost:	SCSI host pointer
 * @channel:	SCSI channel (zero if only one channel)
 * @id:		SCSI target number (physical unit number)
 * @lun:	SCSI Logical Unit Number
 *
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
                uint channel, uint id, uint lun)
{
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        sdev = __scsi_device_lookup(shost, channel, id, lun);
        if (sdev && scsi_device_get(sdev))
                sdev = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        return sdev;
}
EXPORT_SYMBOL(scsi_device_lookup);
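/*
 * Illustrative sketch (not part of the original file): the reference
 * returned by scsi_device_lookup() must be dropped with scsi_device_put()
 * when the caller is finished.  The channel/id/lun values are arbitrary
 * example numbers.
 *
 *        struct scsi_device *sdev;
 *
 *        sdev = scsi_device_lookup(shost, 0, 2, 0);   channel 0, id 2, lun 0
 *        if (sdev) {
 *                ... use sdev ...
 *                scsi_device_put(sdev);
 *        }
 */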
/**
 * scsi_device_cancel - cancel outstanding IO to this device
 * @sdev:	Pointer to struct scsi_device
 * @recovery:	Boolean instructing function to recover device or not.
 *
 **/
int scsi_device_cancel(struct scsi_device *sdev, int recovery)
{
        struct scsi_cmnd *scmd;
        LIST_HEAD(active_list);
        struct list_head *lh, *lh_sf;
        unsigned long flags;

        scsi_device_set_state(sdev, SDEV_CANCEL);

        spin_lock_irqsave(&sdev->list_lock, flags);
        list_for_each_entry(scmd, &sdev->cmd_list, list) {
                if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) {
                        /*
                         * If we are unable to remove the timer, it means
                         * that the command has already timed out or
                         * finished.
                         */
                        if (!scsi_delete_timer(scmd))
                                continue;
                        list_add_tail(&scmd->eh_entry, &active_list);
                }
        }
        spin_unlock_irqrestore(&sdev->list_lock, flags);

        if (!list_empty(&active_list)) {
                list_for_each_safe(lh, lh_sf, &active_list) {
                        scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
                        list_del_init(lh);
                        if (recovery &&
                            !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
                                scmd->result = (DID_ABORT << 16);
                                scsi_finish_command(scmd);
                        }
                }
        }

        return 0;
}
EXPORT_SYMBOL(scsi_device_cancel);
#ifdef CONFIG_HOTPLUG_CPU
static int scsi_cpu_notify(struct notifier_block *self,
                           unsigned long action, void *hcpu)
{
        int cpu = (unsigned long)hcpu;

        switch(action) {
        case CPU_DEAD:
                /* Drain scsi_done_q. */
                local_irq_disable();
                list_splice_init(&per_cpu(scsi_done_q, cpu),
                                 &__get_cpu_var(scsi_done_q));
                raise_softirq_irqoff(SCSI_SOFTIRQ);
                local_irq_enable();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata scsi_cpu_nb = {
        .notifier_call  = scsi_cpu_notify,
};

#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)
#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
#else
#define register_scsi_cpu()
#define unregister_scsi_cpu()
#endif /* CONFIG_HOTPLUG_CPU */
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
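/*
 * Illustrative sketch (not part of the original file): with
 * CONFIG_SCSI_LOGGING enabled, the logging level can be changed at runtime.
 * The exact paths are assumptions based on scsi_init_sysctl() below and the
 * module_param() above; the value 0x140 is an arbitrary example mask.
 *
 *        # echo 0x140 > /proc/sys/dev/scsi/logging_level
 *   or
 *        # echo 0x140 > /sys/module/scsi_mod/parameters/scsi_logging_level
 */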
static int __init init_scsi(void)
{
        int error, i;

        error = scsi_init_queue();
        if (error)
                return error;
        error = scsi_init_procfs();
        if (error)
                goto cleanup_queue;
        error = scsi_init_devinfo();
        if (error)
                goto cleanup_procfs;
        error = scsi_init_hosts();
        if (error)
                goto cleanup_devlist;
        error = scsi_init_sysctl();
        if (error)
                goto cleanup_hosts;
        error = scsi_sysfs_register();
        if (error)
                goto cleanup_sysctl;

        for (i = 0; i < NR_CPUS; i++)
                INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));

        devfs_mk_dir("scsi");
        open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
        register_scsi_cpu();
        printk(KERN_NOTICE "SCSI subsystem initialized\n");
        return 0;

cleanup_sysctl:
        scsi_exit_sysctl();
cleanup_hosts:
        scsi_exit_hosts();
cleanup_devlist:
        scsi_exit_devinfo();
cleanup_procfs:
        scsi_exit_procfs();
cleanup_queue:
        scsi_exit_queue();
        printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
               -error);
        return error;
}
static void __exit exit_scsi(void)
{
        scsi_sysfs_unregister();
        scsi_exit_sysctl();
        scsi_exit_hosts();
        scsi_exit_devinfo();
        devfs_remove("scsi");
        scsi_exit_procfs();
        scsi_exit_queue();
        unregister_scsi_cpu();
}

subsys_initcall(init_scsi);
module_exit(exit_scsi);