RT-AC66 3.0.0.4.374.130 core
[tomato.git] / release / src-rt-6.x / linux / linux-2.6 / drivers / scsi / scsi.c
blob13a87c99da04becd86f31c116728f4d355574fe2
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
4 * Copyright (C) 2002, 2003 Christoph Hellwig
6 * generic mid-level SCSI driver
7 * Initial versions: Drew Eckhardt
8 * Subsequent revisions: Eric Youngdale
10 * <drew@colorado.edu>
12 * Bug correction thanks go to :
13 * Rik Faith <faith@cs.unc.edu>
14 * Tommy Thorn <tthorn>
15 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
17 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
18 * add scatter-gather, multiple outstanding request, and other
19 * enhancements.
21 * Native multichannel, wide scsi, /proc/scsi and hot plugging
22 * support added by Michael Neuffer <mike@i-connect.net>
24 * Added request_module("scsi_hostadapter") for kerneld:
25 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modprobe.conf)
26 * Bjorn Ekwall <bj0rn@blox.se>
27 * (changed to kmod)
29 * Major improvements to the timeout, abort, and reset processing,
30 * as well as performance modifications for large queue depths by
31 * Leonard N. Zubkoff <lnz@dandelion.com>
33 * Converted cli() code to spinlocks, Ingo Molnar
35 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
37 * out_of_space hacks, D. Gilbert (dpg) 990608
40 #include <linux/module.h>
41 #include <linux/moduleparam.h>
42 #include <linux/kernel.h>
43 #include <linux/timer.h>
44 #include <linux/string.h>
45 #include <linux/slab.h>
46 #include <linux/blkdev.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/completion.h>
50 #include <linux/unistd.h>
51 #include <linux/spinlock.h>
52 #include <linux/kmod.h>
53 #include <linux/interrupt.h>
54 #include <linux/notifier.h>
55 #include <linux/cpu.h>
56 #include <linux/mutex.h>
58 #include <scsi/scsi.h>
59 #include <scsi/scsi_cmnd.h>
60 #include <scsi/scsi_dbg.h>
61 #include <scsi/scsi_device.h>
62 #include <scsi/scsi_eh.h>
63 #include <scsi/scsi_host.h>
64 #include <scsi/scsi_tcq.h>
66 #include "scsi_priv.h"
67 #include "scsi_logging.h"
static void scsi_done(struct scsi_cmnd *cmd);

/*
 * Definitions and constants.
 */

#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
#if defined(CONFIG_SCSI_LOGGING)
EXPORT_SYMBOL(scsi_logging_level);
#endif
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
 * You may not alter any existing entry (although adding new ones is
 * encouraged once assigned by ANSI/INCITS T10).
 *
 * Each entry is padded to exactly 17 characters, as required by the
 * /proc/scsi/scsi output format.
 */
static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	"Printer          ",
	"Processor        ",
	"WORM             ",
	"CD-ROM           ",
	"Scanner          ",
	"Optical Device   ",
	"Medium Changer   ",
	"Communications   ",
	"ASC IT8          ",
	"ASC IT8          ",
	"RAID             ",
	"Enclosure        ",
	"Direct-Access-RBC",
	"Optical card     ",
	"Bridge controller",
	"Object storage   ",
	"Automation/Drive ",
};

/**
 * scsi_device_type - Return 17-char string indicating device type.
 * @type: type number to look up
 */
const char * scsi_device_type(unsigned type)
{
	if (type == 0x1e)
		return "Well-known LUN   ";
	if (type == 0x1f)
		return "No Device        ";
	if (type >= ARRAY_SIZE(scsi_device_types))
		return "Unknown          ";
	return scsi_device_types[type];
}
EXPORT_SYMBOL(scsi_device_type);
137 struct scsi_host_cmd_pool {
138 struct kmem_cache *cmd_slab;
139 struct kmem_cache *sense_slab;
140 unsigned int users;
141 char *cmd_name;
142 char *sense_name;
143 unsigned int slab_flags;
144 gfp_t gfp_mask;
147 static struct scsi_host_cmd_pool scsi_cmd_pool = {
148 .cmd_name = "scsi_cmd_cache",
149 .sense_name = "scsi_sense_cache",
150 .slab_flags = SLAB_HWCACHE_ALIGN,
153 static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
154 .cmd_name = "scsi_cmd_cache(DMA)",
155 .sense_name = "scsi_sense_cache(DMA)",
156 .slab_flags = SLAB_HWCACHE_ALIGN|SLAB_CACHE_DMA,
157 .gfp_mask = __GFP_DMA,
160 static DEFINE_MUTEX(host_cmd_pool_mutex);
163 * scsi_pool_alloc_command - internal function to get a fully allocated command
164 * @pool: slab pool to allocate the command from
165 * @gfp_mask: mask for the allocation
167 * Returns a fully allocated command (with the allied sense buffer) or
168 * NULL on failure
170 static struct scsi_cmnd *
171 scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask)
173 struct scsi_cmnd *cmd;
175 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);
176 if (!cmd)
177 return NULL;
179 cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab,
180 gfp_mask | pool->gfp_mask);
181 if (!cmd->sense_buffer) {
182 kmem_cache_free(pool->cmd_slab, cmd);
183 return NULL;
186 return cmd;
190 * scsi_pool_free_command - internal function to release a command
191 * @pool: slab pool to allocate the command from
192 * @cmd: command to release
194 * the command must previously have been allocated by
195 * scsi_pool_alloc_command.
197 static void
198 scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
199 struct scsi_cmnd *cmd)
201 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
202 kmem_cache_free(pool->cmd_slab, cmd);
206 * __scsi_get_command - Allocate a struct scsi_cmnd
207 * @shost: host to transmit command
208 * @gfp_mask: allocation mask
210 * Description: allocate a struct scsi_cmd from host's slab, recycling from the
211 * host's free_list if necessary.
213 struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
215 struct scsi_cmnd *cmd;
216 unsigned char *buf;
218 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
220 if (unlikely(!cmd)) {
221 unsigned long flags;
223 spin_lock_irqsave(&shost->free_list_lock, flags);
224 if (likely(!list_empty(&shost->free_list))) {
225 cmd = list_entry(shost->free_list.next,
226 struct scsi_cmnd, list);
227 list_del_init(&cmd->list);
229 spin_unlock_irqrestore(&shost->free_list_lock, flags);
231 if (cmd) {
232 buf = cmd->sense_buffer;
233 memset(cmd, 0, sizeof(*cmd));
234 cmd->sense_buffer = buf;
238 return cmd;
240 EXPORT_SYMBOL_GPL(__scsi_get_command);
243 * Function: scsi_get_command()
245 * Purpose: Allocate and setup a scsi command block
247 * Arguments: dev - parent scsi device
248 * gfp_mask- allocator flags
250 * Returns: The allocated scsi command structure.
252 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
254 struct scsi_cmnd *cmd;
256 /* Bail if we can't get a reference to the device */
257 if (!get_device(&dev->sdev_gendev))
258 return NULL;
260 cmd = __scsi_get_command(dev->host, gfp_mask);
262 if (likely(cmd != NULL)) {
263 unsigned long flags;
265 cmd->device = dev;
266 init_timer(&cmd->eh_timeout);
267 INIT_LIST_HEAD(&cmd->list);
268 spin_lock_irqsave(&dev->list_lock, flags);
269 list_add_tail(&cmd->list, &dev->cmd_list);
270 spin_unlock_irqrestore(&dev->list_lock, flags);
271 cmd->jiffies_at_alloc = jiffies;
272 } else
273 put_device(&dev->sdev_gendev);
275 return cmd;
277 EXPORT_SYMBOL(scsi_get_command);
279 void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd,
280 struct device *dev)
282 unsigned long flags;
284 /* changing locks here, don't need to restore the irq state */
285 spin_lock_irqsave(&shost->free_list_lock, flags);
286 if (unlikely(list_empty(&shost->free_list))) {
287 list_add(&cmd->list, &shost->free_list);
288 cmd = NULL;
290 spin_unlock_irqrestore(&shost->free_list_lock, flags);
292 if (likely(cmd != NULL))
293 scsi_pool_free_command(shost->cmd_pool, cmd);
295 put_device(dev);
297 EXPORT_SYMBOL(__scsi_put_command);
300 * Function: scsi_put_command()
302 * Purpose: Free a scsi command block
304 * Arguments: cmd - command block to free
306 * Returns: Nothing.
308 * Notes: The command must not belong to any lists.
310 void scsi_put_command(struct scsi_cmnd *cmd)
312 struct scsi_device *sdev = cmd->device;
313 unsigned long flags;
315 /* serious error if the command hasn't come from a device list */
316 spin_lock_irqsave(&cmd->device->list_lock, flags);
317 BUG_ON(list_empty(&cmd->list));
318 list_del_init(&cmd->list);
319 spin_unlock_irqrestore(&cmd->device->list_lock, flags);
321 __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev);
323 EXPORT_SYMBOL(scsi_put_command);
325 static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask)
327 struct scsi_host_cmd_pool *retval = NULL, *pool;
329 * Select a command slab for this host and create it if not
330 * yet existant.
332 mutex_lock(&host_cmd_pool_mutex);
333 pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
334 &scsi_cmd_pool;
335 if (!pool->users) {
336 pool->cmd_slab = kmem_cache_create(pool->cmd_name,
337 sizeof(struct scsi_cmnd), 0,
338 pool->slab_flags, NULL, NULL);
339 if (!pool->cmd_slab)
340 goto fail;
342 pool->sense_slab = kmem_cache_create(pool->sense_name,
343 SCSI_SENSE_BUFFERSIZE, 0,
344 pool->slab_flags, NULL, NULL);
345 if (!pool->sense_slab) {
346 kmem_cache_destroy(pool->cmd_slab);
347 goto fail;
351 pool->users++;
352 retval = pool;
353 fail:
354 mutex_unlock(&host_cmd_pool_mutex);
355 return retval;
358 static void scsi_put_host_cmd_pool(gfp_t gfp_mask)
360 struct scsi_host_cmd_pool *pool;
362 mutex_lock(&host_cmd_pool_mutex);
363 pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool :
364 &scsi_cmd_pool;
366 * This may happen if a driver has a mismatched get and put
367 * of the command pool; the driver should be implicated in
368 * the stack trace
370 BUG_ON(pool->users == 0);
372 if (!--pool->users) {
373 kmem_cache_destroy(pool->cmd_slab);
374 kmem_cache_destroy(pool->sense_slab);
376 mutex_unlock(&host_cmd_pool_mutex);
380 * scsi_allocate_command - get a fully allocated SCSI command
381 * @gfp_mask: allocation mask
383 * This function is for use outside of the normal host based pools.
384 * It allocates the relevant command and takes an additional reference
385 * on the pool it used. This function *must* be paired with
386 * scsi_free_command which also has the identical mask, otherwise the
387 * free pool counts will eventually go wrong and you'll trigger a bug.
389 * This function should *only* be used by drivers that need a static
390 * command allocation at start of day for internal functions.
392 struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask)
394 struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
396 if (!pool)
397 return NULL;
399 return scsi_pool_alloc_command(pool, gfp_mask);
401 EXPORT_SYMBOL(scsi_allocate_command);
404 * scsi_free_command - free a command allocated by scsi_allocate_command
405 * @gfp_mask: mask used in the original allocation
406 * @cmd: command to free
408 * Note: using the original allocation mask is vital because that's
409 * what determines which command pool we use to free the command. Any
410 * mismatch will cause the system to BUG eventually.
412 void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd)
414 struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask);
417 * this could trigger if the mask to scsi_allocate_command
418 * doesn't match this mask. Otherwise we're guaranteed that this
419 * succeeds because scsi_allocate_command must have taken a reference
420 * on the pool
422 BUG_ON(!pool);
424 scsi_pool_free_command(pool, cmd);
426 * scsi_put_host_cmd_pool is called twice; once to release the
427 * reference we took above, and once to release the reference
428 * originally taken by scsi_allocate_command
430 scsi_put_host_cmd_pool(gfp_mask);
431 scsi_put_host_cmd_pool(gfp_mask);
433 EXPORT_SYMBOL(scsi_free_command);
436 * scsi_setup_command_freelist - Setup the command freelist for a scsi host.
437 * @shost: host to allocate the freelist for.
439 * Description: The command freelist protects against system-wide out of memory
440 * deadlock by preallocating one SCSI command structure for each host, so the
441 * system can always write to a swap file on a device associated with that host.
443 * Returns: Nothing.
445 int scsi_setup_command_freelist(struct Scsi_Host *shost)
447 struct scsi_cmnd *cmd;
448 const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL;
450 spin_lock_init(&shost->free_list_lock);
451 INIT_LIST_HEAD(&shost->free_list);
453 shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask);
455 if (!shost->cmd_pool)
456 return -ENOMEM;
459 * Get one backup command for this host.
461 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
462 if (!cmd) {
463 scsi_put_host_cmd_pool(gfp_mask);
464 shost->cmd_pool = NULL;
465 return -ENOMEM;
467 list_add(&cmd->list, &shost->free_list);
468 return 0;
472 * Function: scsi_destroy_command_freelist()
474 * Purpose: Release the command freelist for a scsi host.
476 * Arguments: shost - host that's freelist is going to be destroyed
478 void scsi_destroy_command_freelist(struct Scsi_Host *shost)
481 * If cmd_pool is NULL the free list was not initialized, so
482 * do not attempt to release resources.
484 if (!shost->cmd_pool)
485 return;
487 while (!list_empty(&shost->free_list)) {
488 struct scsi_cmnd *cmd;
490 cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);
491 list_del_init(&cmd->list);
492 scsi_pool_free_command(shost->cmd_pool, cmd);
494 shost->cmd_pool = NULL;
495 scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL);
#ifdef CONFIG_SCSI_LOGGING
void scsi_log_send(struct scsi_cmnd *cmd)
{
	unsigned int level;

	/*
	 * If ML QUEUE log level is greater than or equal to:
	 *
	 * 1: nothing (match completion)
	 *
	 * 2: log opcode + command of all commands
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLQUEUE_SHIFT,
				       SCSI_LOG_MLQUEUE_BITS);
		if (level > 1) {
			scmd_printk(KERN_INFO, cmd, "Send: ");
			if (level > 2)
				printk("0x%p ", cmd);
			printk("\n");
			scsi_print_command(cmd);
			if (level > 3) {
				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
				       " done = 0x%p, queuecommand 0x%p\n",
					scsi_sglist(cmd), scsi_bufflen(cmd),
					cmd->done,
					cmd->device->host->hostt->queuecommand);
			}
		}
	}
}

void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
{
	unsigned int level;

	/*
	 * If ML COMPLETE log level is greater than or equal to:
	 *
	 * 1: log disposition, result, opcode + command, and conditionally
	 * sense data for failures or non SUCCESS dispositions.
	 *
	 * 2: same as 1 but for all command completions.
	 *
	 * 3: same as 2 plus dump cmd address
	 *
	 * 4: same as 3 plus dump extra junk
	 */
	if (unlikely(scsi_logging_level)) {
		level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
				       SCSI_LOG_MLCOMPLETE_BITS);
		if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
		    (level > 1)) {
			scmd_printk(KERN_INFO, cmd, "Done: ");
			if (level > 2)
				printk("0x%p ", cmd);
			/*
			 * Dump truncated values, so we usually fit within
			 * 80 chars.
			 */
			switch (disposition) {
			case SUCCESS:
				printk("SUCCESS\n");
				break;
			case NEEDS_RETRY:
				printk("RETRY\n");
				break;
			case ADD_TO_MLQUEUE:
				printk("MLQUEUE\n");
				break;
			case FAILED:
				printk("FAILED\n");
				break;
			case TIMEOUT_ERROR:
				/*
				 * If called via scsi_times_out.
				 */
				printk("TIMEOUT\n");
				break;
			default:
				printk("UNKNOWN\n");
			}
			scsi_print_result(cmd);
			scsi_print_command(cmd);
			if (status_byte(cmd->result) & CHECK_CONDITION)
				scsi_print_sense("", cmd);
			if (level > 3)
				scmd_printk(KERN_INFO, cmd,
					    "scsi host busy %d failed %d\n",
					    cmd->device->host->host_busy,
					    cmd->device->host->host_failed);
		}
	}
}
#endif
600 * Assign a serial number and pid to the request for error recovery
601 * and debugging purposes. Protected by the Host_Lock of host.
603 static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
605 cmd->serial_number = host->cmd_serial_number++;
606 if (cmd->serial_number == 0)
607 cmd->serial_number = host->cmd_serial_number++;
609 cmd->pid = host->cmd_pid++;
610 if (cmd->pid == 0)
611 cmd->pid = host->cmd_pid++;
615 * Function: scsi_dispatch_command
617 * Purpose: Dispatch a command to the low-level driver.
619 * Arguments: cmd - command block we are dispatching.
621 * Notes:
623 int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
625 struct Scsi_Host *host = cmd->device->host;
626 unsigned long flags = 0;
627 unsigned long timeout;
628 int rtn = 0;
630 /* check if the device is still usable */
631 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
632 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
633 * returns an immediate error upwards, and signals
634 * that the device is no longer present */
635 cmd->result = DID_NO_CONNECT << 16;
636 atomic_inc(&cmd->device->iorequest_cnt);
637 __scsi_done(cmd);
638 /* return 0 (because the command has been processed) */
639 goto out;
642 /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
643 if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
645 * in SDEV_BLOCK, the command is just put back on the device
646 * queue. The suspend state has already blocked the queue so
647 * future requests should not occur until the device
648 * transitions out of the suspend state.
650 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
652 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
655 * NOTE: rtn is still zero here because we don't need the
656 * queue to be plugged on return (it's already stopped)
658 goto out;
662 * If SCSI-2 or lower, store the LUN value in cmnd.
664 if (cmd->device->scsi_level <= SCSI_2 &&
665 cmd->device->scsi_level != SCSI_UNKNOWN) {
666 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
667 (cmd->device->lun << 5 & 0xe0);
671 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
672 * we can avoid the drive not being ready.
674 timeout = host->last_reset + MIN_RESET_DELAY;
676 if (host->resetting && time_before(jiffies, timeout)) {
677 int ticks_remaining = timeout - jiffies;
679 * NOTE: This may be executed from within an interrupt
680 * handler! This is bad, but for now, it'll do. The irq
681 * level of the interrupt handler has been masked out by the
682 * platform dependent interrupt handling code already, so the
683 * sti() here will not cause another call to the SCSI host's
684 * interrupt handler (assuming there is one irq-level per
685 * host).
687 while (--ticks_remaining >= 0)
688 mdelay(1 + 999 / HZ);
689 host->resetting = 0;
693 * AK: unlikely race here: for some reason the timer could
694 * expire before the serial number is set up below.
696 scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
698 scsi_log_send(cmd);
701 * We will use a queued command if possible, otherwise we will
702 * emulate the queuing and calling of completion function ourselves.
704 atomic_inc(&cmd->device->iorequest_cnt);
707 * Before we queue this command, check if the command
708 * length exceeds what the host adapter can handle.
710 if (CDB_SIZE(cmd) > cmd->device->host->max_cmd_len) {
711 SCSI_LOG_MLQUEUE(3,
712 printk("queuecommand : command too long.\n"));
713 cmd->result = (DID_ABORT << 16);
715 scsi_done(cmd);
716 goto out;
719 spin_lock_irqsave(host->host_lock, flags);
720 scsi_cmd_get_serial(host, cmd);
722 if (unlikely(host->shost_state == SHOST_DEL)) {
723 cmd->result = (DID_NO_CONNECT << 16);
724 scsi_done(cmd);
725 } else {
726 rtn = host->hostt->queuecommand(cmd, scsi_done);
728 spin_unlock_irqrestore(host->host_lock, flags);
729 if (rtn) {
730 if (scsi_delete_timer(cmd)) {
731 atomic_inc(&cmd->device->iodone_cnt);
732 scsi_queue_insert(cmd,
733 (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
734 rtn : SCSI_MLQUEUE_HOST_BUSY);
736 SCSI_LOG_MLQUEUE(3,
737 printk("queuecommand : request rejected\n"));
740 out:
741 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
742 return rtn;
/**
 * scsi_req_abort_cmd -- Request command recovery for the specified command
 * @cmd: pointer to the SCSI command of interest
 *
 * This function requests that SCSI Core start recovery for the
 * command by deleting the timer and adding the command to the eh
 * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
 * implement their own error recovery MAY ignore the timeout event if
 * they generated scsi_req_abort_cmd.
 */
void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
{
	/* if the timer can't be deleted it has already fired */
	if (!scsi_delete_timer(cmd))
		return;
	scsi_times_out(cmd);
}
EXPORT_SYMBOL(scsi_req_abort_cmd);
/**
 * scsi_done - Enqueue the finished SCSI command into the done queue.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * This function is the mid-level's (SCSI Core) interrupt routine, which
 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
 * the command to the done queue for further processing.
 *
 * This is the producer of the done queue who enqueues at the tail.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	/*
	 * We don't have to worry about this one timing out any more.
	 * If we are unable to remove the timer, then the command
	 * has already timed out.  In which case, we have no choice but to
	 * let the timeout function run, as we have no idea where in fact
	 * that function could really be.  It might be on another processor,
	 * etc, etc.
	 */
	if (!scsi_delete_timer(cmd))
		return;
	__scsi_done(cmd);
}
791 /* Private entry to scsi_done() to complete a command when the timer
792 * isn't running --- used by scsi_times_out */
793 void __scsi_done(struct scsi_cmnd *cmd)
795 struct request *rq = cmd->request;
798 * Set the serial numbers back to zero
800 cmd->serial_number = 0;
802 atomic_inc(&cmd->device->iodone_cnt);
803 if (cmd->result)
804 atomic_inc(&cmd->device->ioerr_cnt);
806 BUG_ON(!rq);
809 * The uptodate/nbytes values don't matter, as we allow partial
810 * completes and thus will check this in the softirq callback
812 rq->completion_data = cmd;
813 blk_complete_request(rq);
817 * Function: scsi_finish_command
819 * Purpose: Pass command off to upper layer for finishing of I/O
820 * request, waking processes that are waiting on results,
821 * etc.
823 void scsi_finish_command(struct scsi_cmnd *cmd)
825 struct scsi_device *sdev = cmd->device;
826 struct Scsi_Host *shost = sdev->host;
828 scsi_device_unbusy(sdev);
831 * Clear the flags which say that the device/host is no longer
832 * capable of accepting new commands. These are set in scsi_queue.c
833 * for both the queue full condition on a device, and for a
834 * host full condition on the host.
836 * XXX(hch): What about locking?
838 shost->host_blocked = 0;
839 sdev->device_blocked = 0;
842 * If we have valid sense information, then some kind of recovery
843 * must have taken place. Make a note of this.
845 if (SCSI_SENSE_VALID(cmd))
846 cmd->result |= (DRIVER_SENSE << 24);
848 SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
849 "Notifying upper driver of completion "
850 "(result %x)\n", cmd->result));
852 cmd->done(cmd);
854 EXPORT_SYMBOL(scsi_finish_command);
857 * Function: scsi_adjust_queue_depth()
859 * Purpose: Allow low level drivers to tell us to change the queue depth
860 * on a specific SCSI device
862 * Arguments: sdev - SCSI Device in question
863 * tagged - Do we use tagged queueing (non-0) or do we treat
864 * this device as an untagged device (0)
865 * tags - Number of tags allowed if tagged queueing enabled,
866 * or number of commands the low level driver can
867 * queue up in non-tagged mode (as per cmd_per_lun).
869 * Returns: Nothing
871 * Lock Status: None held on entry
873 * Notes: Low level drivers may call this at any time and we will do
874 * the right thing depending on whether or not the device is
875 * currently active and whether or not it even has the
876 * command blocks built yet.
878 void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
880 unsigned long flags;
883 * refuse to set tagged depth to an unworkable size
885 if (tags <= 0)
886 return;
888 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
890 /* Check to see if the queue is managed by the block layer
891 * if it is, and we fail to adjust the depth, exit */
892 if (blk_queue_tagged(sdev->request_queue) &&
893 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
894 goto out;
896 sdev->queue_depth = tags;
897 switch (tagged) {
898 case MSG_ORDERED_TAG:
899 sdev->ordered_tags = 1;
900 sdev->simple_tags = 1;
901 break;
902 case MSG_SIMPLE_TAG:
903 sdev->ordered_tags = 0;
904 sdev->simple_tags = 1;
905 break;
906 default:
907 sdev_printk(KERN_WARNING, sdev,
908 "scsi_adjust_queue_depth, bad queue type, "
909 "disabled\n");
910 case 0:
911 sdev->ordered_tags = sdev->simple_tags = 0;
912 sdev->queue_depth = tags;
913 break;
915 out:
916 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
918 EXPORT_SYMBOL(scsi_adjust_queue_depth);
921 * Function: scsi_track_queue_full()
923 * Purpose: This function will track successive QUEUE_FULL events on a
924 * specific SCSI device to determine if and when there is a
925 * need to adjust the queue depth on the device.
927 * Arguments: sdev - SCSI Device in question
928 * depth - Current number of outstanding SCSI commands on
929 * this device, not counting the one returned as
930 * QUEUE_FULL.
932 * Returns: 0 - No change needed
933 * >0 - Adjust queue depth to this new depth
934 * -1 - Drop back to untagged operation using host->cmd_per_lun
935 * as the untagged command depth
937 * Lock Status: None held on entry
939 * Notes: Low level drivers may call this at any time and we will do
940 * "The Right Thing." We are interrupt context safe.
942 int scsi_track_queue_full(struct scsi_device *sdev, int depth)
944 if ((jiffies >> 4) == sdev->last_queue_full_time)
945 return 0;
947 sdev->last_queue_full_time = (jiffies >> 4);
948 if (sdev->last_queue_full_depth != depth) {
949 sdev->last_queue_full_count = 1;
950 sdev->last_queue_full_depth = depth;
951 } else {
952 sdev->last_queue_full_count++;
955 if (sdev->last_queue_full_count <= 10)
956 return 0;
957 if (sdev->last_queue_full_depth < 8) {
958 /* Drop back to untagged */
959 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
960 return -1;
963 if (sdev->ordered_tags)
964 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
965 else
966 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
967 return depth;
969 EXPORT_SYMBOL(scsi_track_queue_full);
972 * scsi_device_get - get an addition reference to a scsi_device
973 * @sdev: device to get a reference to
975 * Gets a reference to the scsi_device and increments the use count
976 * of the underlying LLDD module. You must hold host_lock of the
977 * parent Scsi_Host or already have a reference when calling this.
979 int scsi_device_get(struct scsi_device *sdev)
981 if (sdev->sdev_state == SDEV_DEL)
982 return -ENXIO;
983 if (!get_device(&sdev->sdev_gendev))
984 return -ENXIO;
985 /* We can fail this if we're doing SCSI operations
986 * from module exit (like cache flush) */
987 try_module_get(sdev->host->hostt->module);
989 return 0;
991 EXPORT_SYMBOL(scsi_device_get);
994 * scsi_device_put - release a reference to a scsi_device
995 * @sdev: device to release a reference on.
997 * Release a reference to the scsi_device and decrements the use count
998 * of the underlying LLDD module. The device is freed once the last
999 * user vanishes.
1001 void scsi_device_put(struct scsi_device *sdev)
1003 #ifdef CONFIG_MODULE_UNLOAD
1004 struct module *module = sdev->host->hostt->module;
1006 /* The module refcount will be zero if scsi_device_get()
1007 * was called from a module removal routine */
1008 if (module && module_refcount(module) != 0)
1009 module_put(module);
1010 #endif
1011 put_device(&sdev->sdev_gendev);
1013 EXPORT_SYMBOL(scsi_device_put);
1015 /* helper for shost_for_each_device, thus not documented */
1016 struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
1017 struct scsi_device *prev)
1019 struct list_head *list = (prev ? &prev->siblings : &shost->__devices);
1020 struct scsi_device *next = NULL;
1021 unsigned long flags;
1023 spin_lock_irqsave(shost->host_lock, flags);
1024 while (list->next != &shost->__devices) {
1025 next = list_entry(list->next, struct scsi_device, siblings);
1026 /* skip devices that we can't get a reference to */
1027 if (!scsi_device_get(next))
1028 break;
1029 next = NULL;
1030 list = list->next;
1032 spin_unlock_irqrestore(shost->host_lock, flags);
1034 if (prev)
1035 scsi_device_put(prev);
1036 return next;
1038 EXPORT_SYMBOL(__scsi_iterate_devices);
1041 * starget_for_each_device - helper to walk all devices of a target
1042 * @starget: target whose devices we want to iterate over.
1044 * This traverses over each devices of @shost. The devices have
1045 * a reference that must be released by scsi_host_put when breaking
1046 * out of the loop.
1048 void starget_for_each_device(struct scsi_target *starget, void * data,
1049 void (*fn)(struct scsi_device *, void *))
1051 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1052 struct scsi_device *sdev;
1054 shost_for_each_device(sdev, shost) {
1055 if ((sdev->channel == starget->channel) &&
1056 (sdev->id == starget->id))
1057 fn(sdev, data);
1060 EXPORT_SYMBOL(starget_for_each_device);
1063 * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
1064 * @starget: SCSI target pointer
1065 * @lun: SCSI Logical Unit Number
1067 * Looks up the scsi_device with the specified @lun for a give
1068 * @starget. The returned scsi_device does not have an additional
1069 * reference. You must hold the host's host_lock over this call and
1070 * any access to the returned scsi_device. A scsi_device in state
1071 * SDEV_DEL is skipped.
1073 * Note: The only reason why drivers would want to use this is because
1074 * they're need to access the device list in irq context. Otherwise you
1075 * really want to use scsi_device_lookup_by_target instead.
1077 struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1078 uint lun)
1080 struct scsi_device *sdev;
1082 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1083 if (sdev->sdev_state == SDEV_DEL)
1084 continue;
1085 if (sdev->lun ==lun)
1086 return sdev;
1089 return NULL;
1091 EXPORT_SYMBOL(__scsi_device_lookup_by_target);
1094 * scsi_device_lookup_by_target - find a device given the target
1095 * @starget: SCSI target pointer
1096 * @lun: SCSI Logical Unit Number
1098 * Looks up the scsi_device with the specified @channel, @id, @lun for a
1099 * give host. The returned scsi_device has an additional reference that
1100 * needs to be release with scsi_host_put once you're done with it.
1102 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
1103 uint lun)
1105 struct scsi_device *sdev;
1106 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1107 unsigned long flags;
1109 spin_lock_irqsave(shost->host_lock, flags);
1110 sdev = __scsi_device_lookup_by_target(starget, lun);
1111 if (sdev && scsi_device_get(sdev))
1112 sdev = NULL;
1113 spin_unlock_irqrestore(shost->host_lock, flags);
1115 return sdev;
1117 EXPORT_SYMBOL(scsi_device_lookup_by_target);
 * __scsi_device_lookup - find a device given the host (UNLOCKED)
 * @shost: SCSI host pointer
 * @channel: SCSI channel (zero if only one channel)
 * @id: SCSI target number (physical unit number)
 * @lun: SCSI Logical Unit Number
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device does not have an additional
 * reference.  You must hold the host's host_lock over this call and
 * any access to the returned scsi_device.
 * Note: The only reason why drivers should use this is because
 * they need to access the device list in irq context.  Otherwise you
 * really want to use scsi_device_lookup instead.
1135 struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
1136 uint channel, uint id, uint lun)
1138 struct scsi_device *sdev;
1140 list_for_each_entry(sdev, &shost->__devices, siblings) {
1141 if (sdev->channel == channel && sdev->id == id &&
1142 sdev->lun ==lun)
1143 return sdev;
1146 return NULL;
1148 EXPORT_SYMBOL(__scsi_device_lookup);
1151 * scsi_device_lookup - find a device given the host
1152 * @shost: SCSI host pointer
1153 * @channel: SCSI channel (zero if only one channel)
1154 * @id: SCSI target number (physical unit number)
1155 * @lun: SCSI Logical Unit Number
 * Looks up the scsi_device with the specified @channel, @id, @lun for a
 * given host.  The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
1161 struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
1162 uint channel, uint id, uint lun)
1164 struct scsi_device *sdev;
1165 unsigned long flags;
1167 spin_lock_irqsave(shost->host_lock, flags);
1168 sdev = __scsi_device_lookup(shost, channel, id, lun);
1169 if (sdev && scsi_device_get(sdev))
1170 sdev = NULL;
1171 spin_unlock_irqrestore(shost->host_lock, flags);
1173 return sdev;
1175 EXPORT_SYMBOL(scsi_device_lookup);
1178 * scsi_device_cancel - cancel outstanding IO to this device
1179 * @sdev: Pointer to struct scsi_device
1180 * @recovery: Boolean instructing function to recover device or not.
1183 int scsi_device_cancel(struct scsi_device *sdev, int recovery)
1185 struct scsi_cmnd *scmd;
1186 LIST_HEAD(active_list);
1187 struct list_head *lh, *lh_sf;
1188 unsigned long flags;
1190 scsi_device_set_state(sdev, SDEV_CANCEL);
1192 spin_lock_irqsave(&sdev->list_lock, flags);
1193 list_for_each_entry(scmd, &sdev->cmd_list, list) {
1194 if (scmd->request) {
1196 * If we are unable to remove the timer, it means
1197 * that the command has already timed out or
1198 * finished.
1200 if (!scsi_delete_timer(scmd))
1201 continue;
1202 list_add_tail(&scmd->eh_entry, &active_list);
1205 spin_unlock_irqrestore(&sdev->list_lock, flags);
1207 if (!list_empty(&active_list)) {
1208 list_for_each_safe(lh, lh_sf, &active_list) {
1209 scmd = list_entry(lh, struct scsi_cmnd, eh_entry);
1210 list_del_init(lh);
1211 if (recovery &&
1212 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) {
1213 scmd->result = (DID_ABORT << 16);
1214 scsi_finish_command(scmd);
1219 return 0;
1221 EXPORT_SYMBOL(scsi_device_cancel);
MODULE_DESCRIPTION("SCSI core");
MODULE_LICENSE("GPL");

/* Expose the logging bitmask as a root-writable module parameter. */
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1229 static int __init init_scsi(void)
1231 int error;
1233 error = scsi_init_queue();
1234 if (error)
1235 return error;
1236 error = scsi_init_procfs();
1237 if (error)
1238 goto cleanup_queue;
1239 error = scsi_init_devinfo();
1240 if (error)
1241 goto cleanup_procfs;
1242 error = scsi_init_hosts();
1243 if (error)
1244 goto cleanup_devlist;
1245 error = scsi_init_sysctl();
1246 if (error)
1247 goto cleanup_hosts;
1248 error = scsi_sysfs_register();
1249 if (error)
1250 goto cleanup_sysctl;
1252 scsi_netlink_init();
1254 printk(KERN_NOTICE "SCSI subsystem initialized\n");
1255 return 0;
1257 cleanup_sysctl:
1258 scsi_exit_sysctl();
1259 cleanup_hosts:
1260 scsi_exit_hosts();
1261 cleanup_devlist:
1262 scsi_exit_devinfo();
1263 cleanup_procfs:
1264 scsi_exit_procfs();
1265 cleanup_queue:
1266 scsi_exit_queue();
1267 printk(KERN_ERR "SCSI subsystem failed to initialize, error = %d\n",
1268 -error);
1269 return error;
/* Tear the subsystem down in the reverse order of init_scsi(). */
static void __exit exit_scsi(void)
{
	scsi_netlink_exit();
	scsi_sysfs_unregister();
	scsi_exit_sysctl();
	scsi_exit_hosts();
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
}
/* Initialize at subsys level so host drivers can register at module_init. */
subsys_initcall(init_scsi);
module_exit(exit_scsi);