/*
 * scsi.c Copyright (C) 1992 Drew Eckhardt
 *        Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 * generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 * Bug correction thanks go to:
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 * add scatter-gather, multiple outstanding request, and other
 * enhancements.
 *
 * Native multichannel, wide scsi, /proc/scsi and hot plugging
 * support added by Michael Neuffer <mike@i-connect.net>
 *
 * Added request_module("scsi_hostadapter") for kerneld:
 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
 * Bjorn Ekwall <bj0rn@blox.se>
 *
 * Major improvements to the timeout, abort, and reset processing,
 * as well as performance modifications for large queue depths by
 * Leonard N. Zubkoff <lnz@dandelion.com>
 *
 * Converted cli() code to spinlocks, Ingo Molnar
 *
 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 * out_of_space hacks, D. Gilbert (dpg) 990608
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/unistd.h>
#include <linux/spinlock.h>
#include <linux/kmod.h>
#include <linux/interrupt.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
/*
 * Definitions and constants.
 */

/* Minimum delay (in jiffies) after a bus reset before issuing commands. */
#define MIN_RESET_DELAY (2*HZ)

/* Do not call reset on error if we just did a reset within 15 sec. */
#define MIN_RESET_PERIOD (15*HZ)

/*
 * Macro to determine the size of SCSI command. This macro takes vendor
 * unique commands into account. SCSI commands in groups 6 and 7 are
 * vendor unique and we will depend upon the command length being
 * supplied correctly in cmd_len.
 */
#define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
/* Monotonic id assigned to every dispatched command (see scsi_dispatch_cmd). */
unsigned long scsi_pid;

/* Last serial number handed out; incremented (racily) per dispatch. */
static unsigned long serial_number;

/*
 * Note - the initial logging level can be set here to log events at boot time.
 * After the system is up, you may enable logging via the /proc interface.
 */
unsigned int scsi_logging_level;
94 const char *const scsi_device_types
[MAX_SCSI_DEVICE_CODE
] = {
112 * Function: scsi_allocate_request
114 * Purpose: Allocate a request descriptor.
116 * Arguments: device - device for which we want a request
118 * Lock status: No locks assumed to be held. This function is SMP-safe.
120 * Returns: Pointer to request block.
122 * Notes: With the new queueing code, it becomes important
123 * to track the difference between a command and a
124 * request. A request is a pending item in the queue that
125 * has not yet reached the top of the queue.
127 * XXX(hch): Need to add a gfp_mask argument.
129 struct scsi_request
*scsi_allocate_request(struct scsi_device
*sdev
)
131 const int offset
= ALIGN(sizeof(struct scsi_request
), 4);
132 const int size
= offset
+ sizeof(struct request
);
133 struct scsi_request
*sreq
;
135 sreq
= kmalloc(size
, GFP_ATOMIC
);
136 if (likely(sreq
!= NULL
)) {
137 memset(sreq
, 0, size
);
138 sreq
->sr_request
= (struct request
*)(((char *)sreq
) + offset
);
139 sreq
->sr_device
= sdev
;
140 sreq
->sr_host
= sdev
->host
;
141 sreq
->sr_magic
= SCSI_REQ_MAGIC
;
142 sreq
->sr_data_direction
= DMA_BIDIRECTIONAL
;
148 void __scsi_release_request(struct scsi_request
*sreq
)
150 if (likely(sreq
->sr_command
!= NULL
)) {
151 struct scsi_cmnd
*cmd
= sreq
->sr_command
;
153 sreq
->sr_command
= NULL
;
154 scsi_next_command(cmd
);
/*
 * Function:	scsi_release_request
 *
 * Purpose:	Release a request descriptor.
 *
 * Arguments:	sreq	- request to release
 *
 * Lock status:	No locks assumed to be held.  This function is SMP-safe.
 */
void scsi_release_request(struct scsi_request *sreq)
{
	__scsi_release_request(sreq);
	/* matches the kmalloc() in scsi_allocate_request() */
	kfree(sreq);
}
173 struct scsi_host_cmd_pool
{
177 unsigned int slab_flags
;
178 unsigned int gfp_mask
;
181 static struct scsi_host_cmd_pool scsi_cmd_pool
= {
182 .name
= "scsi_cmd_cache",
183 .slab_flags
= SLAB_HWCACHE_ALIGN
,
186 static struct scsi_host_cmd_pool scsi_cmd_dma_pool
= {
187 .name
= "scsi_cmd_cache(DMA)",
188 .slab_flags
= SLAB_HWCACHE_ALIGN
|SLAB_CACHE_DMA
,
189 .gfp_mask
= __GFP_DMA
,
192 static DECLARE_MUTEX(host_cmd_pool_mutex
);
194 static struct scsi_cmnd
*__scsi_get_command(struct Scsi_Host
*shost
,
197 struct scsi_cmnd
*cmd
;
199 cmd
= kmem_cache_alloc(shost
->cmd_pool
->slab
,
200 gfp_mask
| shost
->cmd_pool
->gfp_mask
);
202 if (unlikely(!cmd
)) {
205 spin_lock_irqsave(&shost
->free_list_lock
, flags
);
206 if (likely(!list_empty(&shost
->free_list
))) {
207 cmd
= list_entry(shost
->free_list
.next
,
208 struct scsi_cmnd
, list
);
209 list_del_init(&cmd
->list
);
211 spin_unlock_irqrestore(&shost
->free_list_lock
, flags
);
218 * Function: scsi_get_command()
220 * Purpose: Allocate and setup a scsi command block
222 * Arguments: dev - parent scsi device
223 * gfp_mask- allocator flags
225 * Returns: The allocated scsi command structure.
227 struct scsi_cmnd
*scsi_get_command(struct scsi_device
*dev
, int gfp_mask
)
229 struct scsi_cmnd
*cmd
= __scsi_get_command(dev
->host
, gfp_mask
);
231 if (likely(cmd
!= NULL
)) {
234 memset(cmd
, 0, sizeof(*cmd
));
236 cmd
->state
= SCSI_STATE_UNUSED
;
237 cmd
->owner
= SCSI_OWNER_NOBODY
;
238 init_timer(&cmd
->eh_timeout
);
239 INIT_LIST_HEAD(&cmd
->list
);
240 spin_lock_irqsave(&dev
->list_lock
, flags
);
241 list_add_tail(&cmd
->list
, &dev
->cmd_list
);
242 spin_unlock_irqrestore(&dev
->list_lock
, flags
);
249 * Function: scsi_put_command()
251 * Purpose: Free a scsi command block
253 * Arguments: cmd - command block to free
257 * Notes: The command must not belong to any lists.
259 void scsi_put_command(struct scsi_cmnd
*cmd
)
261 struct Scsi_Host
*shost
= cmd
->device
->host
;
264 /* serious error if the command hasn't come from a device list */
265 spin_lock_irqsave(&cmd
->device
->list_lock
, flags
);
266 BUG_ON(list_empty(&cmd
->list
));
267 list_del_init(&cmd
->list
);
268 spin_unlock(&cmd
->device
->list_lock
);
269 /* changing locks here, don't need to restore the irq state */
270 spin_lock(&shost
->free_list_lock
);
271 if (unlikely(list_empty(&shost
->free_list
))) {
272 list_add(&cmd
->list
, &shost
->free_list
);
275 spin_unlock_irqrestore(&shost
->free_list_lock
, flags
);
277 if (likely(cmd
!= NULL
))
278 kmem_cache_free(shost
->cmd_pool
->slab
, cmd
);
282 * Function: scsi_setup_command_freelist()
284 * Purpose: Setup the command freelist for a scsi host.
286 * Arguments: shost - host to allocate the freelist for.
290 int scsi_setup_command_freelist(struct Scsi_Host
*shost
)
292 struct scsi_host_cmd_pool
*pool
;
293 struct scsi_cmnd
*cmd
;
295 spin_lock_init(&shost
->free_list_lock
);
296 INIT_LIST_HEAD(&shost
->free_list
);
299 * Select a command slab for this host and create it if not
302 down(&host_cmd_pool_mutex
);
303 pool
= (shost
->unchecked_isa_dma
? &scsi_cmd_dma_pool
: &scsi_cmd_pool
);
305 pool
->slab
= kmem_cache_create(pool
->name
,
306 sizeof(struct scsi_cmnd
), 0,
307 pool
->slab_flags
, NULL
, NULL
);
313 shost
->cmd_pool
= pool
;
314 up(&host_cmd_pool_mutex
);
317 * Get one backup command for this host.
319 cmd
= kmem_cache_alloc(shost
->cmd_pool
->slab
,
320 GFP_KERNEL
| shost
->cmd_pool
->gfp_mask
);
323 list_add(&cmd
->list
, &shost
->free_list
);
328 kmem_cache_destroy(pool
->slab
);
331 up(&host_cmd_pool_mutex
);
337 * Function: scsi_destroy_command_freelist()
339 * Purpose: Release the command freelist for a scsi host.
341 * Arguments: shost - host that's freelist is going to be destroyed
343 void scsi_destroy_command_freelist(struct Scsi_Host
*shost
)
345 while (!list_empty(&shost
->free_list
)) {
346 struct scsi_cmnd
*cmd
;
348 cmd
= list_entry(shost
->free_list
.next
, struct scsi_cmnd
, list
);
349 list_del_init(&cmd
->list
);
350 kmem_cache_free(shost
->cmd_pool
->slab
, cmd
);
353 down(&host_cmd_pool_mutex
);
354 if (!--shost
->cmd_pool
->users
)
355 kmem_cache_destroy(shost
->cmd_pool
->slab
);
356 up(&host_cmd_pool_mutex
);
360 * Function: scsi_dispatch_command
362 * Purpose: Dispatch a command to the low-level driver.
364 * Arguments: cmd - command block we are dispatching.
368 int scsi_dispatch_cmd(struct scsi_cmnd
*cmd
)
370 struct Scsi_Host
*host
= cmd
->device
->host
;
371 unsigned long flags
= 0;
372 unsigned long timeout
;
375 /* Assign a unique nonzero serial_number. */
376 /* XXX(hch): this is racy */
377 if (++serial_number
== 0)
379 cmd
->serial_number
= serial_number
;
380 cmd
->pid
= scsi_pid
++;
383 * If SCSI-2 or lower, store the LUN value in cmnd.
385 if (cmd
->device
->scsi_level
<= SCSI_2
) {
386 cmd
->cmnd
[1] = (cmd
->cmnd
[1] & 0x1f) |
387 (cmd
->device
->lun
<< 5 & 0xe0);
391 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
392 * we can avoid the drive not being ready.
394 timeout
= host
->last_reset
+ MIN_RESET_DELAY
;
396 if (host
->resetting
&& time_before(jiffies
, timeout
)) {
397 int ticks_remaining
= timeout
- jiffies
;
399 * NOTE: This may be executed from within an interrupt
400 * handler! This is bad, but for now, it'll do. The irq
401 * level of the interrupt handler has been masked out by the
402 * platform dependent interrupt handling code already, so the
403 * sti() here will not cause another call to the SCSI host's
404 * interrupt handler (assuming there is one irq-level per
407 while (--ticks_remaining
>= 0)
408 mdelay(1 + 999 / HZ
);
412 scsi_add_timer(cmd
, cmd
->timeout_per_command
, scsi_times_out
);
415 * We will use a queued command if possible, otherwise we will
416 * emulate the queuing and calling of completion function ourselves.
418 SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, "
419 "channel = %d, target = %d, command = %p, "
420 "buffer = %p, \nbufflen = %d, done = %p)\n",
421 host
->host_no
, cmd
->device
->channel
,
422 cmd
->device
->id
, cmd
->cmnd
, cmd
->buffer
,
423 cmd
->bufflen
, cmd
->done
));
425 cmd
->state
= SCSI_STATE_QUEUED
;
426 cmd
->owner
= SCSI_OWNER_LOWLEVEL
;
429 * Before we queue this command, check if the command
430 * length exceeds what the host adapter can handle.
432 if (CDB_SIZE(cmd
) > cmd
->device
->host
->max_cmd_len
) {
434 printk("queuecommand : command too long.\n"));
435 cmd
->result
= (DID_ABORT
<< 16);
437 spin_lock_irqsave(host
->host_lock
, flags
);
439 spin_unlock_irqrestore(host
->host_lock
, flags
);
443 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
444 host
->hostt
->queuecommand
));
446 spin_lock_irqsave(host
->host_lock
, flags
);
447 rtn
= host
->hostt
->queuecommand(cmd
, scsi_done
);
448 spin_unlock_irqrestore(host
->host_lock
, flags
);
450 scsi_queue_insert(cmd
,
451 (rtn
== SCSI_MLQUEUE_DEVICE_BUSY
) ?
452 rtn
: SCSI_MLQUEUE_HOST_BUSY
);
454 printk("queuecommand : request rejected\n"));
458 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
463 * Function: scsi_init_cmd_from_req
465 * Purpose: Queue a SCSI command
466 * Purpose: Initialize a struct scsi_cmnd from a struct scsi_request
468 * Arguments: cmd - command descriptor.
469 * sreq - Request from the queue.
471 * Lock status: None needed.
475 * Notes: Mainly transfer data from the request structure to the
476 * command structure. The request structure is allocated
477 * using the normal memory allocator, and requests can pile
478 * up to more or less any depth. The command structure represents
479 * a consumable resource, as these are allocated into a pool
480 * when the SCSI subsystem initializes. The preallocation is
481 * required so that in low-memory situations a disk I/O request
482 * won't cause the memory manager to try and write out a page.
483 * The request structure is generally used by ioctls and character
486 void scsi_init_cmd_from_req(struct scsi_cmnd
*cmd
, struct scsi_request
*sreq
)
488 sreq
->sr_command
= cmd
;
490 cmd
->owner
= SCSI_OWNER_MIDLEVEL
;
491 cmd
->cmd_len
= sreq
->sr_cmd_len
;
492 cmd
->use_sg
= sreq
->sr_use_sg
;
494 cmd
->request
= sreq
->sr_request
;
495 memcpy(cmd
->data_cmnd
, sreq
->sr_cmnd
, sizeof(cmd
->data_cmnd
));
496 cmd
->serial_number
= 0;
497 cmd
->serial_number_at_timeout
= 0;
498 cmd
->bufflen
= sreq
->sr_bufflen
;
499 cmd
->buffer
= sreq
->sr_buffer
;
502 cmd
->allowed
= sreq
->sr_allowed
;
503 cmd
->done
= sreq
->sr_done
;
504 cmd
->timeout_per_command
= sreq
->sr_timeout_per_command
;
505 cmd
->sc_data_direction
= sreq
->sr_data_direction
;
506 cmd
->sglist_len
= sreq
->sr_sglist_len
;
507 cmd
->underflow
= sreq
->sr_underflow
;
508 cmd
->sc_request
= sreq
;
509 memcpy(cmd
->cmnd
, sreq
->sr_cmnd
, sizeof(sreq
->sr_cmnd
));
512 * Zero the sense buffer. Some host adapters automatically request
513 * sense on error. 0 is not a valid sense code.
515 memset(cmd
->sense_buffer
, 0, sizeof(sreq
->sr_sense_buffer
));
516 cmd
->request_buffer
= sreq
->sr_buffer
;
517 cmd
->request_bufflen
= sreq
->sr_bufflen
;
518 cmd
->old_use_sg
= cmd
->use_sg
;
519 if (cmd
->cmd_len
== 0)
520 cmd
->cmd_len
= COMMAND_SIZE(cmd
->cmnd
[0]);
521 cmd
->old_cmd_len
= cmd
->cmd_len
;
522 cmd
->sc_old_data_direction
= cmd
->sc_data_direction
;
523 cmd
->old_underflow
= cmd
->underflow
;
526 * Start the timer ticking.
528 cmd
->internal_timeout
= NORMAL_TIMEOUT
;
529 cmd
->abort_reason
= 0;
532 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
536 * Per-CPU I/O completion queue.
538 static struct list_head done_q
[NR_CPUS
] __cacheline_aligned
;
541 * scsi_done - Enqueue the finished SCSI command into the done queue.
542 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
543 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
545 * This function is the mid-level's (SCSI Core) interrupt routine, which
546 * regains ownership of the SCSI command (de facto) from a LLDD, and enqueues
547 * the command to the done queue for further processing.
549 * This is the producer of the done queue who enqueues at the tail.
551 * This function is interrupt context safe.
553 void scsi_done(struct scsi_cmnd
*cmd
)
559 * We don't have to worry about this one timing out any more.
560 * If we are unable to remove the timer, then the command
561 * has already timed out. In which case, we have no choice but to
562 * let the timeout function run, as we have no idea where in fact
563 * that function could really be. It might be on another processor,
566 if (!scsi_delete_timer(cmd
))
570 * Set the serial numbers back to zero
572 cmd
->serial_number
= 0;
573 cmd
->serial_number_at_timeout
= 0;
574 cmd
->state
= SCSI_STATE_BHQUEUE
;
575 cmd
->owner
= SCSI_OWNER_BH_HANDLER
;
578 * Next, enqueue the command into the done queue.
579 * It is a per-CPU queue, so we just disable local interrupts
580 * and need no spinlock.
582 local_irq_save(flags
);
583 cpu
= smp_processor_id();
584 list_add_tail(&cmd
->eh_entry
, &done_q
[cpu
]);
585 raise_softirq_irqoff(SCSI_SOFTIRQ
);
586 local_irq_restore(flags
);
590 * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
592 * This is the consumer of the done queue.
594 * This is called with all interrupts enabled. This should reduce
595 * interrupt latency, stack depth, and reentrancy of the low-level
598 static void scsi_softirq(struct softirq_action
*h
)
603 list_splice_init(&done_q
[smp_processor_id()], &local_q
);
606 while (!list_empty(&local_q
)) {
607 struct scsi_cmnd
*cmd
= list_entry(local_q
.next
,
608 struct scsi_cmnd
, eh_entry
);
609 list_del_init(&cmd
->eh_entry
);
611 switch (scsi_decide_disposition(cmd
)) {
616 SCSI_LOG_MLCOMPLETE(3,
617 printk("Command finished %d %d "
619 cmd
->device
->host
->host_busy
,
620 cmd
->device
->host
->host_failed
,
623 scsi_finish_command(cmd
);
627 * We only come in here if we want to retry a
628 * command. The test to see whether the
629 * command should be retried should be keeping
630 * track of the number of tries, so we don't
631 * end up looping, of course.
633 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry "
635 cmd
->device
->host
->host_busy
,
636 cmd
->device
->host
->host_failed
,
639 scsi_retry_command(cmd
);
643 * This typically happens for a QUEUE_FULL
644 * message - typically only when the queue
645 * depth is only approximate for a given
646 * device. Adding a command to the queue for
647 * the device will prevent further commands
648 * from being sent to the device, so we
649 * shouldn't end up with tons of things being
650 * sent down that shouldn't be.
652 SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as "
653 "device queue full, "
654 "put on ml queue %p\n",
656 scsi_queue_insert(cmd
, SCSI_MLQUEUE_DEVICE_BUSY
);
660 * Here we have a fatal error of some sort.
661 * Turn it over to the error handler.
663 SCSI_LOG_MLCOMPLETE(3,
664 printk("Command failed %p %x "
665 "busy=%d failed=%d\n",
667 cmd
->device
->host
->host_busy
,
668 cmd
->device
->host
->host_failed
));
671 * Dump the sense information too.
673 if (status_byte(cmd
->result
) & CHECK_CONDITION
)
674 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", cmd
));
677 * We only fail here if the error recovery thread
680 if (!scsi_eh_scmd_add(cmd
, 0))
681 scsi_finish_command(cmd
);
687 * Function: scsi_retry_command
689 * Purpose: Send a command back to the low level to be retried.
691 * Notes: This command is always executed in the context of the
692 * bottom half handler, or the error handler thread. Low
693 * level drivers should not become re-entrant as a result of
696 int scsi_retry_command(struct scsi_cmnd
*cmd
)
699 * Restore the SCSI command state.
701 scsi_setup_cmd_retry(cmd
);
704 * Zero the sense information from the last time we tried
707 memset(cmd
->sense_buffer
, 0, sizeof(cmd
->sense_buffer
));
709 return scsi_dispatch_cmd(cmd
);
713 * Function: scsi_finish_command
715 * Purpose: Pass command off to upper layer for finishing of I/O
716 * request, waking processes that are waiting on results,
719 void scsi_finish_command(struct scsi_cmnd
*cmd
)
721 struct scsi_device
*sdev
= cmd
->device
;
722 struct Scsi_Host
*shost
= sdev
->host
;
723 struct scsi_request
*sreq
;
725 scsi_device_unbusy(sdev
);
728 * Clear the flags which say that the device/host is no longer
729 * capable of accepting new commands. These are set in scsi_queue.c
730 * for both the queue full condition on a device, and for a
731 * host full condition on the host.
733 * XXX(hch): What about locking?
735 shost
->host_blocked
= 0;
736 sdev
->device_blocked
= 0;
739 * If we have valid sense information, then some kind of recovery
740 * must have taken place. Make a note of this.
742 if (SCSI_SENSE_VALID(cmd
))
743 cmd
->result
|= (DRIVER_SENSE
<< 24);
745 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion "
746 "for device %d %x\n", sdev
->id
, cmd
->result
));
748 cmd
->owner
= SCSI_OWNER_HIGHLEVEL
;
749 cmd
->state
= SCSI_STATE_FINISHED
;
752 * We can get here with use_sg=0, causing a panic in the upper level
754 cmd
->use_sg
= cmd
->old_use_sg
;
757 * If there is an associated request structure, copy the data over
758 * before we call the completion function.
760 sreq
= cmd
->sc_request
;
762 sreq
->sr_result
= sreq
->sr_command
->result
;
763 if (sreq
->sr_result
) {
764 memcpy(sreq
->sr_sense_buffer
,
765 sreq
->sr_command
->sense_buffer
,
766 sizeof(sreq
->sr_sense_buffer
));
774 * Function: scsi_adjust_queue_depth()
776 * Purpose: Allow low level drivers to tell us to change the queue depth
777 * on a specific SCSI device
779 * Arguments: sdev - SCSI Device in question
780 * tagged - Do we use tagged queueing (non-0) or do we treat
781 * this device as an untagged device (0)
782 * tags - Number of tags allowed if tagged queueing enabled,
783 * or number of commands the low level driver can
784 * queue up in non-tagged mode (as per cmd_per_lun).
788 * Lock Status: None held on entry
790 * Notes: Low level drivers may call this at any time and we will do
791 * the right thing depending on whether or not the device is
792 * currently active and whether or not it even has the
793 * command blocks built yet.
795 * XXX(hch): What exactly is device_request_lock trying to protect?
797 void scsi_adjust_queue_depth(struct scsi_device
*sdev
, int tagged
, int tags
)
799 static spinlock_t device_request_lock
= SPIN_LOCK_UNLOCKED
;
803 * refuse to set tagged depth to an unworkable size
808 * Limit max queue depth on a single lun to 256 for now. Remember,
809 * we allocate a struct scsi_command for each of these and keep it
810 * around forever. Too deep of a depth just wastes memory.
815 spin_lock_irqsave(&device_request_lock
, flags
);
816 sdev
->queue_depth
= tags
;
818 case MSG_ORDERED_TAG
:
819 sdev
->ordered_tags
= 1;
820 sdev
->simple_tags
= 1;
823 sdev
->ordered_tags
= 0;
824 sdev
->simple_tags
= 1;
827 printk(KERN_WARNING
"(scsi%d:%d:%d:%d) "
828 "scsi_adjust_queue_depth, bad queue type, "
829 "disabled\n", sdev
->host
->host_no
,
830 sdev
->channel
, sdev
->id
, sdev
->lun
);
832 sdev
->ordered_tags
= sdev
->simple_tags
= 0;
833 sdev
->queue_depth
= tags
;
836 spin_unlock_irqrestore(&device_request_lock
, flags
);
840 * Function: scsi_track_queue_full()
842 * Purpose: This function will track successive QUEUE_FULL events on a
843 * specific SCSI device to determine if and when there is a
844 * need to adjust the queue depth on the device.
846 * Arguments: sdev - SCSI Device in question
847 * depth - Current number of outstanding SCSI commands on
848 * this device, not counting the one returned as
851 * Returns: 0 - No change needed
852 * >0 - Adjust queue depth to this new depth
853 * -1 - Drop back to untagged operation using host->cmd_per_lun
854 * as the untagged command depth
856 * Lock Status: None held on entry
858 * Notes: Low level drivers may call this at any time and we will do
859 * "The Right Thing." We are interrupt context safe.
861 int scsi_track_queue_full(struct scsi_device
*sdev
, int depth
)
863 if ((jiffies
>> 4) == sdev
->last_queue_full_time
)
866 sdev
->last_queue_full_time
= (jiffies
>> 4);
867 if (sdev
->last_queue_full_depth
!= depth
) {
868 sdev
->last_queue_full_count
= 1;
869 sdev
->last_queue_full_depth
= depth
;
871 sdev
->last_queue_full_count
++;
874 if (sdev
->last_queue_full_count
<= 10)
876 if (sdev
->last_queue_full_depth
< 8) {
877 /* Drop back to untagged */
878 scsi_adjust_queue_depth(sdev
, 0, sdev
->host
->cmd_per_lun
);
882 if (sdev
->ordered_tags
)
883 scsi_adjust_queue_depth(sdev
, MSG_ORDERED_TAG
, depth
);
885 scsi_adjust_queue_depth(sdev
, MSG_SIMPLE_TAG
, depth
);
889 int scsi_device_get(struct scsi_device
*sdev
)
891 if (!try_module_get(sdev
->host
->hostt
->module
))
894 sdev
->access_count
++;
898 void scsi_device_put(struct scsi_device
*sdev
)
900 sdev
->access_count
--;
901 module_put(sdev
->host
->hostt
->module
);
905 * scsi_set_device_offline - set scsi_device offline
906 * @sdev: pointer to struct scsi_device to offline.
908 * Locks: host_lock held on entry.
910 void scsi_set_device_offline(struct scsi_device
*sdev
)
912 struct scsi_cmnd
*scmd
;
913 LIST_HEAD(active_list
);
914 struct list_head
*lh
, *lh_sf
;
919 spin_lock_irqsave(&sdev
->list_lock
, flags
);
920 list_for_each_entry(scmd
, &sdev
->cmd_list
, list
) {
921 if (scmd
->request
&& scmd
->request
->rq_status
!= RQ_INACTIVE
) {
923 * If we are unable to remove the timer, it means
924 * that the command has already timed out or
927 if (!scsi_delete_timer(scmd
))
929 list_add_tail(&scmd
->eh_entry
, &active_list
);
932 spin_unlock_irqrestore(&sdev
->list_lock
, flags
);
934 if (!list_empty(&active_list
)) {
935 list_for_each_safe(lh
, lh_sf
, &active_list
) {
936 scmd
= list_entry(lh
, struct scsi_cmnd
, eh_entry
);
937 scsi_eh_scmd_add(scmd
, SCSI_EH_CANCEL_CMD
);
940 /* FIXME: Send online state change hotplug event */
944 MODULE_DESCRIPTION("SCSI core");
945 MODULE_LICENSE("GPL");
947 module_param(scsi_logging_level
, int, S_IRUGO
|S_IWUSR
);
948 MODULE_PARM_DESC(scsi_logging_level
, "a bit mask of logging levels");
950 static int __init
init_scsi(void)
954 error
= scsi_init_queue();
957 error
= scsi_init_procfs();
960 error
= scsi_init_devinfo();
963 error
= scsi_sysfs_register();
965 goto cleanup_devlist
;
967 for (i
= 0; i
< NR_CPUS
; i
++)
968 INIT_LIST_HEAD(&done_q
[i
]);
970 devfs_mk_dir("scsi");
971 open_softirq(SCSI_SOFTIRQ
, scsi_softirq
, NULL
);
972 printk(KERN_NOTICE
"SCSI subsystem initialized\n");
981 printk(KERN_ERR
"SCSI subsystem failed to initialize, error = %d\n",
986 static void __exit
exit_scsi(void)
988 scsi_sysfs_unregister();
990 devfs_remove("scsi");
/* Register early (subsystem initcall) so host drivers can link against us. */
subsys_initcall(init_scsi);
module_exit(exit_scsi);