/*
 *  scsi.c Copyright (C) 1992 Drew Eckhardt
 *         Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *  generic mid-level SCSI driver
 *      Initial versions: Drew Eckhardt
 *      Subsequent revisions: Eric Youngdale
 *
 *  Bug correction thanks go to :
 *      Rik Faith <faith@cs.unc.edu>
 *      Tommy Thorn <tthorn>
 *      Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
 *
 *  Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
 *  add scatter-gather, multiple outstanding request, and other
 *
 *  Native multichannel, wide scsi, /proc/scsi and hot plugging
 *  support added by Michael Neuffer <mike@i-connect.net>
 *
 *  Added request_module("scsi_hostadapter") for kerneld:
 *  (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
 *  Bjorn Ekwall  <bj0rn@blox.se>
 *
 *  Major improvements to the timeout, abort, and reset processing,
 *  as well as performance modifications for large queue depths by
 *  Leonard N. Zubkoff <lnz@dandelion.com>
 *
 *  Converted cli() code to spinlocks, Ingo Molnar
 *
 *  Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
 *
 *  out_of_space hacks, D. Gilbert (dpg) 990608
 */
39 #define REVISION "Revision: 1.00"
40 #define VERSION "Id: scsi.c 1.00 2000/09/26"
42 #include <linux/config.h>
43 #include <linux/module.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/string.h>
48 #include <linux/malloc.h>
49 #include <linux/ioport.h>
50 #include <linux/kernel.h>
51 #include <linux/stat.h>
52 #include <linux/blk.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/init.h>
57 #define __KERNEL_SYSCALLS__
59 #include <linux/unistd.h>
60 #include <linux/spinlock.h>
62 #include <asm/system.h>
65 #include <asm/uaccess.h>
69 #include "constants.h"
72 #include <linux/kmod.h>
75 #undef USE_STATIC_SCSI_MEMORY
/* NOTE(review): this file is a decimated extraction — each logical line below
 * is split across physical lines and prefixed with a stray original source
 * line number (77, 80, ...). Code is kept byte-identical; only comments added. */
/* Root entry for the /proc/scsi tree — presumably registered during init; TODO confirm */
77 struct proc_dir_entry
*proc_scsi
;
/* Forward declaration: /proc read handler for the SCSI mid-level
 * (buffer/start/offset/length look like the procfs read convention — assumed, verify) */
80 static int scsi_proc_info(char *buffer
, char **start
, off_t offset
, int length
);
/* Forward declaration: dump mid-level status at the given logging level */
81 static void scsi_dump_status(int level
);
/* CVS revision keyword string, kept for binary identification */
85 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
/* Definitions and constants. */
92 #define MIN_RESET_DELAY (2*HZ)
94 /* Do not call reset on error if we just did a reset within 15 sec. */
95 #define MIN_RESET_PERIOD (15*HZ)
/* Monotonic id assigned to commands by the mid-level — assumed, confirm at use sites */
101 unsigned long scsi_pid
;
/* Last command processed — purpose not visible in this chunk; TODO confirm */
102 Scsi_Cmnd
*last_cmnd
;
103 /* Command groups 3 and 4 are reserved and should never be used. */
/* NOTE(review): the 8-entry initializer (original lines 105-108) is missing
 * from this extraction — the array maps SCSI command group to CDB length. */
104 const unsigned char scsi_command_size
[8] =
/* Serial number source for scsi_dispatch_cmd (incremented there, never zero) */
109 static unsigned long serial_number
;
/* Singly-linked (via bh_next) queue of completed commands awaiting the
 * bottom-half handler; protected by scsi_bhqueue_lock below. */
110 static Scsi_Cmnd
*scsi_bh_queue_head
;
111 static Scsi_Cmnd
*scsi_bh_queue_tail
;
114 * Note - the initial logging level can be set here to log events at boot time.
115 * After the system is up, you may enable logging via the /proc interface.
117 unsigned int scsi_logging_level
;
/* Human-readable names per SCSI device type code.
 * NOTE(review): the initializer list (original lines 120-136) is missing here. */
119 const char *const scsi_device_types
[MAX_SCSI_DEVICE_CODE
] =
138 * Function prototypes.
/* Timeout handler installed via scsi_add_timer in scsi_dispatch_cmd (new eh code) */
140 extern void scsi_times_out(Scsi_Cmnd
* SCpnt
);
/* Builds the per-device pool of command blocks — defined elsewhere in this file */
141 void scsi_build_commandblocks(Scsi_Device
* SDpnt
);
144 * These are the interface to the old error handling code. It should go away
/* Completion + timeout entry points of the legacy error-handling path,
 * used when host->hostt->use_new_eh_code is false (see scsi_dispatch_cmd). */
147 extern void scsi_old_done(Scsi_Cmnd
* SCpnt
);
148 extern void scsi_old_times_out(Scsi_Cmnd
* SCpnt
);
152 * Function: scsi_initialize_queue()
154 * Purpose: Selects queue handler function for a device.
156 * Arguments: SDpnt - device for which we need a handler function.
160 * Lock status: No locking assumed or required.
162 * Notes: Most devices will end up using scsi_request_fn for the
163 * handler function (at least as things are done now).
164 * The "block" feature basically ensures that only one of
165 * the blocked hosts is active at one time, mainly to work around
166 * buggy DMA chipsets where the memory gets starved.
167 * For this case, we have a special handler function, which
168 * does some checks and ultimately calls scsi_request_fn.
170 * The single_lun feature is a similar special case.
172 * We handle these things by stacking the handlers. The
173 * special case handlers simply check a few conditions,
174 * and return if they are not supposed to do anything.
175 * In the event that things are OK, then they call the next
176 * handler in the list - ultimately they call scsi_request_fn
177 * to do the dirty deed.
/* Wires the device's block-layer request queue to scsi_request_fn and stashes
 * the device pointer in queuedata for retrieval from the queue.
 * NOTE(review): SHpnt is unused in the visible body; the closing brace
 * (original line 183) is missing from this extraction. */
179 void scsi_initialize_queue(Scsi_Device
* SDpnt
, struct Scsi_Host
* SHpnt
) {
180 blk_init_queue(&SDpnt
->request_queue
, scsi_request_fn
);
/* head-active disabled: the mid-level dequeues requests itself */
181 blk_queue_headactive(&SDpnt
->request_queue
, 0);
182 SDpnt
->request_queue
.queuedata
= (void *) SDpnt
;
/* Expose scsi_logging_level as an integer module parameter (2.4-era macro API) */
186 MODULE_PARM(scsi_logging_level
, "i");
187 MODULE_PARM_DESC(scsi_logging_level
, "SCSI logging level; should be zero or nonzero");
/* Boot-time parser for the "scsi_logging=" kernel command line option:
 * any nonzero value enables all logging bits (~0), zero disables logging.
 * NOTE(review): the opening brace, the declaration of tmp, the else branch
 * and the return statements are missing from this extraction (original
 * lines between 191-205 were dropped). */
191 static int __init
scsi_logging_setup(char *str
)
195 if (get_option(&str
, &tmp
) == 1) {
196 scsi_logging_level
= (tmp
? ~0 : 0);
/* Malformed option: tell the user the expected syntax */
199 printk(KERN_INFO
"scsi_logging_setup : usage scsi_logging_level=n "
200 "(n should be 0 or non-zero)\n");
/* Register the handler for the boot option */
205 __setup("scsi_logging=", scsi_logging_setup
);
210 * Issue a command and wait for it to complete
/* Completion callback used by scsi_wait_req: marks the request done and,
 * if a waiter registered a semaphore, presumably wakes it with up() —
 * NOTE(review): the declaration of req, the up() call and the closing
 * braces are missing from this extraction. */
213 static void scsi_wait_done(Scsi_Cmnd
* SCpnt
)
217 req
= &SCpnt
->request
;
218 req
->rq_status
= RQ_SCSI_DONE
; /* Busy, but indicate request done */
220 if (req
->sem
!= NULL
) {
226 * This lock protects the freelist for all devices on the system.
227 * We could make this finer grained by having a single lock per
228 * device if it is ever found that there is excessive contention
/* Guards the per-device command-block freelists (see scsi_allocate_device /
 * __scsi_release_command, which take it with irqsave). */
231 static spinlock_t device_request_lock
= SPIN_LOCK_UNLOCKED
;
234 * Used to protect insertion into and removal from the queue of
235 * commands to be processed by the bottom half handler.
/* Guards scsi_bh_queue_head/tail (see scsi_done and scsi_bottom_half_handler). */
237 static spinlock_t scsi_bhqueue_lock
= SPIN_LOCK_UNLOCKED
;
240 * Function: scsi_allocate_request
242 * Purpose: Allocate a request descriptor.
244 * Arguments: device - device for which we want a request
246 * Lock status: No locks assumed to be held. This function is SMP-safe.
248 * Returns: Pointer to request block.
250 * Notes: With the new queueing code, it becomes important
251 * to track the difference between a command and a
252 * request. A request is a pending item in the queue that
253 * has not yet reached the top of the queue.
/* Allocates and zero-initializes a Scsi_Request bound to the given device.
 * Panics if device is NULL. Uses GFP_ATOMIC, so callable from atomic context.
 * NOTE(review): the NULL-device test, the kmalloc-failure check and the
 * return statement are missing from this extraction (original lines
 * 259-268 dropped in part). */
256 Scsi_Request
*scsi_allocate_request(Scsi_Device
* device
)
258 Scsi_Request
*SRpnt
= NULL
;
261 panic("No device passed to scsi_allocate_request().\n");
263 SRpnt
= (Scsi_Request
*) kmalloc(sizeof(Scsi_Request
), GFP_ATOMIC
);
/* Zero the descriptor, then bind it to the device/host and stamp the magic */
269 memset(SRpnt
, 0, sizeof(Scsi_Request
));
270 SRpnt
->sr_device
= device
;
271 SRpnt
->sr_host
= device
->host
;
272 SRpnt
->sr_magic
= SCSI_REQ_MAGIC
;
/* Data direction unknown until the caller sets it */
273 SRpnt
->sr_data_direction
= SCSI_DATA_UNKNOWN
;
279 * Function: scsi_release_request
281 * Purpose: Release a request descriptor.
283 * Arguments: device - device for which we want a request
285 * Lock status: No locks assumed to be held. This function is SMP-safe.
287 * Returns: Pointer to request block.
289 * Notes: With the new queueing code, it becomes important
290 * to track the difference between a command and a
291 * request. A request is a pending item in the queue that
292 * has not yet reached the top of the queue. We still need
293 * to free a request when we are done with it, of course.
/* Releases any command block still attached to the request, then presumably
 * frees the request itself — NOTE(review): the kfree(req) call and the
 * braces are missing from this extraction. */
295 void scsi_release_request(Scsi_Request
* req
)
297 if( req
->sr_command
!= NULL
)
299 scsi_release_command(req
->sr_command
);
/* Clear the back-pointer so a stale command cannot be released twice */
300 req
->sr_command
= NULL
;
307 * Function: scsi_allocate_device
309 * Purpose: Allocate a command descriptor.
311 * Arguments: device - device for which we want a command descriptor
312 * wait - 1 if we should wait in the event that none
314 * interruptible - 1 if we should unblock and return NULL
315 * in the event that we must wait, and a signal
318 * Lock status: No locks assumed to be held. This function is SMP-safe.
320 * Returns: Pointer to command descriptor.
322 * Notes: Prior to the new queue code, this function was not SMP-safe.
324 * If the wait flag is true, and we are waiting for a free
325 * command block, this function will interrupt and return
326 * NULL in the event that a signal arrives that needs to
329 * This function is deprecated, and drivers should be
330 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
333 Scsi_Cmnd
*scsi_allocate_device(Scsi_Device
* device
, int wait
,
336 struct Scsi_Host
*host
;
337 Scsi_Cmnd
*SCpnt
= NULL
;
342 panic("No device passed to scsi_allocate_device().\n");
346 spin_lock_irqsave(&device_request_lock
, flags
);
350 if (!device
->device_blocked
) {
351 if (device
->single_lun
) {
353 * FIXME(eric) - this is not at all optimal. Given that
354 * single lun devices are rare and usually slow
355 * (i.e. CD changers), this is good enough for now, but
356 * we may want to come back and optimize this later.
358 * Scan through all of the devices attached to this
359 * host, and see if any are active or not. If so,
360 * we need to defer this command.
362 * We really need a busy counter per device. This would
363 * allow us to more easily figure out whether we should
364 * do anything here or not.
366 for (SDpnt
= host
->host_queue
;
368 SDpnt
= SDpnt
->next
) {
370 * Only look for other devices on the same bus
371 * with the same target ID.
373 if (SDpnt
->channel
!= device
->channel
374 || SDpnt
->id
!= device
->id
375 || SDpnt
== device
) {
378 if( atomic_read(&SDpnt
->device_active
) != 0)
385 * Some other device in this cluster is busy.
386 * If asked to wait, we need to wait, otherwise
394 * Now we can check for a free command block for this device.
396 for (SCpnt
= device
->device_queue
; SCpnt
; SCpnt
= SCpnt
->next
) {
397 if (SCpnt
->request
.rq_status
== RQ_INACTIVE
)
402 * If we couldn't find a free command block, and we have been
403 * asked to wait, then do so.
410 * If we have been asked to wait for a free block, then
414 DECLARE_WAITQUEUE(wait
, current
);
417 * We need to wait for a free commandblock. We need to
418 * insert ourselves into the list before we release the
419 * lock. This way if a block were released the same
420 * microsecond that we released the lock, the call
421 * to schedule() wouldn't block (well, it might switch,
422 * but the current task will still be schedulable.
424 add_wait_queue(&device
->scpnt_wait
, &wait
);
425 if( interruptable
) {
426 set_current_state(TASK_INTERRUPTIBLE
);
428 set_current_state(TASK_UNINTERRUPTIBLE
);
431 spin_unlock_irqrestore(&device_request_lock
, flags
);
434 * This should block until a device command block
439 spin_lock_irqsave(&device_request_lock
, flags
);
441 remove_wait_queue(&device
->scpnt_wait
, &wait
);
443 * FIXME - Isn't this redundant?? Someone
444 * else will have forced the state back to running.
446 set_current_state(TASK_RUNNING
);
448 * In the event that a signal has arrived that we need
449 * to consider, then simply return NULL. Everyone
450 * that calls us should be prepared for this
451 * possibility, and pass the appropriate code back
454 if( interruptable
) {
455 if (signal_pending(current
)) {
456 spin_unlock_irqrestore(&device_request_lock
, flags
);
461 spin_unlock_irqrestore(&device_request_lock
, flags
);
466 SCpnt
->request
.rq_status
= RQ_SCSI_BUSY
;
467 SCpnt
->request
.sem
= NULL
; /* And no one is waiting for this
469 atomic_inc(&SCpnt
->host
->host_active
);
470 atomic_inc(&SCpnt
->device
->device_active
);
472 SCpnt
->buffer
= NULL
;
474 SCpnt
->request_buffer
= NULL
;
475 SCpnt
->request_bufflen
= 0;
477 SCpnt
->use_sg
= 0; /* Reset the scatter-gather flag */
478 SCpnt
->old_use_sg
= 0;
479 SCpnt
->transfersize
= 0; /* No default transfer size */
482 SCpnt
->sc_data_direction
= SCSI_DATA_UNKNOWN
;
483 SCpnt
->sc_request
= NULL
;
484 SCpnt
->sc_magic
= SCSI_CMND_MAGIC
;
487 SCpnt
->underflow
= 0; /* Do not flag underflow conditions */
488 SCpnt
->old_underflow
= 0;
490 SCpnt
->state
= SCSI_STATE_INITIALIZING
;
491 SCpnt
->owner
= SCSI_OWNER_HIGHLEVEL
;
493 spin_unlock_irqrestore(&device_request_lock
, flags
);
495 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
497 atomic_read(&SCpnt
->host
->host_active
)));
502 inline void __scsi_release_command(Scsi_Cmnd
* SCpnt
)
507 spin_lock_irqsave(&device_request_lock
, flags
);
509 SDpnt
= SCpnt
->device
;
511 SCpnt
->request
.rq_status
= RQ_INACTIVE
;
512 SCpnt
->state
= SCSI_STATE_UNUSED
;
513 SCpnt
->owner
= SCSI_OWNER_NOBODY
;
514 atomic_dec(&SCpnt
->host
->host_active
);
515 atomic_dec(&SDpnt
->device_active
);
517 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
519 atomic_read(&SCpnt
->host
->host_active
),
520 SCpnt
->host
->host_failed
));
521 if (SCpnt
->host
->host_failed
!= 0) {
522 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
523 SCpnt
->host
->in_recovery
,
524 SCpnt
->host
->eh_active
));
527 * If the host is having troubles, then look to see if this was the last
528 * command that might have failed. If so, wake up the error handler.
530 if (SCpnt
->host
->in_recovery
531 && !SCpnt
->host
->eh_active
532 && SCpnt
->host
->host_busy
== SCpnt
->host
->host_failed
) {
533 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
534 atomic_read(&SCpnt
->host
->eh_wait
->count
)));
535 up(SCpnt
->host
->eh_wait
);
538 spin_unlock_irqrestore(&device_request_lock
, flags
);
541 * Wake up anyone waiting for this device. Do this after we
542 * have released the lock, as they will need it as soon as
545 wake_up(&SDpnt
->scpnt_wait
);
549 * Function: scsi_release_command
551 * Purpose: Release a command block.
553 * Arguments: SCpnt - command block we are releasing.
555 * Notes: The command block can no longer be used by the caller once
556 * this funciton is called. This is in effect the inverse
557 * of scsi_allocate_device. Note that we also must perform
558 * a couple of additional tasks. We must first wake up any
559 * processes that might have blocked waiting for a command
560 * block, and secondly we must hit the queue handler function
561 * to make sure that the device is busy. Note - there is an
562 * option to not do this - there were instances where we could
563 * recurse too deeply and blow the stack if this happened
564 * when we were indirectly called from the request function
567 * The idea is that a lot of the mid-level internals gunk
568 * gets hidden in this function. Upper level drivers don't
569 * have any chickens to wave in the air to get things to
572 * This function is deprecated, and drivers should be
573 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
/* Returns the command block to the pool via __scsi_release_command, then
 * kicks the device's request queue so pending work is dispatched.
 * NOTE(review): the local declarations of SDpnt and q and the braces are
 * missing from this extraction. */
575 void scsi_release_command(Scsi_Cmnd
* SCpnt
)
/* Capture the device before the command block is recycled */
580 SDpnt
= SCpnt
->device
;
582 __scsi_release_command(SCpnt
);
585 * Finally, hit the queue request function to make sure that
586 * the device is actually busy if there are requests present.
587 * This won't block - if the device cannot take any more, life
590 q
= &SDpnt
->request_queue
;
591 scsi_queue_next_request(q
, NULL
);
595 * Function: scsi_dispatch_command
597 * Purpose: Dispatch a command to the low-level driver.
599 * Arguments: SCpnt - command block we are dispatching.
603 int scsi_dispatch_cmd(Scsi_Cmnd
* SCpnt
)
608 struct Scsi_Host
*host
;
610 unsigned long flags
= 0;
611 unsigned long timeout
;
613 ASSERT_LOCK(&io_request_lock
, 0);
616 unsigned long *ret
= 0;
618 __asm__
__volatile__("move\t%0,$31":"=r"(ret
));
620 ret
= __builtin_return_address(0);
626 /* Assign a unique nonzero serial_number. */
627 if (++serial_number
== 0)
629 SCpnt
->serial_number
= serial_number
;
632 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
633 * we can avoid the drive not being ready.
635 timeout
= host
->last_reset
+ MIN_RESET_DELAY
;
637 if (host
->resetting
&& time_before(jiffies
, timeout
)) {
638 int ticks_remaining
= timeout
- jiffies
;
640 * NOTE: This may be executed from within an interrupt
641 * handler! This is bad, but for now, it'll do. The irq
642 * level of the interrupt handler has been masked out by the
643 * platform dependent interrupt handling code already, so the
644 * sti() here will not cause another call to the SCSI host's
645 * interrupt handler (assuming there is one irq-level per
648 while (--ticks_remaining
>= 0)
649 mdelay(1 + 999 / HZ
);
652 if (host
->hostt
->use_new_eh_code
) {
653 scsi_add_timer(SCpnt
, SCpnt
->timeout_per_command
, scsi_times_out
);
655 scsi_add_timer(SCpnt
, SCpnt
->timeout_per_command
,
660 * We will use a queued command if possible, otherwise we will emulate the
661 * queuing and calling of completion function ourselves.
663 SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
664 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
665 SCpnt
->host
->host_no
, SCpnt
->channel
, SCpnt
->target
, SCpnt
->cmnd
,
666 SCpnt
->buffer
, SCpnt
->bufflen
, SCpnt
->done
));
668 SCpnt
->state
= SCSI_STATE_QUEUED
;
669 SCpnt
->owner
= SCSI_OWNER_LOWLEVEL
;
670 if (host
->can_queue
) {
671 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
672 host
->hostt
->queuecommand
));
674 * Use the old error handling code if we haven't converted the driver
675 * to use the new one yet. Note - only the new queuecommand variant
676 * passes a meaningful return value.
678 if (host
->hostt
->use_new_eh_code
) {
679 spin_lock_irqsave(&io_request_lock
, flags
);
680 rtn
= host
->hostt
->queuecommand(SCpnt
, scsi_done
);
681 spin_unlock_irqrestore(&io_request_lock
, flags
);
683 scsi_delete_timer(SCpnt
);
684 scsi_mlqueue_insert(SCpnt
, SCSI_MLQUEUE_HOST_BUSY
);
685 SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
688 spin_lock_irqsave(&io_request_lock
, flags
);
689 host
->hostt
->queuecommand(SCpnt
, scsi_old_done
);
690 spin_unlock_irqrestore(&io_request_lock
, flags
);
695 SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host
->hostt
->command
));
696 spin_lock_irqsave(&io_request_lock
, flags
);
697 temp
= host
->hostt
->command(SCpnt
);
698 SCpnt
->result
= temp
;
700 spin_unlock_irqrestore(&io_request_lock
, flags
);
701 clock
= jiffies
+ 4 * HZ
;
702 while (time_before(jiffies
, clock
))
704 printk("done(host = %d, result = %04x) : routine at %p\n",
705 host
->host_no
, temp
, host
->hostt
->command
);
706 spin_lock_irqsave(&io_request_lock
, flags
);
708 if (host
->hostt
->use_new_eh_code
) {
711 scsi_old_done(SCpnt
);
713 spin_unlock_irqrestore(&io_request_lock
, flags
);
715 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
/* devfs directory handle for the SCSI subsystem — presumably registered at
 * init elsewhere in this file; TODO confirm */
719 devfs_handle_t scsi_devfs_handle
;
722 * scsi_do_cmd sends all the commands out to the low-level driver. It
723 * handles the specifics required for each low level driver - ie queued
724 * or non queued. It also prevents conflicts when different high level
725 * drivers go for the same host at the same time.
/* Synchronous wrapper around scsi_do_req: attaches an on-stack semaphore to
 * the embedded request, issues the command with scsi_wait_done as completion
 * callback, and (presumably via a down(&sem) on original line 738, missing
 * from this extraction) blocks until scsi_wait_done signals completion.
 * Afterwards the attached command block, if any, is released.
 * NOTE(review): the function braces and the down() call are missing here. */
728 void scsi_wait_req (Scsi_Request
* SRpnt
, const void *cmnd
,
729 void *buffer
, unsigned bufflen
,
730 int timeout
, int retries
)
732 DECLARE_MUTEX_LOCKED(sem
);
/* Let scsi_wait_done find the semaphore through the request */
734 SRpnt
->sr_request
.sem
= &sem
;
735 SRpnt
->sr_request
.rq_status
= RQ_SCSI_BUSY
;
736 scsi_do_req (SRpnt
, (void *) cmnd
,
737 buffer
, bufflen
, scsi_wait_done
, timeout
, retries
);
/* Detach the (now dead) on-stack semaphore before returning */
739 SRpnt
->sr_request
.sem
= NULL
;
740 if( SRpnt
->sr_command
!= NULL
)
742 scsi_release_command(SRpnt
->sr_command
);
743 SRpnt
->sr_command
= NULL
;
749 * Function: scsi_do_req
751 * Purpose: Queue a SCSI request
753 * Arguments: SRpnt - command descriptor.
754 * cmnd - actual SCSI command to be performed.
755 * buffer - data buffer.
756 * bufflen - size of data buffer.
757 * done - completion function to be run.
758 * timeout - how long to let it run before timeout.
759 * retries - number of retries we allow.
761 * Lock status: With the new queueing code, this is SMP-safe, and no locks
762 * need be held upon entry. The old queueing code the lock was
763 * assumed to be held upon entry.
767 * Notes: Prior to the new queue code, this function was not SMP-safe.
768 * Also, this function is now only used for queueing requests
769 * for things like ioctls and character device requests - this
770 * is because we essentially just inject a request into the
771 * queue for the device. Normal block device handling manipulates
772 * the queue directly.
774 void scsi_do_req(Scsi_Request
* SRpnt
, const void *cmnd
,
775 void *buffer
, unsigned bufflen
, void (*done
) (Scsi_Cmnd
*),
776 int timeout
, int retries
)
778 Scsi_Device
* SDpnt
= SRpnt
->sr_device
;
779 struct Scsi_Host
*host
= SDpnt
->host
;
781 ASSERT_LOCK(&io_request_lock
, 0);
786 int target
= SDpnt
->id
;
787 printk("scsi_do_req (host = %d, channel = %d target = %d, "
788 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
790 "command : ", host
->host_no
, SDpnt
->channel
, target
, buffer
,
791 bufflen
, done
, timeout
, retries
);
792 for (i
= 0; i
< 10; ++i
)
793 printk("%02x ", ((unsigned char *) cmnd
)[i
]);
798 panic("Invalid or not present host.\n");
802 * If the upper level driver is reusing these things, then
803 * we should release the low-level block now. Another one will
804 * be allocated later when this request is getting queued.
806 if( SRpnt
->sr_command
!= NULL
)
808 scsi_release_command(SRpnt
->sr_command
);
809 SRpnt
->sr_command
= NULL
;
813 * We must prevent reentrancy to the lowlevel host driver. This prevents
814 * it - we enter a loop until the host we want to talk to is not busy.
815 * Race conditions are prevented, as interrupts are disabled in between the
816 * time we check for the host being not busy, and the time we mark it busy
822 * Our own function scsi_done (which marks the host as not busy, disables
823 * the timeout counter, etc) will be called by us or by the
824 * scsi_hosts[host].queuecommand() function needs to also call
825 * the completion function for the high level driver.
828 memcpy((void *) SRpnt
->sr_cmnd
, (const void *) cmnd
,
829 sizeof(SRpnt
->sr_cmnd
));
830 SRpnt
->sr_bufflen
= bufflen
;
831 SRpnt
->sr_buffer
= buffer
;
832 SRpnt
->sr_allowed
= retries
;
833 SRpnt
->sr_done
= done
;
834 SRpnt
->sr_timeout_per_command
= timeout
;
836 memcpy((void *) SRpnt
->sr_cmnd
, (const void *) cmnd
,
837 sizeof(SRpnt
->sr_cmnd
));
839 if (SRpnt
->sr_cmd_len
== 0)
840 SRpnt
->sr_cmd_len
= COMMAND_SIZE(SRpnt
->sr_cmnd
[0]);
843 * At this point, we merely set up the command, stick it in the normal
844 * request queue, and return. Eventually that request will come to the
845 * top of the list, and will be dispatched.
847 scsi_insert_special_req(SRpnt
, 0);
849 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
853 * Function: scsi_init_cmd_from_req
855 * Purpose: Queue a SCSI command
856 * Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
858 * Arguments: SCpnt - command descriptor.
859 * SRpnt - Request from the queue.
861 * Lock status: None needed.
865 * Notes: Mainly transfer data from the request structure to the
866 * command structure. The request structure is allocated
867 * using the normal memory allocator, and requests can pile
868 * up to more or less any depth. The command structure represents
869 * a consumable resource, as these are allocated into a pool
870 * when the SCSI subsystem initializes. The preallocation is
871 * required so that in low-memory situations a disk I/O request
872 * won't cause the memory manager to try and write out a page.
873 * The request structure is generally used by ioctls and character
876 void scsi_init_cmd_from_req(Scsi_Cmnd
* SCpnt
, Scsi_Request
* SRpnt
)
878 struct Scsi_Host
*host
= SCpnt
->host
;
880 ASSERT_LOCK(&io_request_lock
, 0);
882 SCpnt
->owner
= SCSI_OWNER_MIDLEVEL
;
883 SRpnt
->sr_command
= SCpnt
;
886 panic("Invalid or not present host.\n");
889 SCpnt
->cmd_len
= SRpnt
->sr_cmd_len
;
890 SCpnt
->use_sg
= SRpnt
->sr_use_sg
;
892 memcpy((void *) &SCpnt
->request
, (const void *) &SRpnt
->sr_request
,
893 sizeof(SRpnt
->sr_request
));
894 memcpy((void *) SCpnt
->data_cmnd
, (const void *) SRpnt
->sr_cmnd
,
895 sizeof(SCpnt
->data_cmnd
));
896 SCpnt
->reset_chain
= NULL
;
897 SCpnt
->serial_number
= 0;
898 SCpnt
->serial_number_at_timeout
= 0;
899 SCpnt
->bufflen
= SRpnt
->sr_bufflen
;
900 SCpnt
->buffer
= SRpnt
->sr_buffer
;
903 SCpnt
->allowed
= SRpnt
->sr_allowed
;
904 SCpnt
->done
= SRpnt
->sr_done
;
905 SCpnt
->timeout_per_command
= SRpnt
->sr_timeout_per_command
;
907 SCpnt
->sc_data_direction
= SRpnt
->sr_data_direction
;
909 SCpnt
->sglist_len
= SRpnt
->sr_sglist_len
;
910 SCpnt
->underflow
= SRpnt
->sr_underflow
;
912 SCpnt
->sc_request
= SRpnt
;
914 memcpy((void *) SCpnt
->cmnd
, (const void *) SRpnt
->sr_cmnd
,
915 sizeof(SCpnt
->cmnd
));
916 /* Zero the sense buffer. Some host adapters automatically request
917 * sense on error. 0 is not a valid sense code.
919 memset((void *) SCpnt
->sense_buffer
, 0, sizeof SCpnt
->sense_buffer
);
920 SCpnt
->request_buffer
= SRpnt
->sr_buffer
;
921 SCpnt
->request_bufflen
= SRpnt
->sr_bufflen
;
922 SCpnt
->old_use_sg
= SCpnt
->use_sg
;
923 if (SCpnt
->cmd_len
== 0)
924 SCpnt
->cmd_len
= COMMAND_SIZE(SCpnt
->cmnd
[0]);
925 SCpnt
->old_cmd_len
= SCpnt
->cmd_len
;
926 SCpnt
->sc_old_data_direction
= SCpnt
->sc_data_direction
;
927 SCpnt
->old_underflow
= SCpnt
->underflow
;
929 /* Start the timer ticking. */
931 SCpnt
->internal_timeout
= NORMAL_TIMEOUT
;
932 SCpnt
->abort_reason
= 0;
935 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
939 * Function: scsi_do_cmd
941 * Purpose: Queue a SCSI command
943 * Arguments: SCpnt - command descriptor.
944 * cmnd - actual SCSI command to be performed.
945 * buffer - data buffer.
946 * bufflen - size of data buffer.
947 * done - completion function to be run.
948 * timeout - how long to let it run before timeout.
949 * retries - number of retries we allow.
951 * Lock status: With the new queueing code, this is SMP-safe, and no locks
952 * need be held upon entry. The old queueing code the lock was
953 * assumed to be held upon entry.
957 * Notes: Prior to the new queue code, this function was not SMP-safe.
958 * Also, this function is now only used for queueing requests
959 * for things like ioctls and character device requests - this
960 * is because we essentially just inject a request into the
961 * queue for the device. Normal block device handling manipulates
962 * the queue directly.
964 void scsi_do_cmd(Scsi_Cmnd
* SCpnt
, const void *cmnd
,
965 void *buffer
, unsigned bufflen
, void (*done
) (Scsi_Cmnd
*),
966 int timeout
, int retries
)
968 struct Scsi_Host
*host
= SCpnt
->host
;
970 ASSERT_LOCK(&io_request_lock
, 0);
972 SCpnt
->owner
= SCSI_OWNER_MIDLEVEL
;
977 int target
= SCpnt
->target
;
978 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
979 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
981 "command : ", host
->host_no
, SCpnt
->channel
, target
, buffer
,
982 bufflen
, done
, timeout
, retries
);
983 for (i
= 0; i
< 10; ++i
)
984 printk("%02x ", ((unsigned char *) cmnd
)[i
]);
989 panic("Invalid or not present host.\n");
992 * We must prevent reentrancy to the lowlevel host driver. This prevents
993 * it - we enter a loop until the host we want to talk to is not busy.
994 * Race conditions are prevented, as interrupts are disabled in between the
995 * time we check for the host being not busy, and the time we mark it busy
1001 * Our own function scsi_done (which marks the host as not busy, disables
1002 * the timeout counter, etc) will be called by us or by the
1003 * scsi_hosts[host].queuecommand() function needs to also call
1004 * the completion function for the high level driver.
1007 memcpy((void *) SCpnt
->data_cmnd
, (const void *) cmnd
,
1008 sizeof(SCpnt
->data_cmnd
));
1009 SCpnt
->reset_chain
= NULL
;
1010 SCpnt
->serial_number
= 0;
1011 SCpnt
->serial_number_at_timeout
= 0;
1012 SCpnt
->bufflen
= bufflen
;
1013 SCpnt
->buffer
= buffer
;
1016 SCpnt
->allowed
= retries
;
1018 SCpnt
->timeout_per_command
= timeout
;
1020 memcpy((void *) SCpnt
->cmnd
, (const void *) cmnd
,
1021 sizeof(SCpnt
->cmnd
));
1022 /* Zero the sense buffer. Some host adapters automatically request
1023 * sense on error. 0 is not a valid sense code.
1025 memset((void *) SCpnt
->sense_buffer
, 0, sizeof SCpnt
->sense_buffer
);
1026 SCpnt
->request_buffer
= buffer
;
1027 SCpnt
->request_bufflen
= bufflen
;
1028 SCpnt
->old_use_sg
= SCpnt
->use_sg
;
1029 if (SCpnt
->cmd_len
== 0)
1030 SCpnt
->cmd_len
= COMMAND_SIZE(SCpnt
->cmnd
[0]);
1031 SCpnt
->old_cmd_len
= SCpnt
->cmd_len
;
1032 SCpnt
->sc_old_data_direction
= SCpnt
->sc_data_direction
;
1033 SCpnt
->old_underflow
= SCpnt
->underflow
;
1035 /* Start the timer ticking. */
1037 SCpnt
->internal_timeout
= NORMAL_TIMEOUT
;
1038 SCpnt
->abort_reason
= 0;
1042 * At this point, we merely set up the command, stick it in the normal
1043 * request queue, and return. Eventually that request will come to the
1044 * top of the list, and will be dispatched.
1046 scsi_insert_special_cmd(SCpnt
, 0);
1048 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1052 * This function is the mid-level interrupt routine, which decides how
1053 * to handle error conditions. Each invocation of this function must
1054 * do one and *only* one of the following:
1056 * 1) Insert command in BH queue.
1057 * 2) Activate error handler for host.
1059 * FIXME(eric) - I am concerned about stack overflow (still). An
1060 * interrupt could come while we are processing the bottom queue,
1061 * which would cause another command to be stuffed onto the bottom
1062 * queue, and it would in turn be processed as that interrupt handler
1063 * is returning. Given a sufficiently steady rate of returning
1064 * commands, this could cause the stack to overflow. I am not sure
1065 * what is the most appropriate solution here - we should probably
1066 * keep a depth count, and not process any commands while we still
1067 * have a bottom handler active higher in the stack.
1069 * There is currently code in the bottom half handler to monitor
1070 * recursion in the bottom handler and report if it ever happens. If
1071 * this becomes a problem, it won't be hard to engineer something to
1072 * deal with it so that only the outer layer ever does any real
1075 void scsi_done(Scsi_Cmnd
* SCpnt
)
1077 unsigned long flags
;
1081 * We don't have to worry about this one timing out any more.
1083 tstatus
= scsi_delete_timer(SCpnt
);
1086 * If we are unable to remove the timer, it means that the command
1087 * has already timed out. In this case, we have no choice but to
1088 * let the timeout function run, as we have no idea where in fact
1089 * that function could really be. It might be on another processor,
1093 SCpnt
->done_late
= 1;
1096 /* Set the serial numbers back to zero */
1097 SCpnt
->serial_number
= 0;
1100 * First, see whether this command already timed out. If so, we ignore
1101 * the response. We treat it as if the command never finished.
1103 * Since serial_number is now 0, the error handler cound detect this
1104 * situation and avoid to call the the low level driver abort routine.
1107 * FIXME(eric) - I believe that this test is now redundant, due to
1108 * the test of the return status of del_timer().
1110 if (SCpnt
->state
== SCSI_STATE_TIMEOUT
) {
1111 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt
));
1114 spin_lock_irqsave(&scsi_bhqueue_lock
, flags
);
1116 SCpnt
->serial_number_at_timeout
= 0;
1117 SCpnt
->state
= SCSI_STATE_BHQUEUE
;
1118 SCpnt
->owner
= SCSI_OWNER_BH_HANDLER
;
1119 SCpnt
->bh_next
= NULL
;
1122 * Next, put this command in the BH queue.
1124 * We need a spinlock here, or compare and exchange if we can reorder incoming
1125 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1126 * before bh is serviced. -jj
1128 * We already have the io_request_lock here, since we are called from the
1129 * interrupt handler or the error handler. (DB)
1131 * This may be true at the moment, but I would like to wean all of the low
1132 * level drivers away from using io_request_lock. Technically they should
1133 * all use their own locking. I am adding a small spinlock to protect
1134 * this datastructure to make it safe for that day. (ERY)
1136 if (!scsi_bh_queue_head
) {
1137 scsi_bh_queue_head
= SCpnt
;
1138 scsi_bh_queue_tail
= SCpnt
;
1140 scsi_bh_queue_tail
->bh_next
= SCpnt
;
1141 scsi_bh_queue_tail
= SCpnt
;
1144 spin_unlock_irqrestore(&scsi_bhqueue_lock
, flags
);
1146 * Mark the bottom half handler to be run.
1152 * Procedure: scsi_bottom_half_handler
1154 * Purpose: Called after we have finished processing interrupts, it
1155 * performs post-interrupt handling for commands that may
1158 * Notes: This is called with all interrupts enabled. This should reduce
1159 * interrupt latency, stack depth, and reentrancy of the low-level
1162 * The io_request_lock is required in all the routine. There was a subtle
1163 * race condition when scsi_done is called after a command has already
1164 * timed out but before the time out is processed by the error handler.
1167 * I believe I have corrected this. We simply monitor the return status of
1168 * del_timer() - if this comes back as 0, it means that the timer has fired
1169 * and that a timeout is in progress. I have modified scsi_done() such
1170 * that in this instance the command is never inserted in the bottom
1171 * half queue. Thus the only time we hold the lock here is when
1172 * we wish to atomically remove the contents of the queue.
1174 void scsi_bottom_half_handler(void)
1178 unsigned long flags
;
1182 spin_lock_irqsave(&scsi_bhqueue_lock
, flags
);
1183 SCpnt
= scsi_bh_queue_head
;
1184 scsi_bh_queue_head
= NULL
;
1185 spin_unlock_irqrestore(&scsi_bhqueue_lock
, flags
);
1187 if (SCpnt
== NULL
) {
1190 SCnext
= SCpnt
->bh_next
;
1192 for (; SCpnt
; SCpnt
= SCnext
) {
1193 SCnext
= SCpnt
->bh_next
;
1195 switch (scsi_decide_disposition(SCpnt
)) {
1200 SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt
->host
->host_busy
,
1201 SCpnt
->host
->host_failed
,
1204 scsi_finish_command(SCpnt
);
1208 * We only come in here if we want to retry a command. The
1209 * test to see whether the command should be retried should be
1210 * keeping track of the number of tries, so we don't end up looping,
1213 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt
->host
->host_busy
,
1214 SCpnt
->host
->host_failed
, SCpnt
->result
));
1216 scsi_retry_command(SCpnt
);
1218 case ADD_TO_MLQUEUE
:
1220 * This typically happens for a QUEUE_FULL message -
1221 * typically only when the queue depth is only
1222 * approximate for a given device. Adding a command
1223 * to the queue for the device will prevent further commands
1224 * from being sent to the device, so we shouldn't end up
1225 * with tons of things being sent down that shouldn't be.
1227 SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
1229 scsi_mlqueue_insert(SCpnt
, SCSI_MLQUEUE_DEVICE_BUSY
);
1233 * Here we have a fatal error of some sort. Turn it over to
1234 * the error handler.
1236 SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1237 SCpnt
, SCpnt
->result
,
1238 atomic_read(&SCpnt
->host
->host_active
),
1239 SCpnt
->host
->host_busy
,
1240 SCpnt
->host
->host_failed
));
1243 * Dump the sense information too.
1245 if ((status_byte(SCpnt
->result
) & CHECK_CONDITION
) != 0) {
1246 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt
));
1248 if (SCpnt
->host
->eh_wait
!= NULL
) {
1249 SCpnt
->host
->host_failed
++;
1250 SCpnt
->owner
= SCSI_OWNER_ERROR_HANDLER
;
1251 SCpnt
->state
= SCSI_STATE_FAILED
;
1252 SCpnt
->host
->in_recovery
= 1;
1254 * If the host is having troubles, then look to see if this was the last
1255 * command that might have failed. If so, wake up the error handler.
1257 if (SCpnt
->host
->host_busy
== SCpnt
->host
->host_failed
) {
1258 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1259 atomic_read(&SCpnt
->host
->eh_wait
->count
)));
1260 up(SCpnt
->host
->eh_wait
);
1264 * We only get here if the error recovery thread has died.
1266 scsi_finish_command(SCpnt
);
1269 } /* for(; SCpnt...) */
1276 * Function: scsi_retry_command
1278 * Purpose: Send a command back to the low level to be retried.
1280 * Notes: This command is always executed in the context of the
1281 * bottom half handler, or the error handler thread. Low
1282 * level drivers should not become re-entrant as a result of
1285 int scsi_retry_command(Scsi_Cmnd
* SCpnt
)
1287 memcpy((void *) SCpnt
->cmnd
, (void *) SCpnt
->data_cmnd
,
1288 sizeof(SCpnt
->data_cmnd
));
1289 SCpnt
->request_buffer
= SCpnt
->buffer
;
1290 SCpnt
->request_bufflen
= SCpnt
->bufflen
;
1291 SCpnt
->use_sg
= SCpnt
->old_use_sg
;
1292 SCpnt
->cmd_len
= SCpnt
->old_cmd_len
;
1293 SCpnt
->sc_data_direction
= SCpnt
->sc_old_data_direction
;
1294 SCpnt
->underflow
= SCpnt
->old_underflow
;
1297 * Zero the sense information from the last time we tried
1300 memset((void *) SCpnt
->sense_buffer
, 0, sizeof SCpnt
->sense_buffer
);
1302 return scsi_dispatch_cmd(SCpnt
);
1306 * Function: scsi_finish_command
1308 * Purpose: Pass command off to upper layer for finishing of I/O
1309 * request, waking processes that are waiting on results,
1312 void scsi_finish_command(Scsi_Cmnd
* SCpnt
)
1314 struct Scsi_Host
*host
;
1315 Scsi_Device
*device
;
1316 Scsi_Request
* SRpnt
;
1317 unsigned long flags
;
1319 ASSERT_LOCK(&io_request_lock
, 0);
1322 device
= SCpnt
->device
;
1325 * We need to protect the decrement, as otherwise a race condition
1326 * would exist. Fiddling with SCpnt isn't a problem as the
1327 * design only allows a single SCpnt to be active in only
1328 * one execution context, but the device and host structures are
1331 spin_lock_irqsave(&io_request_lock
, flags
);
1332 host
->host_busy
--; /* Indicate that we are free */
1333 device
->device_busy
--; /* Decrement device usage counter. */
1334 spin_unlock_irqrestore(&io_request_lock
, flags
);
1337 * Clear the flags which say that the device/host is no longer
1338 * capable of accepting new commands. These are set in scsi_queue.c
1339 * for both the queue full condition on a device, and for a
1340 * host full condition on the host.
1342 host
->host_blocked
= FALSE
;
1343 device
->device_blocked
= FALSE
;
1346 * If we have valid sense information, then some kind of recovery
1347 * must have taken place. Make a note of this.
1349 if (scsi_sense_valid(SCpnt
)) {
1350 SCpnt
->result
|= (DRIVER_SENSE
<< 24);
1352 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1353 SCpnt
->device
->id
, SCpnt
->result
));
1355 SCpnt
->owner
= SCSI_OWNER_HIGHLEVEL
;
1356 SCpnt
->state
= SCSI_STATE_FINISHED
;
1358 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1359 SCpnt
->use_sg
= SCpnt
->old_use_sg
;
1362 * If there is an associated request structure, copy the data over before we call the
1363 * completion function.
1365 SRpnt
= SCpnt
->sc_request
;
1366 if( SRpnt
!= NULL
) {
1367 SRpnt
->sr_result
= SRpnt
->sr_command
->result
;
1368 if( SRpnt
->sr_result
!= 0 ) {
1369 memcpy(SRpnt
->sr_sense_buffer
,
1370 SRpnt
->sr_command
->sense_buffer
,
1371 sizeof(SRpnt
->sr_sense_buffer
));
1378 static int scsi_register_host(Scsi_Host_Template
*);
1379 static void scsi_unregister_host(Scsi_Host_Template
*);
1382 * Function: scsi_release_commandblocks()
1384 * Purpose: Release command blocks associated with a device.
1386 * Arguments: SDpnt - device
1390 * Lock status: No locking assumed or required.
1394 void scsi_release_commandblocks(Scsi_Device
* SDpnt
)
1396 Scsi_Cmnd
*SCpnt
, *SCnext
;
1397 unsigned long flags
;
1399 spin_lock_irqsave(&device_request_lock
, flags
);
1400 for (SCpnt
= SDpnt
->device_queue
; SCpnt
; SCpnt
= SCnext
) {
1401 SDpnt
->device_queue
= SCnext
= SCpnt
->next
;
1402 kfree((char *) SCpnt
);
1404 SDpnt
->has_cmdblocks
= 0;
1405 SDpnt
->queue_depth
= 0;
1406 spin_unlock_irqrestore(&device_request_lock
, flags
);
1410 * Function: scsi_build_commandblocks()
1412 * Purpose: Allocate command blocks associated with a device.
1414 * Arguments: SDpnt - device
1418 * Lock status: No locking assumed or required.
1422 void scsi_build_commandblocks(Scsi_Device
* SDpnt
)
1424 unsigned long flags
;
1425 struct Scsi_Host
*host
= SDpnt
->host
;
1429 spin_lock_irqsave(&device_request_lock
, flags
);
1431 if (SDpnt
->queue_depth
== 0)
1433 SDpnt
->queue_depth
= host
->cmd_per_lun
;
1434 if (SDpnt
->queue_depth
== 0)
1435 SDpnt
->queue_depth
= 1; /* live to fight another day */
1437 SDpnt
->device_queue
= NULL
;
1439 for (j
= 0; j
< SDpnt
->queue_depth
; j
++) {
1440 SCpnt
= (Scsi_Cmnd
*)
1441 kmalloc(sizeof(Scsi_Cmnd
),
1443 (host
->unchecked_isa_dma
? GFP_DMA
: 0));
1445 break; /* If not, the next line will oops ... */
1446 memset(SCpnt
, 0, sizeof(Scsi_Cmnd
));
1448 SCpnt
->device
= SDpnt
;
1449 SCpnt
->target
= SDpnt
->id
;
1450 SCpnt
->lun
= SDpnt
->lun
;
1451 SCpnt
->channel
= SDpnt
->channel
;
1452 SCpnt
->request
.rq_status
= RQ_INACTIVE
;
1454 SCpnt
->old_use_sg
= 0;
1455 SCpnt
->old_cmd_len
= 0;
1456 SCpnt
->underflow
= 0;
1457 SCpnt
->old_underflow
= 0;
1458 SCpnt
->transfersize
= 0;
1460 SCpnt
->serial_number
= 0;
1461 SCpnt
->serial_number_at_timeout
= 0;
1462 SCpnt
->host_scribble
= NULL
;
1463 SCpnt
->next
= SDpnt
->device_queue
;
1464 SDpnt
->device_queue
= SCpnt
;
1465 SCpnt
->state
= SCSI_STATE_UNUSED
;
1466 SCpnt
->owner
= SCSI_OWNER_NOBODY
;
1468 if (j
< SDpnt
->queue_depth
) { /* low on space (D.Gilbert 990424) */
1469 printk(KERN_WARNING
"scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1470 SDpnt
->queue_depth
, j
);
1471 SDpnt
->queue_depth
= j
;
1472 SDpnt
->has_cmdblocks
= (0 != j
);
1474 SDpnt
->has_cmdblocks
= 1;
1476 spin_unlock_irqrestore(&device_request_lock
, flags
);
1479 static int proc_scsi_gen_write(struct file
* file
, const char * buf
,
1480 unsigned long length
, void *data
);
1482 void __init
scsi_host_no_insert(char *str
, int n
)
1484 Scsi_Host_Name
*shn
, *shn2
;
1488 if (len
&& (shn
= (Scsi_Host_Name
*) kmalloc(sizeof(Scsi_Host_Name
), GFP_ATOMIC
))) {
1489 if ((shn
->name
= kmalloc(len
+1, GFP_ATOMIC
))) {
1490 strncpy(shn
->name
, str
, len
);
1493 shn
->host_registered
= 0;
1494 shn
->loaded_as_module
= 1; /* numbers shouldn't be freed in any case */
1496 if (scsi_host_no_list
) {
1497 for (shn2
= scsi_host_no_list
;shn2
->next
;shn2
= shn2
->next
)
1502 scsi_host_no_list
= shn
;
1503 max_scsi_hosts
= n
+1;
1506 kfree((char *) shn
);
1510 #ifdef CONFIG_PROC_FS
1511 static int scsi_proc_info(char *buffer
, char **start
, off_t offset
, int length
)
1514 struct Scsi_Host
*HBA_ptr
;
1520 * First, see if there are any attached devices or not.
1522 for (HBA_ptr
= scsi_hostlist
; HBA_ptr
; HBA_ptr
= HBA_ptr
->next
) {
1523 if (HBA_ptr
->host_queue
!= NULL
) {
1527 size
= sprintf(buffer
+ len
, "Attached devices: %s\n", (HBA_ptr
) ? "" : "none");
1530 for (HBA_ptr
= scsi_hostlist
; HBA_ptr
; HBA_ptr
= HBA_ptr
->next
) {
1532 size
+= sprintf(buffer
+ len
, "scsi%2d: %s\n", (int) HBA_ptr
->host_no
,
1533 HBA_ptr
->hostt
->procname
);
1537 for (scd
= HBA_ptr
->host_queue
; scd
; scd
= scd
->next
) {
1538 proc_print_scsidevice(scd
, buffer
, &size
, len
);
1546 if (pos
> offset
+ length
)
1552 *start
= buffer
+ (offset
- begin
); /* Start of wanted data */
1553 len
-= (offset
- begin
); /* Start slop */
1555 len
= length
; /* Ending slop */
1559 static int proc_scsi_gen_write(struct file
* file
, const char * buf
,
1560 unsigned long length
, void *data
)
1562 struct Scsi_Device_Template
*SDTpnt
;
1564 struct Scsi_Host
*HBA_ptr
;
1566 int host
, channel
, id
, lun
;
1570 if (!buf
|| length
>PAGE_SIZE
)
1573 if (!(buffer
= (char *) __get_free_page(GFP_KERNEL
)))
1575 copy_from_user(buffer
, buf
, length
);
1578 if (length
< 11 || strncmp("scsi", buffer
, 4))
1582 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1583 * to dump status of all scsi commands. The number is used to specify the level
1584 * of detail in the dump.
1586 if (!strncmp("dump", buffer
+ 5, 4)) {
1594 level
= simple_strtoul(p
, NULL
, 0);
1595 scsi_dump_status(level
);
1598 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1599 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1600 * llcomplete,hlqueue,hlcomplete]
1602 #ifdef CONFIG_SCSI_LOGGING /* { */
1604 if (!strncmp("log", buffer
+ 5, 3)) {
1610 while (*p
!= ' ' && *p
!= '\t' && *p
!= '\0') {
1615 if (strncmp(token
, "all", 3) == 0) {
1617 * Turn on absolutely everything.
1619 scsi_logging_level
= ~0;
1620 } else if (strncmp(token
, "none", 4) == 0) {
1622 * Turn off absolutely everything.
1624 scsi_logging_level
= 0;
1631 level
= simple_strtoul(p
, NULL
, 0);
1634 * Now figure out what to do with it.
1636 if (strcmp(token
, "error") == 0) {
1637 SCSI_SET_ERROR_RECOVERY_LOGGING(level
);
1638 } else if (strcmp(token
, "timeout") == 0) {
1639 SCSI_SET_TIMEOUT_LOGGING(level
);
1640 } else if (strcmp(token
, "scan") == 0) {
1641 SCSI_SET_SCAN_BUS_LOGGING(level
);
1642 } else if (strcmp(token
, "mlqueue") == 0) {
1643 SCSI_SET_MLQUEUE_LOGGING(level
);
1644 } else if (strcmp(token
, "mlcomplete") == 0) {
1645 SCSI_SET_MLCOMPLETE_LOGGING(level
);
1646 } else if (strcmp(token
, "llqueue") == 0) {
1647 SCSI_SET_LLQUEUE_LOGGING(level
);
1648 } else if (strcmp(token
, "llcomplete") == 0) {
1649 SCSI_SET_LLCOMPLETE_LOGGING(level
);
1650 } else if (strcmp(token
, "hlqueue") == 0) {
1651 SCSI_SET_HLQUEUE_LOGGING(level
);
1652 } else if (strcmp(token
, "hlcomplete") == 0) {
1653 SCSI_SET_HLCOMPLETE_LOGGING(level
);
1654 } else if (strcmp(token
, "ioctl") == 0) {
1655 SCSI_SET_IOCTL_LOGGING(level
);
1661 printk(KERN_INFO
"scsi logging level set to 0x%8.8x\n", scsi_logging_level
);
1663 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1666 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1667 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1668 * Consider this feature BETA.
1669 * CAUTION: This is not for hotplugging your peripherals. As
1670 * SCSI was not designed for this you could damage your
1672 * However perhaps it is legal to switch on an
1673 * already connected device. It is perhaps not
1674 * guaranteed this device doesn't corrupt an ongoing data transfer.
1676 if (!strncmp("add-single-device", buffer
+ 5, 17)) {
1679 host
= simple_strtoul(p
, &p
, 0);
1680 channel
= simple_strtoul(p
+ 1, &p
, 0);
1681 id
= simple_strtoul(p
+ 1, &p
, 0);
1682 lun
= simple_strtoul(p
+ 1, &p
, 0);
1684 printk(KERN_INFO
"scsi singledevice %d %d %d %d\n", host
, channel
,
1687 for (HBA_ptr
= scsi_hostlist
; HBA_ptr
; HBA_ptr
= HBA_ptr
->next
) {
1688 if (HBA_ptr
->host_no
== host
) {
1696 for (scd
= HBA_ptr
->host_queue
; scd
; scd
= scd
->next
) {
1697 if ((scd
->channel
== channel
1699 && scd
->lun
== lun
)) {
1706 goto out
; /* We do not yet support unplugging */
1708 scan_scsis(HBA_ptr
, 1, channel
, id
, lun
);
1710 /* FIXME (DB) This assumes that the queue_depth routines can be used
1711 in this context as well, while they were all designed to be
1712 called only once after the detect routine. (DB) */
1713 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1714 it is called before build_commandblocks() */
1720 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1721 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1723 * Consider this feature pre-BETA.
1725 * CAUTION: This is not for hotplugging your peripherals. As
1726 * SCSI was not designed for this you could damage your
1727 * hardware and thoroughly confuse the SCSI subsystem.
1730 else if (!strncmp("remove-single-device", buffer
+ 5, 20)) {
1733 host
= simple_strtoul(p
, &p
, 0);
1734 channel
= simple_strtoul(p
+ 1, &p
, 0);
1735 id
= simple_strtoul(p
+ 1, &p
, 0);
1736 lun
= simple_strtoul(p
+ 1, &p
, 0);
1739 for (HBA_ptr
= scsi_hostlist
; HBA_ptr
; HBA_ptr
= HBA_ptr
->next
) {
1740 if (HBA_ptr
->host_no
== host
) {
1748 for (scd
= HBA_ptr
->host_queue
; scd
; scd
= scd
->next
) {
1749 if ((scd
->channel
== channel
1751 && scd
->lun
== lun
)) {
1757 goto out
; /* there is no such device attached */
1760 if (scd
->access_count
)
1763 SDTpnt
= scsi_devicelist
;
1764 while (SDTpnt
!= NULL
) {
1766 (*SDTpnt
->detach
) (scd
);
1767 SDTpnt
= SDTpnt
->next
;
1770 if (scd
->attached
== 0) {
1772 * Nobody is using this device any more.
1773 * Free all of the command structures.
1775 if (HBA_ptr
->hostt
->revoke
)
1776 HBA_ptr
->hostt
->revoke(scd
);
1777 devfs_unregister (scd
->de
);
1778 scsi_release_commandblocks(scd
);
1780 /* Now we can remove the device structure */
1781 if (scd
->next
!= NULL
)
1782 scd
->next
->prev
= scd
->prev
;
1784 if (scd
->prev
!= NULL
)
1785 scd
->prev
->next
= scd
->next
;
1787 if (HBA_ptr
->host_queue
== scd
) {
1788 HBA_ptr
->host_queue
= scd
->next
;
1790 blk_cleanup_queue(&scd
->request_queue
);
1791 kfree((char *) scd
);
1799 free_page((unsigned long) buffer
);
1805 * This entry point should be called by a driver if it is trying
1806 * to add a low level scsi driver to the system.
1808 static int scsi_register_host(Scsi_Host_Template
* tpnt
)
1811 struct Scsi_Host
*shpnt
;
1813 struct Scsi_Device_Template
*sdtpnt
;
1815 unsigned long flags
;
1816 int out_of_space
= 0;
1818 if (tpnt
->next
|| !tpnt
->detect
)
1819 return 1; /* Must be already loaded, or
1820 * no detect routine available
1822 pcount
= next_scsi_host
;
1824 /* The detect routine must carefully spinunlock/spinlock if
1825 it enables interrupts, since all interrupt handlers do
1827 All lame drivers are going to fail due to the following
1828 spinlock. For the time beeing let's use it only for drivers
1829 using the new scsi code. NOTE: the detect routine could
1830 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1832 if (tpnt
->use_new_eh_code
) {
1833 spin_lock_irqsave(&io_request_lock
, flags
);
1834 tpnt
->present
= tpnt
->detect(tpnt
);
1835 spin_unlock_irqrestore(&io_request_lock
, flags
);
1837 tpnt
->present
= tpnt
->detect(tpnt
);
1839 if (tpnt
->present
) {
1840 if (pcount
== next_scsi_host
) {
1841 if (tpnt
->present
> 1) {
1842 printk(KERN_ERR
"scsi: Failure to register low-level scsi driver");
1843 scsi_unregister_host(tpnt
);
1847 * The low-level driver failed to register a driver.
1848 * We can do this now.
1850 if(scsi_register(tpnt
, 0)==NULL
)
1852 printk(KERN_ERR
"scsi: register failed.\n");
1853 scsi_unregister_host(tpnt
);
1857 tpnt
->next
= scsi_hosts
; /* Add to the linked list */
1860 /* Add the new driver to /proc/scsi */
1861 #ifdef CONFIG_PROC_FS
1862 build_proc_dir_entries(tpnt
);
1867 * Add the kernel threads for each host adapter that will
1868 * handle error correction.
1870 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
1871 if (shpnt
->hostt
== tpnt
&& shpnt
->hostt
->use_new_eh_code
) {
1872 DECLARE_MUTEX_LOCKED(sem
);
1874 shpnt
->eh_notify
= &sem
;
1875 kernel_thread((int (*)(void *)) scsi_error_handler
,
1879 * Now wait for the kernel error thread to initialize itself
1880 * as it might be needed when we scan the bus.
1883 shpnt
->eh_notify
= NULL
;
1887 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
1888 if (shpnt
->hostt
== tpnt
) {
1890 name
= tpnt
->info(shpnt
);
1894 printk(KERN_INFO
"scsi%d : %s\n", /* And print a little message */
1895 shpnt
->host_no
, name
);
1899 /* The next step is to call scan_scsis here. This generates the
1900 * Scsi_Devices entries
1902 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
1903 if (shpnt
->hostt
== tpnt
) {
1904 scan_scsis(shpnt
, 0, 0, 0, 0);
1905 if (shpnt
->select_queue_depths
!= NULL
) {
1906 (shpnt
->select_queue_depths
) (shpnt
, shpnt
->host_queue
);
1911 for (sdtpnt
= scsi_devicelist
; sdtpnt
; sdtpnt
= sdtpnt
->next
) {
1912 if (sdtpnt
->init
&& sdtpnt
->dev_noticed
)
1917 * Next we create the Scsi_Cmnd structures for this host
1919 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
1920 for (SDpnt
= shpnt
->host_queue
; SDpnt
; SDpnt
= SDpnt
->next
)
1921 if (SDpnt
->host
->hostt
== tpnt
) {
1922 for (sdtpnt
= scsi_devicelist
; sdtpnt
; sdtpnt
= sdtpnt
->next
)
1924 (*sdtpnt
->attach
) (SDpnt
);
1925 if (SDpnt
->attached
) {
1926 scsi_build_commandblocks(SDpnt
);
1927 if (0 == SDpnt
->has_cmdblocks
)
1934 * Now that we have all of the devices, resize the DMA pool,
1937 scsi_resize_dma_pool();
1940 /* This does any final handling that is required. */
1941 for (sdtpnt
= scsi_devicelist
; sdtpnt
; sdtpnt
= sdtpnt
->next
) {
1942 if (sdtpnt
->finish
&& sdtpnt
->nr_dev
) {
1943 (*sdtpnt
->finish
) ();
1947 #if defined(USE_STATIC_SCSI_MEMORY)
1948 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
1949 (scsi_memory_upper_value
- scsi_memory_lower_value
) / 1024,
1950 (scsi_init_memory_start
- scsi_memory_lower_value
) / 1024,
1951 (scsi_memory_upper_value
- scsi_init_memory_start
) / 1024);
1957 scsi_unregister_host(tpnt
); /* easiest way to clean up?? */
1964 * Similarly, this entry point should be called by a loadable module if it
1965 * is trying to remove a low level scsi driver from the system.
1967 * Note - there is a fatal flaw in the deregister module function.
1968 * There is no way to return a code that says 'I cannot be unloaded now'.
1969 * The system relies entirely upon usage counts that are maintained,
1970 * and the assumption is that if the usage count is 0, then the module
1973 static void scsi_unregister_host(Scsi_Host_Template
* tpnt
)
1976 int pcount0
, pcount
;
1979 Scsi_Device
*SDpnt1
;
1980 struct Scsi_Device_Template
*sdtpnt
;
1981 struct Scsi_Host
*sh1
;
1982 struct Scsi_Host
*shpnt
;
1983 char name
[10]; /* host_no>=10^9? I don't think so. */
1986 * First verify that this host adapter is completely free with no pending
1989 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
1990 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
1991 SDpnt
= SDpnt
->next
) {
1992 if (SDpnt
->host
->hostt
== tpnt
1993 && SDpnt
->host
->hostt
->module
1994 && GET_USE_COUNT(SDpnt
->host
->hostt
->module
))
1997 * FIXME(eric) - We need to find a way to notify the
1998 * low level driver that we are shutting down - via the
1999 * special device entry that still needs to get added.
2001 * Is detach interface below good enough for this?
2007 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2008 * to help prevent race conditions where other hosts/processors could try and
2009 * get in and queue a command.
2011 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2012 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2013 SDpnt
= SDpnt
->next
) {
2014 if (SDpnt
->host
->hostt
== tpnt
)
2015 SDpnt
->online
= FALSE
;
2020 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2021 if (shpnt
->hostt
!= tpnt
) {
2024 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2025 SDpnt
= SDpnt
->next
) {
2027 * Loop over all of the commands associated with the device. If any of
2028 * them are busy, then set the state back to inactive and bail.
2030 for (SCpnt
= SDpnt
->device_queue
; SCpnt
;
2031 SCpnt
= SCpnt
->next
) {
2032 online_status
= SDpnt
->online
;
2033 SDpnt
->online
= FALSE
;
2034 if (SCpnt
->request
.rq_status
!= RQ_INACTIVE
) {
2035 printk(KERN_ERR
"SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2036 SCpnt
->request
.rq_status
, SCpnt
->target
, SCpnt
->pid
,
2037 SCpnt
->state
, SCpnt
->owner
);
2038 for (SDpnt1
= shpnt
->host_queue
; SDpnt1
;
2039 SDpnt1
= SDpnt1
->next
) {
2040 for (SCpnt
= SDpnt1
->device_queue
; SCpnt
;
2041 SCpnt
= SCpnt
->next
)
2042 if (SCpnt
->request
.rq_status
== RQ_SCSI_DISCONNECTING
)
2043 SCpnt
->request
.rq_status
= RQ_INACTIVE
;
2045 SDpnt
->online
= online_status
;
2046 printk(KERN_ERR
"Device busy???\n");
2050 * No, this device is really free. Mark it as such, and
2053 SCpnt
->state
= SCSI_STATE_DISCONNECTING
;
2054 SCpnt
->request
.rq_status
= RQ_SCSI_DISCONNECTING
; /* Mark as busy */
2058 /* Next we detach the high level drivers from the Scsi_Device structures */
2060 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2061 if (shpnt
->hostt
!= tpnt
) {
2064 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2065 SDpnt
= SDpnt
->next
) {
2066 for (sdtpnt
= scsi_devicelist
; sdtpnt
; sdtpnt
= sdtpnt
->next
)
2068 (*sdtpnt
->detach
) (SDpnt
);
2070 /* If something still attached, punt */
2071 if (SDpnt
->attached
) {
2072 printk(KERN_ERR
"Attached usage count = %d\n", SDpnt
->attached
);
2075 devfs_unregister (SDpnt
->de
);
2080 * Next, kill the kernel error recovery thread for this host.
2082 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2083 if (shpnt
->hostt
== tpnt
2084 && shpnt
->hostt
->use_new_eh_code
2085 && shpnt
->ehandler
!= NULL
) {
2086 DECLARE_MUTEX_LOCKED(sem
);
2088 shpnt
->eh_notify
= &sem
;
2089 send_sig(SIGHUP
, shpnt
->ehandler
, 1);
2091 shpnt
->eh_notify
= NULL
;
2095 /* Next we free up the Scsi_Cmnd structures for this host */
2097 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2098 if (shpnt
->hostt
!= tpnt
) {
2101 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2102 SDpnt
= shpnt
->host_queue
) {
2103 scsi_release_commandblocks(SDpnt
);
2105 blk_cleanup_queue(&SDpnt
->request_queue
);
2106 /* Next free up the Scsi_Device structures for this host */
2107 shpnt
->host_queue
= SDpnt
->next
;
2108 kfree((char *) SDpnt
);
2113 /* Next we go through and remove the instances of the individual hosts
2114 * that were detected */
2116 pcount0
= next_scsi_host
;
2117 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= sh1
) {
2119 if (shpnt
->hostt
!= tpnt
)
2121 pcount
= next_scsi_host
;
2122 /* Remove the /proc/scsi directory entry */
2123 sprintf(name
,"%d",shpnt
->host_no
);
2124 remove_proc_entry(name
, tpnt
->proc_dir
);
2126 (*tpnt
->release
) (shpnt
);
2128 /* This is the default case for the release function.
2129 * It should do the right thing for most correctly
2130 * written host adapters.
2133 free_irq(shpnt
->irq
, NULL
);
2134 if (shpnt
->dma_channel
!= 0xff)
2135 free_dma(shpnt
->dma_channel
);
2136 if (shpnt
->io_port
&& shpnt
->n_io_port
)
2137 release_region(shpnt
->io_port
, shpnt
->n_io_port
);
2139 if (pcount
== next_scsi_host
)
2140 scsi_unregister(shpnt
);
2145 * If there are absolutely no more hosts left, it is safe
2146 * to completely nuke the DMA pool. The resize operation will
2147 * do the right thing and free everything.
2150 scsi_resize_dma_pool();
2152 if (pcount0
!= next_scsi_host
)
2153 printk(KERN_INFO
"scsi : %d host%s left.\n", next_scsi_host
,
2154 (next_scsi_host
== 1) ? "" : "s");
2156 #if defined(USE_STATIC_SCSI_MEMORY)
2157 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2158 (scsi_memory_upper_value
- scsi_memory_lower_value
) / 1024,
2159 (scsi_init_memory_start
- scsi_memory_lower_value
) / 1024,
2160 (scsi_memory_upper_value
- scsi_init_memory_start
) / 1024);
2164 * Remove it from the linked list and /proc if all
2165 * hosts were successfully removed (ie preset == 0)
2167 if (!tpnt
->present
) {
2168 Scsi_Host_Template
**SHTp
= &scsi_hosts
;
2169 Scsi_Host_Template
*SHT
;
2171 while ((SHT
= *SHTp
) != NULL
) {
2174 remove_proc_entry(tpnt
->proc_name
, proc_scsi
);
2183 static int scsi_unregister_device(struct Scsi_Device_Template
*tpnt
);
2186 * This entry point should be called by a loadable module if it is trying
2187 * add a high level scsi driver to the system.
2189 static int scsi_register_device_module(struct Scsi_Device_Template
*tpnt
)
2192 struct Scsi_Host
*shpnt
;
2193 int out_of_space
= 0;
2198 scsi_register_device(tpnt
);
2200 * First scan the devices that we know about, and see if we notice them.
2203 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2204 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2205 SDpnt
= SDpnt
->next
) {
2207 SDpnt
->attached
+= (*tpnt
->detect
) (SDpnt
);
2212 * If any of the devices would match this driver, then perform the
2215 if (tpnt
->init
&& tpnt
->dev_noticed
)
2216 if ((*tpnt
->init
) ())
2220 * Now actually connect the devices to the new driver.
2222 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2223 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2224 SDpnt
= SDpnt
->next
) {
2226 (*tpnt
->attach
) (SDpnt
);
2228 * If this driver attached to the device, and don't have any
2229 * command blocks for this device, allocate some.
2231 if (SDpnt
->attached
&& SDpnt
->has_cmdblocks
== 0) {
2232 SDpnt
->online
= TRUE
;
2233 scsi_build_commandblocks(SDpnt
);
2234 if (0 == SDpnt
->has_cmdblocks
)
2241 * This does any final handling that is required.
2243 if (tpnt
->finish
&& tpnt
->nr_dev
)
2246 scsi_resize_dma_pool();
2250 scsi_unregister_device(tpnt
); /* easiest way to clean up?? */
2256 static int scsi_unregister_device(struct Scsi_Device_Template
*tpnt
)
2259 struct Scsi_Host
*shpnt
;
2260 struct Scsi_Device_Template
*spnt
;
2261 struct Scsi_Device_Template
*prev_spnt
;
2264 * If we are busy, this is not going to fly.
2266 if (GET_USE_COUNT(tpnt
->module
) != 0)
2270 * Next, detach the devices from the driver.
2273 for (shpnt
= scsi_hostlist
; shpnt
; shpnt
= shpnt
->next
) {
2274 for (SDpnt
= shpnt
->host_queue
; SDpnt
;
2275 SDpnt
= SDpnt
->next
) {
2277 (*tpnt
->detach
) (SDpnt
);
2278 if (SDpnt
->attached
== 0) {
2279 SDpnt
->online
= FALSE
;
2282 * Nobody is using this device any more. Free all of the
2283 * command structures.
2285 scsi_release_commandblocks(SDpnt
);
2290 * Extract the template from the linked list.
2292 spnt
= scsi_devicelist
;
2294 while (spnt
!= tpnt
) {
2298 if (prev_spnt
== NULL
)
2299 scsi_devicelist
= tpnt
->next
;
2301 prev_spnt
->next
= spnt
->next
;
2305 * Final cleanup for the driver is done in the driver sources in the
2312 /* This function should be called by drivers which needs to register
2313 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2314 * main device/hosts register function /mathiasen
2316 int scsi_register_module(int module_type
, void *ptr
)
2318 switch (module_type
) {
2319 case MODULE_SCSI_HA
:
2320 return scsi_register_host((Scsi_Host_Template
*) ptr
);
2322 /* Load upper level device handler of some kind */
2323 case MODULE_SCSI_DEV
:
2325 if (scsi_hosts
== NULL
)
2326 request_module("scsi_hostadapter");
2328 return scsi_register_device_module((struct Scsi_Device_Template
*) ptr
);
2329 /* The rest of these are not yet implemented */
2331 /* Load constants.o */
2332 case MODULE_SCSI_CONST
:
2334 /* Load specialized ioctl handler for some device. Intended for
2335 * cdroms that have non-SCSI2 audio command sets. */
2336 case MODULE_SCSI_IOCTL
:
2343 /* Reverse the actions taken above
2345 void scsi_unregister_module(int module_type
, void *ptr
)
2347 switch (module_type
) {
2348 case MODULE_SCSI_HA
:
2349 scsi_unregister_host((Scsi_Host_Template
*) ptr
);
2351 case MODULE_SCSI_DEV
:
2352 scsi_unregister_device((struct Scsi_Device_Template
*) ptr
);
2354 /* The rest of these are not yet implemented. */
2355 case MODULE_SCSI_CONST
:
2356 case MODULE_SCSI_IOCTL
:
2363 #ifdef CONFIG_PROC_FS
/*
 * Function:    scsi_dump_status
 *
 * Purpose:     Brain dump of scsi system, used for problem solving.
 *
 * Arguments:   level - used to indicate level of detail.
 *
 * Notes:       The level isn't used at all yet, but we need to find some
 *              way of sensibly logging varying degrees of information.
 *              A quick one-line display of each command, plus the status
 *              would be most useful.
 *
 *              This does depend upon CONFIG_SCSI_LOGGING - I do want some
 *              way of turning it all off if the user wants a lean and mean
 *              kernel.  It would probably also be useful to allow the user
 *              to specify one single host to be dumped.  A second argument
 *              to the function would be useful for that purpose.
 *
 *              FIXME - some formatting of the output into tables would be
 *              very handy.
 */
static void scsi_dump_status(int level)
{
#ifdef CONFIG_SCSI_LOGGING	/* { */
	int i;
	struct Scsi_Host *shpnt;
	Scsi_Cmnd *SCpnt;
	Scsi_Device *SDpnt;

	printk(KERN_INFO "Dump of scsi host parameters:\n");
	i = 0;
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO " %d %d %d : %d %d\n",
		       shpnt->host_failed,
		       shpnt->host_busy,
		       atomic_read(&shpnt->host_active),
		       shpnt->host_blocked,
		       shpnt->host_self_blocked);
	}

	printk(KERN_INFO "\n\n");
	printk(KERN_INFO "Dump of scsi command parameters:\n");
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
				/* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
				/* NOTE(review): the argument list was reconstructed
				 * to match the 20 conversions in the format string;
				 * several source lines were lost — verify the middle
				 * arguments (channel/target/lun, use_sg, retries,
				 * allowed, flags, timeout, cmnd[0], result) against
				 * the original file. */
				printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
				       i++,

				       SCpnt->host->host_no,
				       SCpnt->channel,
				       SCpnt->target,
				       SCpnt->lun,

				       kdevname(SCpnt->request.rq_dev),
				       SCpnt->request.sector,
				       SCpnt->request.nr_sectors,
				       SCpnt->request.current_nr_sectors,
				       SCpnt->request.rq_status,
				       SCpnt->use_sg,

				       SCpnt->retries,
				       SCpnt->allowed,
				       SCpnt->flags,

				       SCpnt->timeout_per_command,
				       SCpnt->timeout,
				       SCpnt->internal_timeout,

				       SCpnt->cmnd[0],
				       SCpnt->sense_buffer[2],
				       SCpnt->result);
			}
		}
	}

	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			/* Now dump the request lists for each block device */
			printk(KERN_INFO "Dump of pending block device requests\n");
			for (i = 0; i < MAX_BLKDEV; i++) {
				struct list_head *queue_head;

				queue_head = &blk_dev[i].request_queue.queue_head;
				if (!list_empty(queue_head)) {
					struct request *req;
					struct list_head *entry;

					printk(KERN_INFO "%d: ", i);
					/* Walk the circular queue_head list once. */
					entry = queue_head->next;
					do {
						req = blkdev_entry_to_request(entry);
						printk("(%s %d %ld %ld %ld) ",
						       kdevname(req->rq_dev),
						       req->cmd,
						       req->sector,
						       req->nr_sectors,
						       req->current_nr_sectors);
					} while ((entry = entry->next) != queue_head);
					printk("\n");
				}
			}
		}
	}
#endif	/* CONFIG_SCSI_LOGGING */	/* } */
}
2467 #endif /* CONFIG_PROC_FS */
2469 static int __init
scsi_host_no_init (char *str
)
2471 static int next_no
= 0;
2476 while (*temp
&& (*temp
!= ':') && (*temp
!= ','))
2482 scsi_host_no_insert(str
, next_no
);
2489 static char *scsihosts
;
2491 MODULE_PARM(scsihosts
, "s");
2492 MODULE_DESCRIPTION("SCSI core");
2495 int __init
scsi_setup(char *str
)
2501 __setup("scsihosts=", scsi_setup
);
2504 static int __init
init_scsi(void)
2506 struct proc_dir_entry
*generic
;
2508 printk(KERN_INFO
"SCSI subsystem driver " REVISION
"\n");
2510 if( scsi_init_minimal_dma_pool() != 0 )
2516 * This makes /proc/scsi and /proc/scsi/scsi visible.
2518 #ifdef CONFIG_PROC_FS
2519 proc_scsi
= proc_mkdir("scsi", 0);
2521 printk (KERN_ERR
"cannot init /proc/scsi\n");
2524 generic
= create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info
);
2526 printk (KERN_ERR
"cannot init /proc/scsi/scsi\n");
2527 remove_proc_entry("scsi", 0);
2530 generic
->write_proc
= proc_scsi_gen_write
;
2533 scsi_devfs_handle
= devfs_mk_dir (NULL
, "scsi", NULL
);
2535 printk(KERN_INFO
"scsi: host order: %s\n", scsihosts
);
2536 scsi_host_no_init (scsihosts
);
2538 * This is where the processing takes place for most everything
2539 * when commands are completed.
2541 init_bh(SCSI_BH
, scsi_bottom_half_handler
);
2546 static void __exit
exit_scsi(void)
2548 Scsi_Host_Name
*shn
, *shn2
= NULL
;
2552 devfs_unregister (scsi_devfs_handle
);
2553 for (shn
= scsi_host_no_list
;shn
;shn
= shn
->next
) {
2563 #ifdef CONFIG_PROC_FS
2564 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2565 remove_proc_entry ("scsi/scsi", 0);
2566 remove_proc_entry ("scsi", 0);
2570 * Free up the DMA pool.
2572 scsi_resize_dma_pool();
module_init(init_scsi);
module_exit(exit_scsi);
2580 * Function: scsi_get_host_dev()
2582 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2584 * Arguments: SHpnt - Host that needs a Scsi_Device
2586 * Lock status: None assumed.
2588 * Returns: The Scsi_Device or NULL
2592 Scsi_Device
* scsi_get_host_dev(struct Scsi_Host
* SHpnt
)
2594 Scsi_Device
* SDpnt
;
2597 * Attach a single Scsi_Device to the Scsi_Host - this should
2598 * be made to look like a "pseudo-device" that points to the
2599 * HA itself. For the moment, we include it at the head of
2600 * the host_queue itself - I don't think we want to show this
2601 * to the HA in select_queue_depths(), as this would probably confuse
2603 * Note - this device is not accessible from any high-level
2604 * drivers (including generics), which is probably not
2605 * optimal. We can add hooks later to attach
2607 SDpnt
= (Scsi_Device
*) kmalloc(sizeof(Scsi_Device
),
2612 memset(SDpnt
, 0, sizeof(Scsi_Device
));
2614 SDpnt
->host
= SHpnt
;
2615 SDpnt
->id
= SHpnt
->this_id
;
2617 SDpnt
->queue_depth
= 1;
2619 scsi_build_commandblocks(SDpnt
);
2621 scsi_initialize_queue(SDpnt
, SHpnt
);
2623 SDpnt
->online
= TRUE
;
2626 * Initialize the object that we will use to wait for command blocks.
2628 init_waitqueue_head(&SDpnt
->scpnt_wait
);
2633 * Function: scsi_free_host_dev()
2635 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2637 * Arguments: SHpnt - Host that needs a Scsi_Device
2639 * Lock status: None assumed.
2645 void scsi_free_host_dev(Scsi_Device
* SDpnt
)
2647 if( (unsigned char) SDpnt
->id
!= (unsigned char) SDpnt
->host
->this_id
)
2649 panic("Attempt to delete wrong device\n");
2652 blk_cleanup_queue(&SDpnt
->request_queue
);
2655 * We only have a single SCpnt attached to this device. Free
2658 scsi_release_commandblocks(SDpnt
);
2663 * Overrides for Emacs so that we follow Linus's tabbing style.
2664 * Emacs will notice this stuff at the end of the file and automatically
2665 * adjust the settings for this buffer only. This must remain at the end
2667 * ---------------------------------------------------------------------------
2670 * c-brace-imaginary-offset: 0
2671 * c-brace-offset: -4
2672 * c-argdecl-indent: 4
2673 * c-label-offset: -4
2674 * c-continued-statement-offset: 4
2675 * c-continued-brace-offset: 0
2676 * indent-tabs-mode: nil