drivers/scsi/scsi.c (linux-2.6/linux-mips.git)
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
9 * <drew@colorado.edu>
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
18 * enhancements.
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25 * Bjorn Ekwall <bj0rn@blox.se>
26 * (changed to kmod)
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
32 * Converted cli() code to spinlocks, Ingo Molnar
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 * out_of_space hacks, D. Gilbert (dpg) 990608
39 #include <linux/config.h>
40 #include <linux/module.h>
42 #include <linux/sched.h>
43 #include <linux/timer.h>
44 #include <linux/string.h>
45 #include <linux/malloc.h>
46 #include <linux/ioport.h>
47 #include <linux/kernel.h>
48 #include <linux/stat.h>
49 #include <linux/blk.h>
50 #include <linux/interrupt.h>
51 #include <linux/delay.h>
52 #include <linux/init.h>
54 #define __KERNEL_SYSCALLS__
56 #include <linux/unistd.h>
57 #include <linux/spinlock.h>
59 #include <asm/system.h>
60 #include <asm/irq.h>
61 #include <asm/dma.h>
62 #include <asm/uaccess.h>
64 #include "scsi.h"
65 #include "hosts.h"
66 #include "constants.h"
68 #ifdef CONFIG_KMOD
69 #include <linux/kmod.h>
70 #endif
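/*
 * Note on the kmod hook mentioned in the header comment above: when no
 * low-level host adapter driver is present, the SCSI code can issue
 * request_module("scsi_hostadapter"); the "alias scsi_hostadapter
 * your_hostadapter" line in /etc/modules.conf then tells kmod which
 * driver module to load. (Descriptive note only; exactly which layer
 * issues the call varies between kernel versions.)
 */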
72 #undef USE_STATIC_SCSI_MEMORY
74 struct proc_dir_entry *proc_scsi = NULL;
76 #ifdef CONFIG_PROC_FS
77 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
78 static void scsi_dump_status(int level);
79 #endif
82 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
86 * Definitions and constants.
89 #define MIN_RESET_DELAY (2*HZ)
91 /* Do not call reset on error if we just did a reset within 15 sec. */
92 #define MIN_RESET_PERIOD (15*HZ)
96 * Data declarations.
98 unsigned long scsi_pid = 0;
99 Scsi_Cmnd *last_cmnd = NULL;
100 /* Command groups 3 and 4 are reserved and should never be used. */
101 const unsigned char scsi_command_size[8] =
103 6, 10, 10, 12,
104 12, 12, 10, 10
106 static unsigned long serial_number = 0;
107 static Scsi_Cmnd *scsi_bh_queue_head = NULL;
108 static Scsi_Cmnd *scsi_bh_queue_tail = NULL;
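/*
 * A minimal sketch of how the scsi_command_size[] table above is consulted:
 * the command group is carried in the top three bits of the opcode, so the
 * CDB length can be looked up directly (this is what the COMMAND_SIZE()
 * macro used later in this file boils down to). example_cdb_length() is an
 * illustration only, not part of the driver.
 */
#if 0
static inline int example_cdb_length(unsigned char opcode)
{
	return scsi_command_size[(opcode >> 5) & 7];	/* e.g. READ_10 (0x28) -> group 1 -> 10 bytes */
}
#endif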
111 * Note - the initial logging level can be set here to log events at boot time.
112 * After the system is up, you may enable logging via the /proc interface.
114 unsigned int scsi_logging_level = 0;
116 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
118 "Direct-Access ",
119 "Sequential-Access",
120 "Printer ",
121 "Processor ",
122 "WORM ",
123 "CD-ROM ",
124 "Scanner ",
125 "Optical Device ",
126 "Medium Changer ",
127 "Communications ",
128 "Unknown ",
129 "Unknown ",
130 "Unknown ",
131 "Enclosure ",
135 * Function prototypes.
137 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
138 void scsi_build_commandblocks(Scsi_Device * SDpnt);
141 * These are the interface to the old error handling code. It should go away
142 * someday soon.
144 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
145 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
149 * Function: scsi_initialize_queue()
151 * Purpose: Selects queue handler function for a device.
153 * Arguments: SDpnt - device for which we need a handler function.
155 * Returns: Nothing
157 * Lock status: No locking assumed or required.
159 * Notes: Most devices will end up using scsi_request_fn for the
160 * handler function (at least as things are done now).
161 * The "block" feature basically ensures that only one of
162 * the blocked hosts is active at one time, mainly to work around
163 * buggy DMA chipsets where the memory gets starved.
164 * For this case, we have a special handler function, which
165 * does some checks and ultimately calls scsi_request_fn.
167 * The single_lun feature is a similar special case.
169 * We handle these things by stacking the handlers. The
170 * special case handlers simply check a few conditions,
171 * and return if they are not supposed to do anything.
172 * In the event that things are OK, then they call the next
173 * handler in the list - ultimately they call scsi_request_fn
174 * to do the dirty deed.
176 void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
177 blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
178 blk_queue_headactive(&SDpnt->request_queue, 0);
179 SDpnt->request_queue.queuedata = (void *) SDpnt;
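/*
 * Shape of the "stacked" handlers described in the note above (sketch only,
 * assuming scsi_request_fn() as the final handler; example_blocked_request_fn
 * and the device_blocked test are placeholders for the real special-case
 * handlers and their checks):
 */
#if 0
static void example_blocked_request_fn(request_queue_t * q)
{
	Scsi_Device *SDpnt = (Scsi_Device *) q->queuedata;

	if (SDpnt->device_blocked) {
		return;			/* not our turn yet - do nothing */
	}
	scsi_request_fn(q);		/* ultimately do the dirty deed */
}
#endif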
182 #ifdef MODULE
183 MODULE_PARM(scsi_logging_level, "i");
184 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
186 #else
188 static int __init scsi_logging_setup(char *str)
190 int tmp;
192 if (get_option(&str, &tmp) == 1) {
193 scsi_logging_level = (tmp ? ~0 : 0);
194 return 1;
195 } else {
196 printk("scsi_logging_setup : usage scsi_logging_level=n "
197 "(n should be 0 or non-zero)\n");
198 return 0;
202 __setup("scsi_logging=", scsi_logging_setup);
204 #endif
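/*
 * Usage sketch for the two knobs above: as a module the level is passed as a
 * parameter (typically "modprobe scsi_mod scsi_logging_level=1"); built in,
 * it is set from the kernel command line ("scsi_logging=1"). Either way a
 * nonzero value currently turns all logging bits on (~0), as the code above
 * shows; finer-grained control is available later via /proc/scsi/scsi.
 */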
207 * Issue a command and wait for it to complete
210 static void scsi_wait_done(Scsi_Cmnd * SCpnt)
212 struct request *req;
214 req = &SCpnt->request;
215 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
217 if (req->sem != NULL) {
218 up(req->sem);
223 * This lock protects the freelist for all devices on the system.
224 * We could make this finer grained by having a single lock per
225 * device if it is ever found that there is excessive contention
226 * on this lock.
228 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
231 * Used to protect insertion into and removal from the queue of
232 * commands to be processed by the bottom half handler.
234 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
237 * Function: scsi_allocate_request
239 * Purpose: Allocate a request descriptor.
241 * Arguments: device - device for which we want a request
243 * Lock status: No locks assumed to be held. This function is SMP-safe.
245 * Returns: Pointer to request block.
247 * Notes: With the new queueing code, it becomes important
248 * to track the difference between a command and a
249 * request. A request is a pending item in the queue that
250 * has not yet reached the top of the queue.
253 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
255 Scsi_Request *SRpnt = NULL;
257 if (!device)
258 panic("No device passed to scsi_allocate_request().\n");
260 SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
261 if( SRpnt == NULL )
263 return NULL;
266 memset(SRpnt, 0, sizeof(Scsi_Request));
267 SRpnt->sr_device = device;
268 SRpnt->sr_host = device->host;
269 SRpnt->sr_magic = SCSI_REQ_MAGIC;
270 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
272 return SRpnt;
276 * Function: scsi_release_request
278 * Purpose: Release a request descriptor.
280 * Arguments: device - device for which we want a request
282 * Lock status: No locks assumed to be held. This function is SMP-safe.
284 * Returns: Pointer to request block.
286 * Notes: With the new queueing code, it becomes important
287 * to track the difference between a command and a
288 * request. A request is a pending item in the queue that
289 * has not yet reached the top of the queue. We still need
290 * to free a request when we are done with it, of course.
292 void scsi_release_request(Scsi_Request * req)
294 if( req->sr_command != NULL )
296 scsi_release_command(req->sr_command);
297 req->sr_command = NULL;
300 kfree(req);
304 * Function: scsi_allocate_device
306 * Purpose: Allocate a command descriptor.
308 * Arguments: device - device for which we want a command descriptor
309 * wait - 1 if we should wait in the event that none
310 * are available.
311 * interruptible - 1 if we should unblock and return NULL
312 * in the event that we must wait, and a signal
313 * arrives.
315 * Lock status: No locks assumed to be held. This function is SMP-safe.
317 * Returns: Pointer to command descriptor.
319 * Notes: Prior to the new queue code, this function was not SMP-safe.
321 * If the wait flag is true, and we are waiting for a free
322 * command block, this function will interrupt and return
323 * NULL in the event that a signal arrives that needs to
324 * be handled.
326 * This function is deprecated, and drivers should be
327 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
330 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
331 int interruptable)
333 struct Scsi_Host *host;
334 Scsi_Cmnd *SCpnt = NULL;
335 Scsi_Device *SDpnt;
336 unsigned long flags;
338 if (!device)
339 panic("No device passed to scsi_allocate_device().\n");
341 host = device->host;
343 spin_lock_irqsave(&device_request_lock, flags);
345 while (1 == 1) {
346 SCpnt = NULL;
347 if (!device->device_blocked) {
348 if (device->single_lun) {
350 * FIXME(eric) - this is not at all optimal. Given that
351 * single lun devices are rare and usually slow
352 * (i.e. CD changers), this is good enough for now, but
353 * we may want to come back and optimize this later.
355 * Scan through all of the devices attached to this
356 * host, and see if any are active or not. If so,
357 * we need to defer this command.
359 * We really need a busy counter per device. This would
360 * allow us to more easily figure out whether we should
361 * do anything here or not.
363 for (SDpnt = host->host_queue;
364 SDpnt;
365 SDpnt = SDpnt->next) {
367 * Only look for other devices on the same bus
368 * with the same target ID.
370 if (SDpnt->channel != device->channel
371 || SDpnt->id != device->id
372 || SDpnt == device) {
373 continue;
375 if( atomic_read(&SDpnt->device_active) != 0)
377 break;
380 if (SDpnt) {
382 * Some other device in this cluster is busy.
383 * If asked to wait, we need to wait, otherwise
384 * return NULL.
386 SCpnt = NULL;
387 break;
391 * Now we can check for a free command block for this device.
393 for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
394 if (SCpnt->request.rq_status == RQ_INACTIVE)
395 break;
399 * If we couldn't find a free command block, and we have been
400 * asked to wait, then do so.
402 if (SCpnt) {
403 break;
406 * If we have been asked to wait for a free block, then
407 * wait here.
409 if (wait) {
410 DECLARE_WAITQUEUE(wait, current);
413 * We need to wait for a free commandblock. We need to
414 * insert ourselves into the list before we release the
415 * lock. This way if a block were released the same
416 * microsecond that we released the lock, the call
417 * to schedule() wouldn't block (well, it might switch,
 418  * but the current task will still be schedulable).
420 add_wait_queue(&device->scpnt_wait, &wait);
421 if( interruptable ) {
422 set_current_state(TASK_INTERRUPTIBLE);
423 } else {
424 set_current_state(TASK_UNINTERRUPTIBLE);
427 spin_unlock_irqrestore(&device_request_lock, flags);
430 * This should block until a device command block
431 * becomes available.
433 schedule();
435 spin_lock_irqsave(&device_request_lock, flags);
437 remove_wait_queue(&device->scpnt_wait, &wait);
439 * FIXME - Isn't this redundant?? Someone
440 * else will have forced the state back to running.
442 set_current_state(TASK_RUNNING);
444 * In the event that a signal has arrived that we need
445 * to consider, then simply return NULL. Everyone
446 * that calls us should be prepared for this
447 * possibility, and pass the appropriate code back
448 * to the user.
450 if( interruptable ) {
451 if (signal_pending(current)) {
452 spin_unlock_irqrestore(&device_request_lock, flags);
453 return NULL;
456 } else {
457 spin_unlock_irqrestore(&device_request_lock, flags);
458 return NULL;
462 SCpnt->request.rq_status = RQ_SCSI_BUSY;
463 SCpnt->request.sem = NULL; /* And no one is waiting for this
464 * to complete */
465 atomic_inc(&SCpnt->host->host_active);
466 atomic_inc(&SCpnt->device->device_active);
468 SCpnt->buffer = NULL;
469 SCpnt->bufflen = 0;
470 SCpnt->request_buffer = NULL;
471 SCpnt->request_bufflen = 0;
473 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
474 SCpnt->old_use_sg = 0;
475 SCpnt->transfersize = 0; /* No default transfer size */
476 SCpnt->cmd_len = 0;
478 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
479 SCpnt->sc_request = NULL;
480 SCpnt->sc_magic = SCSI_CMND_MAGIC;
482 SCpnt->result = 0;
483 SCpnt->underflow = 0; /* Do not flag underflow conditions */
484 SCpnt->old_underflow = 0;
485 SCpnt->resid = 0;
486 SCpnt->state = SCSI_STATE_INITIALIZING;
487 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
489 spin_unlock_irqrestore(&device_request_lock, flags);
491 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
492 SCpnt->target,
493 atomic_read(&SCpnt->host->host_active)));
495 return SCpnt;
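/*
 * Hypothetical caller of the deprecated interface above (names with an
 * example_ prefix are placeholders). A command block obtained this way must
 * eventually be handed back through scsi_release_command() below.
 */
#if 0
static Scsi_Cmnd *example_grab_command(Scsi_Device * SDpnt)
{
	/* wait=1: sleep until a block is free; interruptable=1: give up on a signal */
	Scsi_Cmnd *SCpnt = scsi_allocate_device(SDpnt, 1, 1);

	if (SCpnt == NULL)
		return NULL;	/* a signal arrived while we were waiting */
	return SCpnt;
}
#endif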
499 * Function: scsi_release_command
501 * Purpose: Release a command block.
503 * Arguments: SCpnt - command block we are releasing.
505 * Notes: The command block can no longer be used by the caller once
 506  * this function is called. This is in effect the inverse
507 * of scsi_allocate_device. Note that we also must perform
508 * a couple of additional tasks. We must first wake up any
509 * processes that might have blocked waiting for a command
510 * block, and secondly we must hit the queue handler function
511 * to make sure that the device is busy.
513 * The idea is that a lot of the mid-level internals gunk
514 * gets hidden in this function. Upper level drivers don't
515 * have any chickens to wave in the air to get things to
516 * work reliably.
518 * This function is deprecated, and drivers should be
519 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
521 void scsi_release_command(Scsi_Cmnd * SCpnt)
523 unsigned long flags;
524 Scsi_Device * SDpnt;
526 spin_lock_irqsave(&device_request_lock, flags);
528 SDpnt = SCpnt->device;
530 SCpnt->request.rq_status = RQ_INACTIVE;
531 SCpnt->state = SCSI_STATE_UNUSED;
532 SCpnt->owner = SCSI_OWNER_NOBODY;
533 atomic_dec(&SCpnt->host->host_active);
534 atomic_dec(&SDpnt->device_active);
536 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
537 SCpnt->target,
538 atomic_read(&SCpnt->host->host_active),
539 SCpnt->host->host_failed));
540 if (SCpnt->host->host_failed != 0) {
541 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
542 SCpnt->host->in_recovery,
543 SCpnt->host->eh_active));
546 * If the host is having troubles, then look to see if this was the last
547 * command that might have failed. If so, wake up the error handler.
549 if (SCpnt->host->in_recovery
550 && !SCpnt->host->eh_active
551 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
552 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
553 atomic_read(&SCpnt->host->eh_wait->count)));
554 up(SCpnt->host->eh_wait);
557 spin_unlock_irqrestore(&device_request_lock, flags);
560 * Wake up anyone waiting for this device. Do this after we
561 * have released the lock, as they will need it as soon as
562 * they wake up.
564 wake_up(&SDpnt->scpnt_wait);
567 * Finally, hit the queue request function to make sure that
568 * the device is actually busy if there are requests present.
569 * This won't block - if the device cannot take any more, life
570 * will go on.
573 request_queue_t *q;
575 q = &SDpnt->request_queue;
576 scsi_queue_next_request(q, NULL);
 581  * Function: scsi_dispatch_cmd
583 * Purpose: Dispatch a command to the low-level driver.
585 * Arguments: SCpnt - command block we are dispatching.
587 * Notes:
589 int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
591 #ifdef DEBUG_DELAY
592 unsigned long clock;
593 #endif
594 struct Scsi_Host *host;
595 int rtn = 0;
596 unsigned long flags = 0;
597 unsigned long timeout;
599 ASSERT_LOCK(&io_request_lock, 0);
601 #if DEBUG
602 unsigned long *ret = 0;
603 #ifdef __mips__
604 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
605 #else
606 ret = __builtin_return_address(0);
607 #endif
608 #endif
610 host = SCpnt->host;
612 /* Assign a unique nonzero serial_number. */
613 if (++serial_number == 0)
614 serial_number = 1;
615 SCpnt->serial_number = serial_number;
618 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
619 * we can avoid the drive not being ready.
621 timeout = host->last_reset + MIN_RESET_DELAY;
623 if (host->resetting && time_before(jiffies, timeout)) {
624 int ticks_remaining = timeout - jiffies;
626 * NOTE: This may be executed from within an interrupt
627 * handler! This is bad, but for now, it'll do. The irq
628 * level of the interrupt handler has been masked out by the
629 * platform dependent interrupt handling code already, so the
630 * sti() here will not cause another call to the SCSI host's
631 * interrupt handler (assuming there is one irq-level per
632 * host).
634 while (--ticks_remaining >= 0)
635 mdelay(1 + 999 / HZ);
636 host->resetting = 0;
638 if (host->hostt->use_new_eh_code) {
639 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
640 } else {
641 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
642 scsi_old_times_out);
646 * We will use a queued command if possible, otherwise we will emulate the
647 * queuing and calling of completion function ourselves.
649 SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
650 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
651 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
652 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
654 SCpnt->state = SCSI_STATE_QUEUED;
655 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
656 if (host->can_queue) {
657 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
658 host->hostt->queuecommand));
660 * Use the old error handling code if we haven't converted the driver
661 * to use the new one yet. Note - only the new queuecommand variant
662 * passes a meaningful return value.
664 if (host->hostt->use_new_eh_code) {
665 spin_lock_irqsave(&io_request_lock, flags);
666 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
667 spin_unlock_irqrestore(&io_request_lock, flags);
668 if (rtn != 0) {
669 scsi_delete_timer(SCpnt);
670 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
671 SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
673 } else {
674 spin_lock_irqsave(&io_request_lock, flags);
675 host->hostt->queuecommand(SCpnt, scsi_old_done);
676 spin_unlock_irqrestore(&io_request_lock, flags);
678 } else {
679 int temp;
681 SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
682 spin_lock_irqsave(&io_request_lock, flags);
683 temp = host->hostt->command(SCpnt);
684 SCpnt->result = temp;
685 #ifdef DEBUG_DELAY
686 spin_unlock_irqrestore(&io_request_lock, flags);
687 clock = jiffies + 4 * HZ;
688 while (time_before(jiffies, clock))
689 barrier();
690 printk("done(host = %d, result = %04x) : routine at %p\n",
691 host->host_no, temp, host->hostt->command);
692 spin_lock_irqsave(&io_request_lock, flags);
693 #endif
694 if (host->hostt->use_new_eh_code) {
695 scsi_done(SCpnt);
696 } else {
697 scsi_old_done(SCpnt);
699 spin_unlock_irqrestore(&io_request_lock, flags);
701 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
702 return rtn;
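/*
 * For reference, the low-level entry point that scsi_dispatch_cmd() invokes
 * has the following shape (illustrative skeleton only; example_start_io()
 * stands in for whatever the host adapter driver does to start the I/O):
 */
#if 0
static int example_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
	SCpnt->scsi_done = done;		/* remember the mid-level completion routine */
	if (example_start_io(SCpnt) != 0)
		return SCSI_MLQUEUE_HOST_BUSY;	/* new-eh drivers: mid-level will requeue */
	return 0;
}
#endif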
705 devfs_handle_t scsi_devfs_handle = NULL;
708 * scsi_do_cmd sends all the commands out to the low-level driver. It
709 * handles the specifics required for each low level driver - ie queued
710 * or non queued. It also prevents conflicts when different high level
711 * drivers go for the same host at the same time.
714 void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
715 void *buffer, unsigned bufflen,
716 int timeout, int retries)
718 DECLARE_MUTEX_LOCKED(sem);
720 SRpnt->sr_request.sem = &sem;
721 SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
722 scsi_do_req (SRpnt, (void *) cmnd,
723 buffer, bufflen, scsi_wait_done, timeout, retries);
724 down (&sem);
725 SRpnt->sr_request.sem = NULL;
726 if( SRpnt->sr_command != NULL )
728 scsi_release_command(SRpnt->sr_command);
729 SRpnt->sr_command = NULL;
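/*
 * Putting the pieces above together, an upper-level driver can issue a
 * simple command synchronously like this (sketch only; the function name
 * and error handling are placeholders):
 */
#if 0
static int example_test_unit_ready(Scsi_Device * SDpnt)
{
	unsigned char cmd[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	Scsi_Request *SRpnt = scsi_allocate_request(SDpnt);
	int result;

	if (SRpnt == NULL)
		return -ENOMEM;
	SRpnt->sr_data_direction = SCSI_DATA_NONE;	/* no data phase for this command */
	scsi_wait_req(SRpnt, cmd, NULL, 0, 30 * HZ, 3 /* retries */);
	result = SRpnt->sr_result;
	scsi_release_request(SRpnt);
	return result ? -EIO : 0;
}
#endif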
735 * Function: scsi_do_req
737 * Purpose: Queue a SCSI request
739 * Arguments: SRpnt - command descriptor.
740 * cmnd - actual SCSI command to be performed.
741 * buffer - data buffer.
742 * bufflen - size of data buffer.
743 * done - completion function to be run.
744 * timeout - how long to let it run before timeout.
745 * retries - number of retries we allow.
747 * Lock status: With the new queueing code, this is SMP-safe, and no locks
748 * need be held upon entry. The old queueing code the lock was
749 * assumed to be held upon entry.
751 * Returns: Nothing.
753 * Notes: Prior to the new queue code, this function was not SMP-safe.
754 * Also, this function is now only used for queueing requests
755 * for things like ioctls and character device requests - this
756 * is because we essentially just inject a request into the
757 * queue for the device. Normal block device handling manipulates
758 * the queue directly.
760 void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
761 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
762 int timeout, int retries)
764 Scsi_Device * SDpnt = SRpnt->sr_device;
765 struct Scsi_Host *host = SDpnt->host;
767 ASSERT_LOCK(&io_request_lock, 0);
769 SCSI_LOG_MLQUEUE(4,
771 int i;
772 int target = SDpnt->id;
773 printk("scsi_do_req (host = %d, channel = %d target = %d, "
774 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
775 "retries = %d)\n"
776 "command : ", host->host_no, SDpnt->channel, target, buffer,
777 bufflen, done, timeout, retries);
778 for (i = 0; i < 10; ++i)
779 printk("%02x ", ((unsigned char *) cmnd)[i]);
780 printk("\n");
783 if (!host) {
784 panic("Invalid or not present host.\n");
788 * If the upper level driver is reusing these things, then
789 * we should release the low-level block now. Another one will
790 * be allocated later when this request is getting queued.
792 if( SRpnt->sr_command != NULL )
794 scsi_release_command(SRpnt->sr_command);
795 SRpnt->sr_command = NULL;
799 * We must prevent reentrancy to the lowlevel host driver. This prevents
800 * it - we enter a loop until the host we want to talk to is not busy.
801 * Race conditions are prevented, as interrupts are disabled in between the
802 * time we check for the host being not busy, and the time we mark it busy
803 * ourselves.
808 * Our own function scsi_done (which marks the host as not busy, disables
809 * the timeout counter, etc) will be called by us or by the
 810  * scsi_hosts[host].queuecommand() function; scsi_done in turn calls
 811  * the completion function for the high level driver.
814 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
815 sizeof(SRpnt->sr_cmnd));
816 SRpnt->sr_bufflen = bufflen;
817 SRpnt->sr_buffer = buffer;
818 SRpnt->sr_allowed = retries;
819 SRpnt->sr_done = done;
820 SRpnt->sr_timeout_per_command = timeout;
822 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
823 sizeof(SRpnt->sr_cmnd));
825 if (SRpnt->sr_cmd_len == 0)
826 SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
829 * At this point, we merely set up the command, stick it in the normal
830 * request queue, and return. Eventually that request will come to the
831 * top of the list, and will be dispatched.
833 scsi_insert_special_req(SRpnt, 0);
 835         SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_req()\n"));
839 * Function: scsi_init_cmd_from_req
 841  * Purpose:     Initialize a Scsi_Cmnd from a Scsi_Request
844 * Arguments: SCpnt - command descriptor.
845 * SRpnt - Request from the queue.
847 * Lock status: None needed.
849 * Returns: Nothing.
851 * Notes: Mainly transfer data from the request structure to the
852 * command structure. The request structure is allocated
853 * using the normal memory allocator, and requests can pile
854 * up to more or less any depth. The command structure represents
855 * a consumable resource, as these are allocated into a pool
856 * when the SCSI subsystem initializes. The preallocation is
857 * required so that in low-memory situations a disk I/O request
858 * won't cause the memory manager to try and write out a page.
859 * The request structure is generally used by ioctls and character
860 * devices.
862 void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
864 struct Scsi_Host *host = SCpnt->host;
866 ASSERT_LOCK(&io_request_lock, 0);
868 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
869 SRpnt->sr_command = SCpnt;
871 if (!host) {
872 panic("Invalid or not present host.\n");
875 SCpnt->cmd_len = SRpnt->sr_cmd_len;
876 SCpnt->use_sg = SRpnt->sr_use_sg;
878 memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
879 sizeof(SRpnt->sr_request));
880 memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
881 sizeof(SCpnt->data_cmnd));
882 SCpnt->reset_chain = NULL;
883 SCpnt->serial_number = 0;
884 SCpnt->serial_number_at_timeout = 0;
885 SCpnt->bufflen = SRpnt->sr_bufflen;
886 SCpnt->buffer = SRpnt->sr_buffer;
887 SCpnt->flags = 0;
888 SCpnt->retries = 0;
889 SCpnt->allowed = SRpnt->sr_allowed;
890 SCpnt->done = SRpnt->sr_done;
891 SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
893 SCpnt->sc_data_direction = SRpnt->sr_data_direction;
895 SCpnt->sglist_len = SRpnt->sr_sglist_len;
896 SCpnt->underflow = SRpnt->sr_underflow;
898 SCpnt->sc_request = SRpnt;
900 memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
901 sizeof(SCpnt->cmnd));
902 /* Zero the sense buffer. Some host adapters automatically request
903 * sense on error. 0 is not a valid sense code.
905 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
906 SCpnt->request_buffer = SRpnt->sr_buffer;
907 SCpnt->request_bufflen = SRpnt->sr_bufflen;
908 SCpnt->old_use_sg = SCpnt->use_sg;
909 if (SCpnt->cmd_len == 0)
910 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
911 SCpnt->old_cmd_len = SCpnt->cmd_len;
912 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
913 SCpnt->old_underflow = SCpnt->underflow;
915 /* Start the timer ticking. */
917 SCpnt->internal_timeout = NORMAL_TIMEOUT;
918 SCpnt->abort_reason = 0;
919 SCpnt->result = 0;
 921         SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
925 * Function: scsi_do_cmd
927 * Purpose: Queue a SCSI command
929 * Arguments: SCpnt - command descriptor.
930 * cmnd - actual SCSI command to be performed.
931 * buffer - data buffer.
932 * bufflen - size of data buffer.
933 * done - completion function to be run.
934 * timeout - how long to let it run before timeout.
935 * retries - number of retries we allow.
937 * Lock status: With the new queueing code, this is SMP-safe, and no locks
938 * need be held upon entry. The old queueing code the lock was
939 * assumed to be held upon entry.
941 * Returns: Nothing.
943 * Notes: Prior to the new queue code, this function was not SMP-safe.
944 * Also, this function is now only used for queueing requests
945 * for things like ioctls and character device requests - this
946 * is because we essentially just inject a request into the
947 * queue for the device. Normal block device handling manipulates
948 * the queue directly.
950 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
951 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
952 int timeout, int retries)
954 struct Scsi_Host *host = SCpnt->host;
956 ASSERT_LOCK(&io_request_lock, 0);
958 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
960 SCSI_LOG_MLQUEUE(4,
962 int i;
963 int target = SCpnt->target;
964 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
965 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
966 "retries = %d)\n"
967 "command : ", host->host_no, SCpnt->channel, target, buffer,
968 bufflen, done, timeout, retries);
969 for (i = 0; i < 10; ++i)
970 printk("%02x ", ((unsigned char *) cmnd)[i]);
971 printk("\n");
974 if (!host) {
975 panic("Invalid or not present host.\n");
978 * We must prevent reentrancy to the lowlevel host driver. This prevents
979 * it - we enter a loop until the host we want to talk to is not busy.
980 * Race conditions are prevented, as interrupts are disabled in between the
981 * time we check for the host being not busy, and the time we mark it busy
982 * ourselves.
987 * Our own function scsi_done (which marks the host as not busy, disables
988 * the timeout counter, etc) will be called by us or by the
 989  * scsi_hosts[host].queuecommand() function; scsi_done in turn calls
 990  * the completion function for the high level driver.
993 memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
994 sizeof(SCpnt->data_cmnd));
995 SCpnt->reset_chain = NULL;
996 SCpnt->serial_number = 0;
997 SCpnt->serial_number_at_timeout = 0;
998 SCpnt->bufflen = bufflen;
999 SCpnt->buffer = buffer;
1000 SCpnt->flags = 0;
1001 SCpnt->retries = 0;
1002 SCpnt->allowed = retries;
1003 SCpnt->done = done;
1004 SCpnt->timeout_per_command = timeout;
1006 memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1007 sizeof(SCpnt->cmnd));
1008 /* Zero the sense buffer. Some host adapters automatically request
1009 * sense on error. 0 is not a valid sense code.
1011 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1012 SCpnt->request_buffer = buffer;
1013 SCpnt->request_bufflen = bufflen;
1014 SCpnt->old_use_sg = SCpnt->use_sg;
1015 if (SCpnt->cmd_len == 0)
1016 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1017 SCpnt->old_cmd_len = SCpnt->cmd_len;
1018 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1019 SCpnt->old_underflow = SCpnt->underflow;
1021 /* Start the timer ticking. */
1023 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1024 SCpnt->abort_reason = 0;
1025 SCpnt->result = 0;
1028 * At this point, we merely set up the command, stick it in the normal
1029 * request queue, and return. Eventually that request will come to the
1030 * top of the list, and will be dispatched.
1032 scsi_insert_special_cmd(SCpnt, 0);
1034 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1038 * This function is the mid-level interrupt routine, which decides how
1039 * to handle error conditions. Each invocation of this function must
1040 * do one and *only* one of the following:
1042 * 1) Insert command in BH queue.
1043 * 2) Activate error handler for host.
1045 * FIXME(eric) - I am concerned about stack overflow (still). An
1046 * interrupt could come while we are processing the bottom queue,
1047 * which would cause another command to be stuffed onto the bottom
1048 * queue, and it would in turn be processed as that interrupt handler
1049 * is returning. Given a sufficiently steady rate of returning
1050 * commands, this could cause the stack to overflow. I am not sure
1051 * what is the most appropriate solution here - we should probably
1052 * keep a depth count, and not process any commands while we still
1053 * have a bottom handler active higher in the stack.
1055 * There is currently code in the bottom half handler to monitor
1056 * recursion in the bottom handler and report if it ever happens. If
1057 * this becomes a problem, it won't be hard to engineer something to
1058 * deal with it so that only the outer layer ever does any real
1059 * processing.
1061 void scsi_done(Scsi_Cmnd * SCpnt)
1063 unsigned long flags;
1064 int tstatus;
1067 * We don't have to worry about this one timing out any more.
1069 tstatus = scsi_delete_timer(SCpnt);
1072 * If we are unable to remove the timer, it means that the command
1073 * has already timed out. In this case, we have no choice but to
1074 * let the timeout function run, as we have no idea where in fact
1075 * that function could really be. It might be on another processor,
1076 * etc, etc.
1078 if (!tstatus) {
1079 SCpnt->done_late = 1;
1080 return;
1082 /* Set the serial numbers back to zero */
1083 SCpnt->serial_number = 0;
1086 * First, see whether this command already timed out. If so, we ignore
1087 * the response. We treat it as if the command never finished.
1089  * Since serial_number is now 0, the error handler could detect this
1090  * situation and avoid calling the low level driver abort routine.
1091 * (DB)
1093 * FIXME(eric) - I believe that this test is now redundant, due to
1094 * the test of the return status of del_timer().
1096 if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1097 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1098 return;
1100 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1102 SCpnt->serial_number_at_timeout = 0;
1103 SCpnt->state = SCSI_STATE_BHQUEUE;
1104 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1105 SCpnt->bh_next = NULL;
1108 * Next, put this command in the BH queue.
1110 * We need a spinlock here, or compare and exchange if we can reorder incoming
1111 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1112 * before bh is serviced. -jj
1114 * We already have the io_request_lock here, since we are called from the
1115 * interrupt handler or the error handler. (DB)
1117 * This may be true at the moment, but I would like to wean all of the low
1118 * level drivers away from using io_request_lock. Technically they should
1119 * all use their own locking. I am adding a small spinlock to protect
1120 * this datastructure to make it safe for that day. (ERY)
1122 if (!scsi_bh_queue_head) {
1123 scsi_bh_queue_head = SCpnt;
1124 scsi_bh_queue_tail = SCpnt;
1125 } else {
1126 scsi_bh_queue_tail->bh_next = SCpnt;
1127 scsi_bh_queue_tail = SCpnt;
1130 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1132 * Mark the bottom half handler to be run.
1134 mark_bh(SCSI_BH);
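/*
 * Note: the handler behind SCSI_BH is installed elsewhere in this file via
 * init_bh(SCSI_BH, scsi_bottom_half_handler) (see scsi_dev_init() below), so
 * the mark_bh() above simply schedules the routine that follows.
 */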
1138 * Procedure: scsi_bottom_half_handler
1140 * Purpose: Called after we have finished processing interrupts, it
1141 * performs post-interrupt handling for commands that may
1142 * have completed.
1144 * Notes: This is called with all interrupts enabled. This should reduce
1145 * interrupt latency, stack depth, and reentrancy of the low-level
1146 * drivers.
1148  * The io_request_lock is required throughout this routine. There was a subtle
1149 * race condition when scsi_done is called after a command has already
1150 * timed out but before the time out is processed by the error handler.
1151 * (DB)
1153 * I believe I have corrected this. We simply monitor the return status of
1154 * del_timer() - if this comes back as 0, it means that the timer has fired
1155 * and that a timeout is in progress. I have modified scsi_done() such
1156 * that in this instance the command is never inserted in the bottom
1157 * half queue. Thus the only time we hold the lock here is when
1158 * we wish to atomically remove the contents of the queue.
1160 void scsi_bottom_half_handler(void)
1162 Scsi_Cmnd *SCpnt;
1163 Scsi_Cmnd *SCnext;
1164 unsigned long flags;
1167 while (1 == 1) {
1168 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1169 SCpnt = scsi_bh_queue_head;
1170 scsi_bh_queue_head = NULL;
1171 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1173 if (SCpnt == NULL) {
1174 return;
1176 SCnext = SCpnt->bh_next;
1178 for (; SCpnt; SCpnt = SCnext) {
1179 SCnext = SCpnt->bh_next;
1181 switch (scsi_decide_disposition(SCpnt)) {
1182 case SUCCESS:
1184                          * Command completed successfully - finish it.
1186 SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1187 SCpnt->host->host_failed,
1188 SCpnt->result));
1190 scsi_finish_command(SCpnt);
1191 break;
1192 case NEEDS_RETRY:
1194 * We only come in here if we want to retry a command. The
1195 * test to see whether the command should be retried should be
1196 * keeping track of the number of tries, so we don't end up looping,
1197 * of course.
1199 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1200 SCpnt->host->host_failed, SCpnt->result));
1202 scsi_retry_command(SCpnt);
1203 break;
1204 case ADD_TO_MLQUEUE:
1206 * This typically happens for a QUEUE_FULL message -
1207 * typically only when the queue depth is only
1208 * approximate for a given device. Adding a command
1209 * to the queue for the device will prevent further commands
1210 * from being sent to the device, so we shouldn't end up
1211 * with tons of things being sent down that shouldn't be.
1213 SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
1214 SCpnt));
1215 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1216 break;
1217 default:
1219 * Here we have a fatal error of some sort. Turn it over to
1220 * the error handler.
1222 SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1223 SCpnt, SCpnt->result,
1224 atomic_read(&SCpnt->host->host_active),
1225 SCpnt->host->host_busy,
1226 SCpnt->host->host_failed));
1229 * Dump the sense information too.
1231 if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1232 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1234 if (SCpnt->host->eh_wait != NULL) {
1235 SCpnt->host->host_failed++;
1236 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1237 SCpnt->state = SCSI_STATE_FAILED;
1238 SCpnt->host->in_recovery = 1;
1240 * If the host is having troubles, then look to see if this was the last
1241 * command that might have failed. If so, wake up the error handler.
1243 if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1244 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1245 atomic_read(&SCpnt->host->eh_wait->count)));
1246 up(SCpnt->host->eh_wait);
1248 } else {
1250 * We only get here if the error recovery thread has died.
1252 scsi_finish_command(SCpnt);
1255 } /* for(; SCpnt...) */
1257 } /* while(1==1) */
1262 * Function: scsi_retry_command
1264 * Purpose: Send a command back to the low level to be retried.
1266 * Notes: This command is always executed in the context of the
1267 * bottom half handler, or the error handler thread. Low
1268 * level drivers should not become re-entrant as a result of
1269 * this.
1271 int scsi_retry_command(Scsi_Cmnd * SCpnt)
1273 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1274 sizeof(SCpnt->data_cmnd));
1275 SCpnt->request_buffer = SCpnt->buffer;
1276 SCpnt->request_bufflen = SCpnt->bufflen;
1277 SCpnt->use_sg = SCpnt->old_use_sg;
1278 SCpnt->cmd_len = SCpnt->old_cmd_len;
1279 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1280 SCpnt->underflow = SCpnt->old_underflow;
1283 * Zero the sense information from the last time we tried
1284 * this command.
1286 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1288 return scsi_dispatch_cmd(SCpnt);
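/*
 * The "keeping track of the number of tries" mentioned above is done by the
 * caller: scsi_decide_disposition() (in scsi_error.c) bumps SCpnt->retries
 * and checks it against SCpnt->allowed before asking for NEEDS_RETRY, so this
 * routine itself does not bound the retries.
 */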
1292 * Function: scsi_finish_command
1294 * Purpose: Pass command off to upper layer for finishing of I/O
1295 * request, waking processes that are waiting on results,
1296 * etc.
1298 void scsi_finish_command(Scsi_Cmnd * SCpnt)
1300 struct Scsi_Host *host;
1301 Scsi_Device *device;
1302 Scsi_Request * SRpnt;
1303 unsigned long flags;
1305 ASSERT_LOCK(&io_request_lock, 0);
1307 host = SCpnt->host;
1308 device = SCpnt->device;
1311 * We need to protect the decrement, as otherwise a race condition
1312 * would exist. Fiddling with SCpnt isn't a problem as the
1313 * design only allows a single SCpnt to be active in only
1314 * one execution context, but the device and host structures are
1315 * shared.
1317 spin_lock_irqsave(&io_request_lock, flags);
1318 host->host_busy--; /* Indicate that we are free */
1319 device->device_busy--; /* Decrement device usage counter. */
1320 spin_unlock_irqrestore(&io_request_lock, flags);
1323 * Clear the flags which say that the device/host is no longer
1324 * capable of accepting new commands. These are set in scsi_queue.c
1325 * for both the queue full condition on a device, and for a
1326 * host full condition on the host.
1328 host->host_blocked = FALSE;
1329 device->device_blocked = FALSE;
1332 * If we have valid sense information, then some kind of recovery
1333 * must have taken place. Make a note of this.
1335 if (scsi_sense_valid(SCpnt)) {
1336 SCpnt->result |= (DRIVER_SENSE << 24);
1338 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1339 SCpnt->device->id, SCpnt->result));
1341 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1342 SCpnt->state = SCSI_STATE_FINISHED;
1344 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1345 SCpnt->use_sg = SCpnt->old_use_sg;
1348 * If there is an associated request structure, copy the data over before we call the
1349 * completion function.
1351 SRpnt = SCpnt->sc_request;
1352 if( SRpnt != NULL ) {
1353 SRpnt->sr_result = SRpnt->sr_command->result;
1354 if( SRpnt->sr_result != 0 ) {
1355 memcpy(SRpnt->sr_sense_buffer,
1356 SRpnt->sr_command->sense_buffer,
1357 sizeof(SRpnt->sr_sense_buffer));
1361 SCpnt->done(SCpnt);
1364 #ifdef CONFIG_MODULES
1365 static int scsi_register_host(Scsi_Host_Template *);
1366 static void scsi_unregister_host(Scsi_Host_Template *);
1367 #endif
1370 int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
1373 * Function: scsi_release_commandblocks()
1375 * Purpose: Release command blocks associated with a device.
1377 * Arguments: SDpnt - device
1379 * Returns: Nothing
1381 * Lock status: No locking assumed or required.
1383 * Notes:
1385 void scsi_release_commandblocks(Scsi_Device * SDpnt)
1387 Scsi_Cmnd *SCpnt, *SCnext;
1388 unsigned long flags;
1390 spin_lock_irqsave(&device_request_lock, flags);
1391 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1392 SDpnt->device_queue = SCnext = SCpnt->next;
1393 kfree((char *) SCpnt);
1395 SDpnt->has_cmdblocks = 0;
1396 SDpnt->queue_depth = 0;
1397 spin_unlock_irqrestore(&device_request_lock, flags);
1401 * Function: scsi_build_commandblocks()
1403 * Purpose: Allocate command blocks associated with a device.
1405 * Arguments: SDpnt - device
1407 * Returns: Nothing
1409 * Lock status: No locking assumed or required.
1411 * Notes:
1413 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1415 unsigned long flags;
1416 struct Scsi_Host *host = SDpnt->host;
1417 int j;
1418 Scsi_Cmnd *SCpnt;
1420 spin_lock_irqsave(&device_request_lock, flags);
1422 if (SDpnt->queue_depth == 0)
1424 SDpnt->queue_depth = host->cmd_per_lun;
1425 if (SDpnt->queue_depth == 0)
1426 SDpnt->queue_depth = 1; /* live to fight another day */
1428 SDpnt->device_queue = NULL;
1430 for (j = 0; j < SDpnt->queue_depth; j++) {
1431 SCpnt = (Scsi_Cmnd *)
1432 kmalloc(sizeof(Scsi_Cmnd),
1433 GFP_ATOMIC |
1434 (host->unchecked_isa_dma ? GFP_DMA : 0));
1435                 if (NULL == SCpnt)
1436                         break;  /* If not, the next line will oops ... */
1437                 memset(SCpnt, 0, sizeof(Scsi_Cmnd));
1438 memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
1439 SCpnt->host = host;
1440 SCpnt->device = SDpnt;
1441 SCpnt->target = SDpnt->id;
1442 SCpnt->lun = SDpnt->lun;
1443 SCpnt->channel = SDpnt->channel;
1444 SCpnt->request.rq_status = RQ_INACTIVE;
1445 SCpnt->use_sg = 0;
1446 SCpnt->old_use_sg = 0;
1447 SCpnt->old_cmd_len = 0;
1448 SCpnt->underflow = 0;
1449 SCpnt->old_underflow = 0;
1450 SCpnt->transfersize = 0;
1451 SCpnt->resid = 0;
1452 SCpnt->serial_number = 0;
1453 SCpnt->serial_number_at_timeout = 0;
1454 SCpnt->host_scribble = NULL;
1455 SCpnt->next = SDpnt->device_queue;
1456 SDpnt->device_queue = SCpnt;
1457 SCpnt->state = SCSI_STATE_UNUSED;
1458 SCpnt->owner = SCSI_OWNER_NOBODY;
1460 if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1461 printk("scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1462 SDpnt->queue_depth, j);
1463 SDpnt->queue_depth = j;
1464 SDpnt->has_cmdblocks = (0 != j);
1465 } else {
1466 SDpnt->has_cmdblocks = 1;
1468 spin_unlock_irqrestore(&device_request_lock, flags);
1471 static int proc_scsi_gen_write(struct file * file, const char * buf,
1472 unsigned long length, void *data);
1474 void __init scsi_host_no_insert(char *str, int n)
1476 Scsi_Host_Name *shn, *shn2;
1477 int len;
1479 len = strlen(str);
1480 if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1481 if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
1482 strncpy(shn->name, str, len);
1483 shn->name[len] = 0;
1484 shn->host_no = n;
1485 shn->host_registered = 0;
1486 shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1487 shn->next = NULL;
1488 if (scsi_host_no_list) {
1489 for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1491 shn2->next = shn;
1493 else
1494 scsi_host_no_list = shn;
1495 max_scsi_hosts = n+1;
1497 else
1498 kfree((char *) shn);
1502 #ifndef MODULE /* { */
1504 char scsi_host_no_table[20][10] __initdata = {};
1505 int scsi_host_no_set __initdata = 0;
1508 * scsi_dev_init() is our initialization routine, which in turn calls host
1509 * initialization, bus scanning, and sd/st initialization routines.
1510 * This is only used at boot time.
1512 int __init scsi_dev_init(void)
1514 Scsi_Device *SDpnt;
1515 struct Scsi_Host *shpnt;
1516 struct Scsi_Device_Template *sdtpnt;
1517 struct proc_dir_entry *generic;
1518 #ifdef FOO_ON_YOU
1519 return;
1520 #endif
1522 /* Initialize list of host_no if kernel parameter set */
1523 if (scsi_host_no_set) {
1524 int i;
1525 for (i = 0;i < sizeof(scsi_host_no_table)/sizeof(scsi_host_no_table[0]);i++)
1526 scsi_host_no_insert(scsi_host_no_table[i], i);
1529 /* Yes we're here... */
1531 scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
1533 * This makes /proc/scsi and /proc/scsi/scsi visible.
1535 #ifdef CONFIG_PROC_FS
1536 proc_scsi = proc_mkdir("scsi", 0);
1537 if (!proc_scsi) {
1538 printk (KERN_ERR "cannot init /proc/scsi\n");
1539 return -ENOMEM;
1542 generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
1543 if (!generic) {
1544 printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
1545 remove_proc_entry("scsi", 0);
1546 return -ENOMEM;
1548 generic->write_proc = proc_scsi_gen_write;
1549 #endif
1551 /* Init a few things so we can "malloc" memory. */
1552 scsi_loadable_module_flag = 0;
1554 /* initialize all hosts */
1555 scsi_init();
1558 * This is where the processing takes place for most everything
1559 * when commands are completed. Until we do this, we will not be able
1560 * to queue any commands.
1562 init_bh(SCSI_BH, scsi_bottom_half_handler);
1564 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1565 scan_scsis(shpnt, 0, 0, 0, 0); /* scan for scsi devices */
1566 if (shpnt->select_queue_depths != NULL)
1567 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
1570 printk("scsi : detected ");
1571 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1572 if (sdtpnt->dev_noticed && sdtpnt->name)
1573 printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
1574 (sdtpnt->dev_noticed != 1) ? "s" : "");
1575 printk("total.\n");
1577 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1578 if (sdtpnt->init && sdtpnt->dev_noticed)
1579 (*sdtpnt->init) ();
1581 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1582 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
1583 /* SDpnt->scsi_request_fn = NULL; */
1584 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1585 if (sdtpnt->attach)
1586 (*sdtpnt->attach) (SDpnt);
1587 if (SDpnt->attached) {
1588 scsi_build_commandblocks(SDpnt);
1589 if (0 == SDpnt->has_cmdblocks) {
1590 printk("scsi_dev_init: DANGER, no command blocks\n");
1591 /* What to do now ?? */
1598 * This should build the DMA pool.
1600 scsi_resize_dma_pool();
1603 * OK, now we finish the initialization by doing spin-up, read
1604 * capacity, etc, etc
1606 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1607 if (sdtpnt->finish && sdtpnt->nr_dev)
1608 (*sdtpnt->finish) ();
1610 scsi_loadable_module_flag = 1;
1612 return 0;
1614 #endif /* MODULE */ /* } */
1616 #ifdef CONFIG_PROC_FS
1617 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1619 Scsi_Device *scd;
1620 struct Scsi_Host *HBA_ptr;
1621 int size, len = 0;
1622 off_t begin = 0;
1623 off_t pos = 0;
1626 * First, see if there are any attached devices or not.
1628 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1629 if (HBA_ptr->host_queue != NULL) {
1630 break;
1633 size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1634 len += size;
1635 pos = begin + len;
1636 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1637 #if 0
1638 size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1639 HBA_ptr->hostt->procname);
1640 len += size;
1641 pos = begin + len;
1642 #endif
1643 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1644 proc_print_scsidevice(scd, buffer, &size, len);
1645 len += size;
1646 pos = begin + len;
1648 if (pos < offset) {
1649 len = 0;
1650 begin = pos;
1652 if (pos > offset + length)
1653 goto stop_output;
1657 stop_output:
1658 *start = buffer + (offset - begin); /* Start of wanted data */
1659 len -= (offset - begin); /* Start slop */
1660 if (len > length)
1661 len = length; /* Ending slop */
1662 return (len);
1665 static int proc_scsi_gen_write(struct file * file, const char * buf,
1666 unsigned long length, void *data)
1668 struct Scsi_Device_Template *SDTpnt;
1669 Scsi_Device *scd;
1670 struct Scsi_Host *HBA_ptr;
1671 char *p;
1672 int host, channel, id, lun;
1673 char * buffer;
1674 int err;
1676 if (!buf || length>PAGE_SIZE)
1677 return -EINVAL;
1679 if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1680 return -ENOMEM;
1681 copy_from_user(buffer, buf, length);
1683 err = -EINVAL;
1684 if (length < 11 || strncmp("scsi", buffer, 4))
1685 goto out;
1688 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1689 * to dump status of all scsi commands. The number is used to specify the level
1690 * of detail in the dump.
1692 if (!strncmp("dump", buffer + 5, 4)) {
1693 unsigned int level;
1695 p = buffer + 10;
1697 if (*p == '\0')
1698 goto out;
1700 level = simple_strtoul(p, NULL, 0);
1701 scsi_dump_status(level);
1704 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1705 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1706 * llcomplete,hlqueue,hlcomplete]
1708 #ifdef CONFIG_SCSI_LOGGING /* { */
1710 if (!strncmp("log", buffer + 5, 3)) {
1711 char *token;
1712 unsigned int level;
1714 p = buffer + 9;
1715 token = p;
1716 while (*p != ' ' && *p != '\t' && *p != '\0') {
1717 p++;
1720 if (*p == '\0') {
1721 if (strncmp(token, "all", 3) == 0) {
1723 * Turn on absolutely everything.
1725 scsi_logging_level = ~0;
1726 } else if (strncmp(token, "none", 4) == 0) {
1728 * Turn off absolutely everything.
1730 scsi_logging_level = 0;
1731 } else {
1732 goto out;
1734 } else {
1735 *p++ = '\0';
1737 level = simple_strtoul(p, NULL, 0);
1740 * Now figure out what to do with it.
1742 if (strcmp(token, "error") == 0) {
1743 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1744 } else if (strcmp(token, "timeout") == 0) {
1745 SCSI_SET_TIMEOUT_LOGGING(level);
1746 } else if (strcmp(token, "scan") == 0) {
1747 SCSI_SET_SCAN_BUS_LOGGING(level);
1748 } else if (strcmp(token, "mlqueue") == 0) {
1749 SCSI_SET_MLQUEUE_LOGGING(level);
1750 } else if (strcmp(token, "mlcomplete") == 0) {
1751 SCSI_SET_MLCOMPLETE_LOGGING(level);
1752 } else if (strcmp(token, "llqueue") == 0) {
1753 SCSI_SET_LLQUEUE_LOGGING(level);
1754 } else if (strcmp(token, "llcomplete") == 0) {
1755 SCSI_SET_LLCOMPLETE_LOGGING(level);
1756 } else if (strcmp(token, "hlqueue") == 0) {
1757 SCSI_SET_HLQUEUE_LOGGING(level);
1758 } else if (strcmp(token, "hlcomplete") == 0) {
1759 SCSI_SET_HLCOMPLETE_LOGGING(level);
1760 } else if (strcmp(token, "ioctl") == 0) {
1761 SCSI_SET_IOCTL_LOGGING(level);
1762 } else {
1763 goto out;
1767 printk("scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1769 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1772 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1773 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1774 * Consider this feature BETA.
1775 * CAUTION: This is not for hotplugging your peripherals. As
1776 * SCSI was not designed for this you could damage your
1777 * hardware !
1778 * However perhaps it is legal to switch on an
1779 * already connected device. It is perhaps not
1780 * guaranteed this device doesn't corrupt an ongoing data transfer.
1782 if (!strncmp("add-single-device", buffer + 5, 17)) {
1783 p = buffer + 23;
1785 host = simple_strtoul(p, &p, 0);
1786 channel = simple_strtoul(p + 1, &p, 0);
1787 id = simple_strtoul(p + 1, &p, 0);
1788 lun = simple_strtoul(p + 1, &p, 0);
1790 printk("scsi singledevice %d %d %d %d\n", host, channel,
1791 id, lun);
1793 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1794 if (HBA_ptr->host_no == host) {
1795 break;
1798 err = -ENXIO;
1799 if (!HBA_ptr)
1800 goto out;
1802 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1803 if ((scd->channel == channel
1804 && scd->id == id
1805 && scd->lun == lun)) {
1806 break;
1810 err = -ENOSYS;
1811 if (scd)
1812 goto out; /* We do not yet support unplugging */
1814 scan_scsis(HBA_ptr, 1, channel, id, lun);
1816 /* FIXME (DB) This assumes that the queue_depth routines can be used
1817 in this context as well, while they were all designed to be
1818 called only once after the detect routine. (DB) */
1819 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1820 it is called before build_commandblocks() */
1822 err = length;
1823 goto out;
1826 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1827 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1829 * Consider this feature pre-BETA.
1831 * CAUTION: This is not for hotplugging your peripherals. As
1832 * SCSI was not designed for this you could damage your
1833 * hardware and thoroughly confuse the SCSI subsystem.
1836 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1837 p = buffer + 26;
1839 host = simple_strtoul(p, &p, 0);
1840 channel = simple_strtoul(p + 1, &p, 0);
1841 id = simple_strtoul(p + 1, &p, 0);
1842 lun = simple_strtoul(p + 1, &p, 0);
1845 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1846 if (HBA_ptr->host_no == host) {
1847 break;
1850 err = -ENODEV;
1851 if (!HBA_ptr)
1852 goto out;
1854 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1855 if ((scd->channel == channel
1856 && scd->id == id
1857 && scd->lun == lun)) {
1858 break;
1862 if (scd == NULL)
1863 goto out; /* there is no such device attached */
1865 err = -EBUSY;
1866 if (scd->access_count)
1867 goto out;
1869 SDTpnt = scsi_devicelist;
1870 while (SDTpnt != NULL) {
1871 if (SDTpnt->detach)
1872 (*SDTpnt->detach) (scd);
1873 SDTpnt = SDTpnt->next;
1876 if (scd->attached == 0) {
1878 * Nobody is using this device any more.
1879 * Free all of the command structures.
1881 if (HBA_ptr->hostt->revoke)
1882 HBA_ptr->hostt->revoke(scd);
1883 devfs_unregister (scd->de);
1884 scsi_release_commandblocks(scd);
1886 /* Now we can remove the device structure */
1887 if (scd->next != NULL)
1888 scd->next->prev = scd->prev;
1890 if (scd->prev != NULL)
1891 scd->prev->next = scd->next;
1893 if (HBA_ptr->host_queue == scd) {
1894 HBA_ptr->host_queue = scd->next;
1896 blk_cleanup_queue(&scd->request_queue);
1897 kfree((char *) scd);
1898 } else {
1899 goto out;
1901 err = 0;
1903 out:
1905 free_page((unsigned long) buffer);
1906 return err;
1908 #endif
1911 * Some host adapters that are plugging into other subsystems register
1912 * their hosts through the modules entrypoints, and don't use the big
1913 * list in hosts.c.
1915 #if defined(CONFIG_MODULES) || defined(CONFIG_BLK_DEV_IDESCSI) || defined(CONFIG_USB_STORAGE) /* a big #ifdef block... */
1918 * This entry point should be called by a loadable module if it is trying
1919  * to add a low level scsi driver to the system.
1921 static int scsi_register_host(Scsi_Host_Template * tpnt)
1923 int pcount;
1924 struct Scsi_Host *shpnt;
1925 Scsi_Device *SDpnt;
1926 struct Scsi_Device_Template *sdtpnt;
1927 const char *name;
1928 unsigned long flags;
1929 int out_of_space = 0;
1931 if (tpnt->next || !tpnt->detect)
1932 return 1; /* Must be already loaded, or
1933 * no detect routine available
1935 pcount = next_scsi_host;
1937 /* The detect routine must carefully spinunlock/spinlock if
1938 it enables interrupts, since all interrupt handlers do
1939 spinlock as well.
1940 All lame drivers are going to fail due to the following
1941        spinlock. For the time being let's use it only for drivers
1942 using the new scsi code. NOTE: the detect routine could
1943 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1945 if (tpnt->use_new_eh_code) {
1946 spin_lock_irqsave(&io_request_lock, flags);
1947 tpnt->present = tpnt->detect(tpnt);
1948 spin_unlock_irqrestore(&io_request_lock, flags);
1949 } else
1950 tpnt->present = tpnt->detect(tpnt);
1952 if (tpnt->present) {
1953 if (pcount == next_scsi_host) {
1954 if (tpnt->present > 1) {
1955 printk("Failure to register low-level scsi driver\n");
1956 scsi_unregister_host(tpnt);
1957 return 1;
1960 /* The low-level driver did not register a host itself, so
1961 * register one on its behalf now. */
1963 scsi_register(tpnt, 0);
1965 tpnt->next = scsi_hosts; /* Add to the linked list */
1966 scsi_hosts = tpnt;
1968 /* Add the new driver to /proc/scsi */
1969 #ifdef CONFIG_PROC_FS
1970 build_proc_dir_entries(tpnt);
1971 #endif
1975 /* Add the kernel threads for each host adapter that will
1976 * handle error correction. */
1978 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1979 if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
1980 DECLARE_MUTEX_LOCKED(sem);
1982 shpnt->eh_notify = &sem;
1983 kernel_thread((int (*)(void *)) scsi_error_handler,
1984 (void *) shpnt, 0);
1987 /* Now wait for the kernel error thread to initialize itself
1988 * as it might be needed when we scan the bus. */
1990 down(&sem);
1991 shpnt->eh_notify = NULL;
1995 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1996 if (shpnt->hostt == tpnt) {
1997 if (tpnt->info) {
1998 name = tpnt->info(shpnt);
1999 } else {
2000 name = tpnt->name;
2002 printk("scsi%d : %s\n", /* And print a little message */
2003 shpnt->host_no, name);
2007 printk("scsi : %d host%s.\n", next_scsi_host,
2008 (next_scsi_host == 1) ? "" : "s");
2010 /* The next step is to call scan_scsis here. This generates the
2011 * Scsi_Devices entries */
2013 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2014 if (shpnt->hostt == tpnt) {
2015 scan_scsis(shpnt, 0, 0, 0, 0);
2016 if (shpnt->select_queue_depths != NULL) {
2017 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
2022 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2023 if (sdtpnt->init && sdtpnt->dev_noticed)
2024 (*sdtpnt->init) ();
2028 /* Next we create the Scsi_Cmnd structures for this host */
2030 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2031 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2032 if (SDpnt->host->hostt == tpnt) {
2033 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2034 if (sdtpnt->attach)
2035 (*sdtpnt->attach) (SDpnt);
2036 if (SDpnt->attached) {
2037 scsi_build_commandblocks(SDpnt);
2038 if (0 == SDpnt->has_cmdblocks)
2039 out_of_space = 1;
2045 /* Now that we have all of the devices, resize the DMA pool,
2046 * as required. */
2047 if (!out_of_space)
2048 scsi_resize_dma_pool();
2051 /* This does any final handling that is required. */
2052 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2053 if (sdtpnt->finish && sdtpnt->nr_dev) {
2054 (*sdtpnt->finish) ();
2058 #if defined(USE_STATIC_SCSI_MEMORY)
2059 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2060 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2061 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2062 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2063 #endif
2065 MOD_INC_USE_COUNT;
2067 if (out_of_space) {
2068 scsi_unregister_host(tpnt); /* easiest way to clean up?? */
2069 return 1;
2070 } else
2071 return 0;
2075 /* Similarly, this entry point should be called by a loadable module if it
2076 * is trying to remove a low level scsi driver from the system.
2078 * Note - there is a fatal flaw in the deregister module function.
2079 * There is no way to return a code that says 'I cannot be unloaded now'.
2080 * The system relies entirely upon usage counts that are maintained,
2081 * and the assumption is that if the usage count is 0, then the module
2082 * can be unloaded. */
2084 static void scsi_unregister_host(Scsi_Host_Template * tpnt)
2086 int online_status;
2087 int pcount;
2088 Scsi_Cmnd *SCpnt;
2089 Scsi_Device *SDpnt;
2090 Scsi_Device *SDpnt1;
2091 struct Scsi_Device_Template *sdtpnt;
2092 struct Scsi_Host *sh1;
2093 struct Scsi_Host *shpnt;
2094 Scsi_Host_Template *SHT;
2095 Scsi_Host_Template *SHTp;
2096 char name[10]; /* host_no>=10^9? I don't think so. */
2099 /* First verify that this host adapter is completely free with no pending
2100 * commands */
2102 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2103 for (SDpnt = shpnt->host_queue; SDpnt;
2104 SDpnt = SDpnt->next) {
2105 if (SDpnt->host->hostt == tpnt
2106 && SDpnt->host->hostt->module
2107 && GET_USE_COUNT(SDpnt->host->hostt->module))
2108 return;
2110 /* FIXME(eric) - We need to find a way to notify the
2111 * low level driver that we are shutting down - via the
2112 * special device entry that still needs to get added.
2114 * Is detach interface below good enough for this? */
2120 /* FIXME(eric) put a spinlock on this. We force all of the devices offline
2121 * to help prevent race conditions where other hosts/processors could try and
2122 * get in and queue a command. */
2124 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2125 for (SDpnt = shpnt->host_queue; SDpnt;
2126 SDpnt = SDpnt->next) {
2127 if (SDpnt->host->hostt == tpnt)
2128 SDpnt->online = FALSE;
2133 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2134 if (shpnt->hostt != tpnt) {
2135 continue;
2137 for (SDpnt = shpnt->host_queue; SDpnt;
2138 SDpnt = SDpnt->next) {
2140 /* Loop over all of the commands associated with the device. If any of
2141 * them are busy, then set the state back to inactive and bail. */
2143 for (SCpnt = SDpnt->device_queue; SCpnt;
2144 SCpnt = SCpnt->next) {
2145 online_status = SDpnt->online;
2146 SDpnt->online = FALSE;
2147 if (SCpnt->request.rq_status != RQ_INACTIVE) {
2148 printk("SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2149 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2150 SCpnt->state, SCpnt->owner);
2151 for (SDpnt1 = shpnt->host_queue; SDpnt1;
2152 SDpnt1 = SDpnt1->next) {
2153 for (SCpnt = SDpnt1->device_queue; SCpnt;
2154 SCpnt = SCpnt->next)
2155 if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2156 SCpnt->request.rq_status = RQ_INACTIVE;
2158 SDpnt->online = online_status;
2159 printk("Device busy???\n");
2160 return;
2163 /* No, this device is really free. Mark it as such, and
2164 * continue on. */
2166 SCpnt->state = SCSI_STATE_DISCONNECTING;
2167 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2171 /* Next we detach the high level drivers from the Scsi_Device structures */
2173 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2174 if (shpnt->hostt != tpnt) {
2175 continue;
2177 for (SDpnt = shpnt->host_queue; SDpnt;
2178 SDpnt = SDpnt->next) {
2179 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2180 if (sdtpnt->detach)
2181 (*sdtpnt->detach) (SDpnt);
2183 /* If something still attached, punt */
2184 if (SDpnt->attached) {
2185 printk("Attached usage count = %d\n", SDpnt->attached);
2186 return;
2188 devfs_unregister (SDpnt->de);
2193 /* Next, kill the kernel error recovery thread for this host. */
2195 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2196 if (shpnt->hostt == tpnt
2197 && shpnt->hostt->use_new_eh_code
2198 && shpnt->ehandler != NULL) {
2199 DECLARE_MUTEX_LOCKED(sem);
2201 shpnt->eh_notify = &sem;
2202 send_sig(SIGHUP, shpnt->ehandler, 1);
2203 down(&sem);
2204 shpnt->eh_notify = NULL;
2208 /* Next we free up the Scsi_Cmnd structures for this host */
2210 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2211 if (shpnt->hostt != tpnt) {
2212 continue;
2214 for (SDpnt = shpnt->host_queue; SDpnt;
2215 SDpnt = shpnt->host_queue) {
2216 scsi_release_commandblocks(SDpnt);
2218 blk_cleanup_queue(&SDpnt->request_queue);
2219 /* Next free up the Scsi_Device structures for this host */
2220 shpnt->host_queue = SDpnt->next;
2221 kfree((char *) SDpnt);
2226 /* Next we go through and remove the instances of the individual hosts
2227 * that were detected */
2229 for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2230 sh1 = shpnt->next;
2231 if (shpnt->hostt != tpnt || !shpnt->loaded_as_module)
2232 continue;
2233 pcount = next_scsi_host;
2234 /* Remove the /proc/scsi directory entry */
2235 sprintf(name,"%d",shpnt->host_no);
2236 remove_proc_entry(name, tpnt->proc_dir);
2237 if (tpnt->release)
2238 (*tpnt->release) (shpnt);
2239 else {
2240 /* This is the default case for the release function.
2241 * It should do the right thing for most correctly
2242 * written host adapters. */
2244 if (shpnt->irq)
2245 free_irq(shpnt->irq, NULL);
2246 if (shpnt->dma_channel != 0xff)
2247 free_dma(shpnt->dma_channel);
2248 if (shpnt->io_port && shpnt->n_io_port)
2249 release_region(shpnt->io_port, shpnt->n_io_port);
2251 if (pcount == next_scsi_host)
2252 scsi_unregister(shpnt);
2253 tpnt->present--;
2257 /* If there are absolutely no more hosts left, it is safe
2258 * to completely nuke the DMA pool. The resize operation will
2259 * do the right thing and free everything. */
2261 if (!scsi_hosts)
2262 scsi_resize_dma_pool();
2264 printk("scsi : %d host%s.\n", next_scsi_host,
2265 (next_scsi_host == 1) ? "" : "s");
2267 #if defined(USE_STATIC_SCSI_MEMORY)
2268 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2269 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2270 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2271 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2272 #endif
2274 /* There were some hosts that were loaded at boot time, so we cannot
2275 do any more than this */
2276 if (tpnt->present)
2277 return;
2279 /* OK, this is the very last step. Remove this host adapter from the
2280 linked list. */
2281 for (SHTp = NULL, SHT = scsi_hosts; SHT; SHTp = SHT, SHT = SHT->next)
2282 if (SHT == tpnt) {
2283 if (SHTp)
2284 SHTp->next = SHT->next;
2285 else
2286 scsi_hosts = SHT->next;
2287 SHT->next = NULL;
2288 break;
2290 /* Rebuild the /proc/scsi directory entries */
2291 remove_proc_entry(tpnt->proc_name, proc_scsi);
2292 MOD_DEC_USE_COUNT;
2295 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2298 /* This entry point should be called by a loadable module if it is trying
2299 * to add a high level scsi driver to the system. */
2301 static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2303 Scsi_Device *SDpnt;
2304 struct Scsi_Host *shpnt;
2305 int out_of_space = 0;
2307 if (tpnt->next)
2308 return 1;
2310 scsi_register_device(tpnt);
2312 /* First scan the devices that we know about, and see if we notice them. */
2315 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2316 for (SDpnt = shpnt->host_queue; SDpnt;
2317 SDpnt = SDpnt->next) {
2318 if (tpnt->detect)
2319 SDpnt->attached += (*tpnt->detect) (SDpnt);
2324 /* If any of the devices would match this driver, then perform the
2325 * init function. */
2327 if (tpnt->init && tpnt->dev_noticed)
2328 if ((*tpnt->init) ())
2329 return 1;
2332 /* Now actually connect the devices to the new driver. */
2334 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2335 for (SDpnt = shpnt->host_queue; SDpnt;
2336 SDpnt = SDpnt->next) {
2337 if (tpnt->attach)
2338 (*tpnt->attach) (SDpnt);
2340 /* If this driver attached to the device and we don't have any
2341 * command blocks for this device, allocate some. */
2343 if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2344 SDpnt->online = TRUE;
2345 scsi_build_commandblocks(SDpnt);
2346 if (0 == SDpnt->has_cmdblocks)
2347 out_of_space = 1;
2353 /* This does any final handling that is required. */
2355 if (tpnt->finish && tpnt->nr_dev)
2356 (*tpnt->finish) ();
2357 if (!out_of_space)
2358 scsi_resize_dma_pool();
2359 MOD_INC_USE_COUNT;
2361 if (out_of_space) {
2362 scsi_unregister_device(tpnt); /* easiest way to clean up?? */
2363 return 1;
2364 } else
2365 return 0;
2368 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2370 Scsi_Device *SDpnt;
2371 struct Scsi_Host *shpnt;
2372 struct Scsi_Device_Template *spnt;
2373 struct Scsi_Device_Template *prev_spnt;
2376 /* If we are busy, this is not going to fly. */
2378 if (GET_USE_COUNT(tpnt->module) != 0)
2379 return 0;
2382 /* Next, detach the devices from the driver. */
2385 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2386 for (SDpnt = shpnt->host_queue; SDpnt;
2387 SDpnt = SDpnt->next) {
2388 if (tpnt->detach)
2389 (*tpnt->detach) (SDpnt);
2390 if (SDpnt->attached == 0) {
2391 SDpnt->online = FALSE;
2394 /* Nobody is using this device any more. Free all of the
2395 * command structures. */
2397 scsi_release_commandblocks(SDpnt);
2402 /* Extract the template from the linked list. */
2404 spnt = scsi_devicelist;
2405 prev_spnt = NULL;
2406 while (spnt != tpnt) {
2407 prev_spnt = spnt;
2408 spnt = spnt->next;
2410 if (prev_spnt == NULL)
2411 scsi_devicelist = tpnt->next;
2412 else
2413 prev_spnt->next = spnt->next;
2415 MOD_DEC_USE_COUNT;
2417 /* Final cleanup for the driver is done in the driver sources in the
2418 * cleanup function. */
2420 return 0;
2424 int scsi_register_module(int module_type, void *ptr)
2426 switch (module_type) {
2427 case MODULE_SCSI_HA:
2428 return scsi_register_host((Scsi_Host_Template *) ptr);
2430 /* Load upper level device handler of some kind */
2431 case MODULE_SCSI_DEV:
2432 #ifdef CONFIG_KMOD
2433 if (scsi_hosts == NULL)
2434 request_module("scsi_hostadapter");
2435 #endif
2436 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2437 /* The rest of these are not yet implemented */
2439 /* Load constants.o */
2440 case MODULE_SCSI_CONST:
2442 /* Load specialized ioctl handler for some device. Intended for
2443 * cdroms that have non-SCSI2 audio command sets. */
2444 case MODULE_SCSI_IOCTL:
2446 default:
2447 return 1;
2451 void scsi_unregister_module(int module_type, void *ptr)
2453 switch (module_type) {
2454 case MODULE_SCSI_HA:
2455 scsi_unregister_host((Scsi_Host_Template *) ptr);
2456 break;
2457 case MODULE_SCSI_DEV:
2458 scsi_unregister_device((struct Scsi_Device_Template *) ptr);
2459 break;
2460 /* The rest of these are not yet implemented. */
2461 case MODULE_SCSI_CONST:
2462 case MODULE_SCSI_IOCTL:
2463 break;
2464 default:
2466 return;
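/*
 * Illustrative sketch, not part of the original source: how a hypothetical
 * low-level driver module would use the two entry points above.
 * "example_template", "example_init", "example_exit" and EXAMPLE_TEMPLATE
 * are placeholders for the driver's real names and its usual
 * Scsi_Host_Template initializer.  A high level driver would do the same
 * thing with MODULE_SCSI_DEV and its struct Scsi_Device_Template.
 *
 *	static Scsi_Host_Template example_template = EXAMPLE_TEMPLATE;
 *
 *	static int __init example_init(void)
 *	{
 *		example_template.module = THIS_MODULE;
 *		scsi_register_module(MODULE_SCSI_HA, &example_template);
 *		if (example_template.present)
 *			return 0;
 *		scsi_unregister_module(MODULE_SCSI_HA, &example_template);
 *		return -ENODEV;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		scsi_unregister_module(MODULE_SCSI_HA, &example_template);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */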
2469 #endif /* CONFIG_MODULES */
2471 #ifdef CONFIG_PROC_FS
2473 /* Function: scsi_dump_status
2475 * Purpose: Brain dump of scsi system, used for problem solving.
2477 * Arguments: level - used to indicate level of detail.
2479 * Notes: The level isn't used at all yet, but we need to find some way
2480 * of sensibly logging varying degrees of information. A quick one-line
2481 * display of each command, plus the status would be most useful.
2483 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
2484 * it all off if the user wants a lean and mean kernel. It would probably
2485 * also be useful to allow the user to specify one single host to be dumped.
2486 * A second argument to the function would be useful for that purpose.
2488 * FIXME - some formatting of the output into tables would be very handy. */
2490 static void scsi_dump_status(int level)
2492 #ifdef CONFIG_SCSI_LOGGING /* { */
2493 int i;
2494 struct Scsi_Host *shpnt;
2495 Scsi_Cmnd *SCpnt;
2496 Scsi_Device *SDpnt;
2497 printk("Dump of scsi host parameters:\n");
2498 i = 0;
2499 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2500 printk(" %d %d %d : %d %d\n",
2501 shpnt->host_failed,
2502 shpnt->host_busy,
2503 atomic_read(&shpnt->host_active),
2504 shpnt->host_blocked,
2505 shpnt->host_self_blocked);
2508 printk("\n\n");
2509 printk("Dump of scsi command parameters:\n");
2510 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2511 printk("h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
2512 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2513 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
2514 /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
2515 printk("(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
2516 i++,
2518 SCpnt->host->host_no,
2519 SCpnt->channel,
2520 SCpnt->target,
2521 SCpnt->lun,
2523 kdevname(SCpnt->request.rq_dev),
2524 SCpnt->request.sector,
2525 SCpnt->request.nr_sectors,
2526 SCpnt->request.current_nr_sectors,
2527 SCpnt->request.rq_status,
2528 SCpnt->use_sg,
2530 SCpnt->retries,
2531 SCpnt->allowed,
2532 SCpnt->flags,
2534 SCpnt->timeout_per_command,
2535 SCpnt->timeout,
2536 SCpnt->internal_timeout,
2538 SCpnt->cmnd[0],
2539 SCpnt->sense_buffer[2],
2540 SCpnt->result);
2545 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2546 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2547 /* Now dump the request lists for each block device */
2548 printk("Dump of pending block device requests\n");
2549 for (i = 0; i < MAX_BLKDEV; i++) {
2550 struct list_head * queue_head;
2552 queue_head = &blk_dev[i].request_queue.queue_head;
2553 if (!list_empty(queue_head)) {
2554 struct request *req;
2555 struct list_head * entry;
2557 printk("%d: ", i);
2558 entry = queue_head->next;
2559 do {
2560 req = blkdev_entry_to_request(entry);
2561 printk("(%s %d %ld %ld %ld) ",
2562 kdevname(req->rq_dev),
2563 req->cmd,
2564 req->sector,
2565 req->nr_sectors,
2566 req->current_nr_sectors);
2567 } while ((entry = entry->next) != queue_head);
2568 printk("\n");
2573 #endif /* CONFIG_SCSI_LOGGING */ /* } */
2575 #endif /* CONFIG_PROC_FS */
2577 static int scsi_host_no_init (char *str)
2579 static int next_no = 0;
2580 char *temp;
2582 #ifndef MODULE
2583 int len;
2584 scsi_host_no_set = 1;
2585 memset(scsi_host_no_table, 0, sizeof(scsi_host_no_table));
2586 #endif /* MODULE */
2588 while (str) {
2589 temp = str;
2590 while (*temp && (*temp != ':') && (*temp != ','))
2591 temp++;
2592 if (!*temp)
2593 temp = NULL;
2594 else
2595 *temp++ = 0;
2596 #ifdef MODULE
2597 scsi_host_no_insert(str, next_no);
2598 #else
2599 if (next_no < sizeof(scsi_host_no_table)/sizeof(scsi_host_no_table[0])) {
2600 if ((len = strlen(str)) >= sizeof(scsi_host_no_table[0]))
2601 len = sizeof(scsi_host_no_table[0])-1;
2602 strncpy(scsi_host_no_table[next_no], str, len);
2603 scsi_host_no_table[next_no][len] = 0;
2605 #endif /* MODULE */
2606 str = temp;
2607 next_no++;
2609 return 1;
2612 #ifndef MODULE
2613 __setup("scsihosts=", scsi_host_no_init);
2614 #endif
2616 #ifdef MODULE
2617 static char *scsihosts;
2619 MODULE_PARM(scsihosts, "s");
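/*
 * Illustrative note, not part of the original source: the string handed to
 * scsi_host_no_init() above (via the "scsihosts=" boot option, or this
 * module parameter when scsi is built modular) is a list of names separated
 * by ':' or ','.  Each field is assigned the next consecutive scsi host
 * number, and an empty field simply consumes a number.  So, for example, a
 * boot line containing something like
 *
 *	scsihosts=aha1542:ncr53c8xx
 *
 * would associate host number 0 with the first name and host number 1 with
 * the second.  The driver names and the "modprobe scsi_mod scsihosts=..."
 * module form are only examples, not taken from this source.
 */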
2621 int init_module(void)
2623 struct proc_dir_entry *generic;
2625 if( scsi_init_minimal_dma_pool() != 0 )
2627 return 1;
2631 /* This makes /proc/scsi and /proc/scsi/scsi visible. */
2633 #ifdef CONFIG_PROC_FS
2634 proc_scsi = proc_mkdir("scsi", 0);
2635 if (!proc_scsi) {
2636 printk (KERN_ERR "cannot init /proc/scsi\n");
2637 return -ENOMEM;
2639 generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2640 if (!generic) {
2641 printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
2642 remove_proc_entry("scsi", 0);
2643 return -ENOMEM;
2645 generic->write_proc = proc_scsi_gen_write;
2646 #endif
2648 scsi_loadable_module_flag = 1;
2650 scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
2651 scsi_host_no_init (scsihosts);
2653 /* This is where the processing takes place for most everything
2654 * when commands are completed. */
2656 init_bh(SCSI_BH, scsi_bottom_half_handler);
2658 return 0;
2661 void cleanup_module(void)
2663 Scsi_Host_Name *shn, *shn2 = NULL;
2665 remove_bh(SCSI_BH);
2667 devfs_unregister (scsi_devfs_handle);
2668 for (shn = scsi_host_no_list;shn;shn = shn->next) {
2669 if (shn->name)
2670 kfree(shn->name);
2671 if (shn2)
2672 kfree (shn2);
2673 shn2 = shn;
2675 if (shn2)
2676 kfree (shn2);
2678 #ifdef CONFIG_PROC_FS
2679 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2680 remove_proc_entry ("scsi/scsi", 0);
2681 remove_proc_entry ("scsi", 0);
2682 #endif
2685 /* Free up the DMA pool. */
2687 scsi_resize_dma_pool();
2691 #endif /* MODULE */
2694 /* Function: scsi_get_host_dev()
2696 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2698 * Arguments: SHpnt - Host that needs a Scsi_Device
2700 * Lock status: None assumed.
2702 * Returns: The Scsi_Device that was created, for later release with scsi_free_host_dev().
2704 * Notes: */
2706 Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2708 Scsi_Device * SDpnt;
2711 /* Attach a single Scsi_Device to the Scsi_Host - this should
2712 * be made to look like a "pseudo-device" that points to the
2713 * HA itself. For the moment, we include it at the head of
2714 * the host_queue itself - I don't think we want to show this
2715 * to the HA in select_queue_depths(), as this would probably confuse
2716 * matters.
2717 * Note - this device is not accessible from any high-level
2718 * drivers (including generics), which is probably not
2719 * optimal. We can add hooks later to attach. */
2721 SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2722 GFP_ATOMIC);
2723 memset(SDpnt, 0, sizeof(Scsi_Device));
2725 SDpnt->host = SHpnt;
2726 SDpnt->id = SHpnt->this_id;
2727 SDpnt->type = -1;
2728 SDpnt->queue_depth = 1;
2730 scsi_build_commandblocks(SDpnt);
2732 scsi_initialize_queue(SDpnt, SHpnt);
2734 SDpnt->online = TRUE;
2737 /* Initialize the object that we will use to wait for command blocks. */
2739 init_waitqueue_head(&SDpnt->scpnt_wait);
2740 return SDpnt;
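/*
 * Illustrative sketch, not part of the original source: a host driver that
 * needs to issue commands to the adapter itself would pair this helper
 * with scsi_free_host_dev() below, "shpnt" standing in for the driver's
 * own struct Scsi_Host pointer:
 *
 *	Scsi_Device *sdev = scsi_get_host_dev(shpnt);
 *
 *	(build and queue internal commands against sdev here)
 *
 *	scsi_free_host_dev(sdev);
 */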
2744 /* Function: scsi_free_host_dev()
2746 * Purpose: Free a Scsi_Device that points to the host adapter itself.
2748 * Arguments: SDpnt - The Scsi_Device, as returned by scsi_get_host_dev(), to free
2750 * Lock status: None assumed.
2752 * Returns: Nothing
2754 * Notes: */
2756 void scsi_free_host_dev(Scsi_Device * SDpnt)
2758 if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2760 panic("Attempt to delete wrong device\n");
2763 blk_cleanup_queue(&SDpnt->request_queue);
2766 /* We only have a single SCpnt attached to this device. Free
2767 * it now. */
2769 scsi_release_commandblocks(SDpnt);
2770 kfree(SDpnt);
2774 /* Overrides for Emacs so that we follow Linus's tabbing style.
2775 * Emacs will notice this stuff at the end of the file and automatically
2776 * adjust the settings for this buffer only. This must remain at the end
2777 * of the file.
2778 * ---------------------------------------------------------------------------
2779 * Local variables:
2780 * c-indent-level: 4
2781 * c-brace-imaginary-offset: 0
2782 * c-brace-offset: -4
2783 * c-argdecl-indent: 4
2784 * c-label-offset: -4
2785 * c-continued-statement-offset: 4
2786 * c-continued-brace-offset: 0
2787 * indent-tabs-mode: nil
2788 * tab-width: 8
2789 * End:
*/