Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / drivers / scsi / scsi.c
blobcfd290c11860e6370eca9d94a628b85dbdff1f70
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
9 * <drew@colorado.edu>
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * Modified by Eric Youngdale eric@andante.org or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
18 * enhancements.
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/modules.conf)
25 * Bjorn Ekwall <bj0rn@blox.se>
26 * (changed to kmod)
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
32 * Converted cli() code to spinlocks, Ingo Molnar
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 * out_of_space hacks, D. Gilbert (dpg) 990608
39 #define REVISION "Revision: 1.00"
40 #define VERSION "Id: scsi.c 1.00 2000/09/26"
42 #include <linux/config.h>
43 #include <linux/module.h>
45 #include <linux/sched.h>
46 #include <linux/timer.h>
47 #include <linux/string.h>
48 #include <linux/malloc.h>
49 #include <linux/ioport.h>
50 #include <linux/kernel.h>
51 #include <linux/stat.h>
52 #include <linux/blk.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/init.h>
57 #define __KERNEL_SYSCALLS__
59 #include <linux/unistd.h>
60 #include <linux/spinlock.h>
62 #include <asm/system.h>
63 #include <asm/irq.h>
64 #include <asm/dma.h>
65 #include <asm/uaccess.h>
67 #include "scsi.h"
68 #include "hosts.h"
69 #include "constants.h"
71 #ifdef CONFIG_KMOD
72 #include <linux/kmod.h>
73 #endif
75 #undef USE_STATIC_SCSI_MEMORY
77 struct proc_dir_entry *proc_scsi;
79 #ifdef CONFIG_PROC_FS
80 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
81 static void scsi_dump_status(int level);
82 #endif
85 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
89 * Definitions and constants.
92 #define MIN_RESET_DELAY (2*HZ)
94 /* Do not call reset on error if we just did a reset within 15 sec. */
95 #define MIN_RESET_PERIOD (15*HZ)
99 * Data declarations.
101 unsigned long scsi_pid;
102 Scsi_Cmnd *last_cmnd;
/*
 * CDB length in bytes, indexed by SCSI command group (opcode >> 5).
 * Command groups 3 and 4 are reserved and should never be used.
 */
const unsigned char scsi_command_size[8] =
{
	6, 10, 10, 12,
	12, 12, 10, 10
};
109 static unsigned long serial_number;
110 static Scsi_Cmnd *scsi_bh_queue_head;
111 static Scsi_Cmnd *scsi_bh_queue_tail;
114 * Note - the initial logging level can be set here to log events at boot time.
115 * After the system is up, you may enable logging via the /proc interface.
117 unsigned int scsi_logging_level;
119 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
121 "Direct-Access ",
122 "Sequential-Access",
123 "Printer ",
124 "Processor ",
125 "WORM ",
126 "CD-ROM ",
127 "Scanner ",
128 "Optical Device ",
129 "Medium Changer ",
130 "Communications ",
131 "Unknown ",
132 "Unknown ",
133 "Unknown ",
134 "Enclosure ",
138 * Function prototypes.
140 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
141 void scsi_build_commandblocks(Scsi_Device * SDpnt);
144 * These are the interface to the old error handling code. It should go away
145 * someday soon.
147 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
148 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
152 * Function: scsi_initialize_queue()
154 * Purpose: Selects queue handler function for a device.
156 * Arguments: SDpnt - device for which we need a handler function.
158 * Returns: Nothing
160 * Lock status: No locking assumed or required.
162 * Notes: Most devices will end up using scsi_request_fn for the
163 * handler function (at least as things are done now).
164 * The "block" feature basically ensures that only one of
165 * the blocked hosts is active at one time, mainly to work around
166 * buggy DMA chipsets where the memory gets starved.
167 * For this case, we have a special handler function, which
168 * does some checks and ultimately calls scsi_request_fn.
170 * The single_lun feature is a similar special case.
172 * We handle these things by stacking the handlers. The
173 * special case handlers simply check a few conditions,
174 * and return if they are not supposed to do anything.
175 * In the event that things are OK, then they call the next
176 * handler in the list - ultimately they call scsi_request_fn
177 * to do the dirty deed.
179 void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) {
180 blk_init_queue(&SDpnt->request_queue, scsi_request_fn);
181 blk_queue_headactive(&SDpnt->request_queue, 0);
182 SDpnt->request_queue.queuedata = (void *) SDpnt;
185 #ifdef MODULE
186 MODULE_PARM(scsi_logging_level, "i");
187 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
189 #else
191 static int __init scsi_logging_setup(char *str)
193 int tmp;
195 if (get_option(&str, &tmp) == 1) {
196 scsi_logging_level = (tmp ? ~0 : 0);
197 return 1;
198 } else {
199 printk(KERN_INFO "scsi_logging_setup : usage scsi_logging_level=n "
200 "(n should be 0 or non-zero)\n");
201 return 0;
205 __setup("scsi_logging=", scsi_logging_setup);
207 #endif
210 * Issue a command and wait for it to complete
213 static void scsi_wait_done(Scsi_Cmnd * SCpnt)
215 struct request *req;
217 req = &SCpnt->request;
218 req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
220 if (req->sem != NULL) {
221 up(req->sem);
226 * This lock protects the freelist for all devices on the system.
227 * We could make this finer grained by having a single lock per
228 * device if it is ever found that there is excessive contention
229 * on this lock.
231 static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
234 * Used to protect insertion into and removal from the queue of
235 * commands to be processed by the bottom half handler.
237 static spinlock_t scsi_bhqueue_lock = SPIN_LOCK_UNLOCKED;
240 * Function: scsi_allocate_request
242 * Purpose: Allocate a request descriptor.
244 * Arguments: device - device for which we want a request
246 * Lock status: No locks assumed to be held. This function is SMP-safe.
248 * Returns: Pointer to request block.
250 * Notes: With the new queueing code, it becomes important
251 * to track the difference between a command and a
252 * request. A request is a pending item in the queue that
253 * has not yet reached the top of the queue.
256 Scsi_Request *scsi_allocate_request(Scsi_Device * device)
258 Scsi_Request *SRpnt = NULL;
260 if (!device)
261 panic("No device passed to scsi_allocate_request().\n");
263 SRpnt = (Scsi_Request *) kmalloc(sizeof(Scsi_Request), GFP_ATOMIC);
264 if( SRpnt == NULL )
266 return NULL;
269 memset(SRpnt, 0, sizeof(Scsi_Request));
270 SRpnt->sr_device = device;
271 SRpnt->sr_host = device->host;
272 SRpnt->sr_magic = SCSI_REQ_MAGIC;
273 SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
275 return SRpnt;
279 * Function: scsi_release_request
281 * Purpose: Release a request descriptor.
283 * Arguments: device - device for which we want a request
285 * Lock status: No locks assumed to be held. This function is SMP-safe.
287 * Returns: Pointer to request block.
289 * Notes: With the new queueing code, it becomes important
290 * to track the difference between a command and a
291 * request. A request is a pending item in the queue that
292 * has not yet reached the top of the queue. We still need
293 * to free a request when we are done with it, of course.
295 void scsi_release_request(Scsi_Request * req)
297 if( req->sr_command != NULL )
299 scsi_release_command(req->sr_command);
300 req->sr_command = NULL;
303 kfree(req);
307 * Function: scsi_allocate_device
309 * Purpose: Allocate a command descriptor.
311 * Arguments: device - device for which we want a command descriptor
312 * wait - 1 if we should wait in the event that none
313 * are available.
314 * interruptible - 1 if we should unblock and return NULL
315 * in the event that we must wait, and a signal
316 * arrives.
318 * Lock status: No locks assumed to be held. This function is SMP-safe.
320 * Returns: Pointer to command descriptor.
322 * Notes: Prior to the new queue code, this function was not SMP-safe.
324 * If the wait flag is true, and we are waiting for a free
325 * command block, this function will interrupt and return
326 * NULL in the event that a signal arrives that needs to
327 * be handled.
329 * This function is deprecated, and drivers should be
330 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
333 Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait,
334 int interruptable)
336 struct Scsi_Host *host;
337 Scsi_Cmnd *SCpnt = NULL;
338 Scsi_Device *SDpnt;
339 unsigned long flags;
341 if (!device)
342 panic("No device passed to scsi_allocate_device().\n");
344 host = device->host;
346 spin_lock_irqsave(&device_request_lock, flags);
348 while (1 == 1) {
349 SCpnt = NULL;
350 if (!device->device_blocked) {
351 if (device->single_lun) {
353 * FIXME(eric) - this is not at all optimal. Given that
354 * single lun devices are rare and usually slow
355 * (i.e. CD changers), this is good enough for now, but
356 * we may want to come back and optimize this later.
358 * Scan through all of the devices attached to this
359 * host, and see if any are active or not. If so,
360 * we need to defer this command.
362 * We really need a busy counter per device. This would
363 * allow us to more easily figure out whether we should
364 * do anything here or not.
366 for (SDpnt = host->host_queue;
367 SDpnt;
368 SDpnt = SDpnt->next) {
370 * Only look for other devices on the same bus
371 * with the same target ID.
373 if (SDpnt->channel != device->channel
374 || SDpnt->id != device->id
375 || SDpnt == device) {
376 continue;
378 if( atomic_read(&SDpnt->device_active) != 0)
380 break;
383 if (SDpnt) {
385 * Some other device in this cluster is busy.
386 * If asked to wait, we need to wait, otherwise
387 * return NULL.
389 SCpnt = NULL;
390 goto busy;
394 * Now we can check for a free command block for this device.
396 for (SCpnt = device->device_queue; SCpnt; SCpnt = SCpnt->next) {
397 if (SCpnt->request.rq_status == RQ_INACTIVE)
398 break;
402 * If we couldn't find a free command block, and we have been
403 * asked to wait, then do so.
405 if (SCpnt) {
406 break;
408 busy:
410 * If we have been asked to wait for a free block, then
411 * wait here.
413 if (wait) {
414 DECLARE_WAITQUEUE(wait, current);
417 * We need to wait for a free commandblock. We need to
418 * insert ourselves into the list before we release the
419 * lock. This way if a block were released the same
420 * microsecond that we released the lock, the call
421 * to schedule() wouldn't block (well, it might switch,
422 * but the current task will still be schedulable.
424 add_wait_queue(&device->scpnt_wait, &wait);
425 if( interruptable ) {
426 set_current_state(TASK_INTERRUPTIBLE);
427 } else {
428 set_current_state(TASK_UNINTERRUPTIBLE);
431 spin_unlock_irqrestore(&device_request_lock, flags);
434 * This should block until a device command block
435 * becomes available.
437 schedule();
439 spin_lock_irqsave(&device_request_lock, flags);
441 remove_wait_queue(&device->scpnt_wait, &wait);
443 * FIXME - Isn't this redundant?? Someone
444 * else will have forced the state back to running.
446 set_current_state(TASK_RUNNING);
448 * In the event that a signal has arrived that we need
449 * to consider, then simply return NULL. Everyone
450 * that calls us should be prepared for this
451 * possibility, and pass the appropriate code back
452 * to the user.
454 if( interruptable ) {
455 if (signal_pending(current)) {
456 spin_unlock_irqrestore(&device_request_lock, flags);
457 return NULL;
460 } else {
461 spin_unlock_irqrestore(&device_request_lock, flags);
462 return NULL;
466 SCpnt->request.rq_status = RQ_SCSI_BUSY;
467 SCpnt->request.sem = NULL; /* And no one is waiting for this
468 * to complete */
469 atomic_inc(&SCpnt->host->host_active);
470 atomic_inc(&SCpnt->device->device_active);
472 SCpnt->buffer = NULL;
473 SCpnt->bufflen = 0;
474 SCpnt->request_buffer = NULL;
475 SCpnt->request_bufflen = 0;
477 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
478 SCpnt->old_use_sg = 0;
479 SCpnt->transfersize = 0; /* No default transfer size */
480 SCpnt->cmd_len = 0;
482 SCpnt->sc_data_direction = SCSI_DATA_UNKNOWN;
483 SCpnt->sc_request = NULL;
484 SCpnt->sc_magic = SCSI_CMND_MAGIC;
486 SCpnt->result = 0;
487 SCpnt->underflow = 0; /* Do not flag underflow conditions */
488 SCpnt->old_underflow = 0;
489 SCpnt->resid = 0;
490 SCpnt->state = SCSI_STATE_INITIALIZING;
491 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
493 spin_unlock_irqrestore(&device_request_lock, flags);
495 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
496 SCpnt->target,
497 atomic_read(&SCpnt->host->host_active)));
499 return SCpnt;
502 inline void __scsi_release_command(Scsi_Cmnd * SCpnt)
504 unsigned long flags;
505 Scsi_Device * SDpnt;
507 spin_lock_irqsave(&device_request_lock, flags);
509 SDpnt = SCpnt->device;
511 SCpnt->request.rq_status = RQ_INACTIVE;
512 SCpnt->state = SCSI_STATE_UNUSED;
513 SCpnt->owner = SCSI_OWNER_NOBODY;
514 atomic_dec(&SCpnt->host->host_active);
515 atomic_dec(&SDpnt->device_active);
517 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
518 SCpnt->target,
519 atomic_read(&SCpnt->host->host_active),
520 SCpnt->host->host_failed));
521 if (SCpnt->host->host_failed != 0) {
522 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
523 SCpnt->host->in_recovery,
524 SCpnt->host->eh_active));
527 * If the host is having troubles, then look to see if this was the last
528 * command that might have failed. If so, wake up the error handler.
530 if (SCpnt->host->in_recovery
531 && !SCpnt->host->eh_active
532 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
533 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
534 atomic_read(&SCpnt->host->eh_wait->count)));
535 up(SCpnt->host->eh_wait);
538 spin_unlock_irqrestore(&device_request_lock, flags);
541 * Wake up anyone waiting for this device. Do this after we
542 * have released the lock, as they will need it as soon as
543 * they wake up.
545 wake_up(&SDpnt->scpnt_wait);
549 * Function: scsi_release_command
551 * Purpose: Release a command block.
553 * Arguments: SCpnt - command block we are releasing.
555 * Notes: The command block can no longer be used by the caller once
 556 * this function is called. This is in effect the inverse
557 * of scsi_allocate_device. Note that we also must perform
558 * a couple of additional tasks. We must first wake up any
559 * processes that might have blocked waiting for a command
560 * block, and secondly we must hit the queue handler function
561 * to make sure that the device is busy. Note - there is an
562 * option to not do this - there were instances where we could
563 * recurse too deeply and blow the stack if this happened
564 * when we were indirectly called from the request function
565 * itself.
567 * The idea is that a lot of the mid-level internals gunk
568 * gets hidden in this function. Upper level drivers don't
569 * have any chickens to wave in the air to get things to
570 * work reliably.
572 * This function is deprecated, and drivers should be
573 * rewritten to use Scsi_Request instead of Scsi_Cmnd.
575 void scsi_release_command(Scsi_Cmnd * SCpnt)
577 request_queue_t *q;
578 Scsi_Device * SDpnt;
580 SDpnt = SCpnt->device;
582 __scsi_release_command(SCpnt);
585 * Finally, hit the queue request function to make sure that
586 * the device is actually busy if there are requests present.
587 * This won't block - if the device cannot take any more, life
588 * will go on.
590 q = &SDpnt->request_queue;
591 scsi_queue_next_request(q, NULL);
595 * Function: scsi_dispatch_command
597 * Purpose: Dispatch a command to the low-level driver.
599 * Arguments: SCpnt - command block we are dispatching.
601 * Notes:
603 int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
605 #ifdef DEBUG_DELAY
606 unsigned long clock;
607 #endif
608 struct Scsi_Host *host;
609 int rtn = 0;
610 unsigned long flags = 0;
611 unsigned long timeout;
613 ASSERT_LOCK(&io_request_lock, 0);
615 #if DEBUG
616 unsigned long *ret = 0;
617 #ifdef __mips__
618 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
619 #else
620 ret = __builtin_return_address(0);
621 #endif
622 #endif
624 host = SCpnt->host;
626 /* Assign a unique nonzero serial_number. */
627 if (++serial_number == 0)
628 serial_number = 1;
629 SCpnt->serial_number = serial_number;
632 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
633 * we can avoid the drive not being ready.
635 timeout = host->last_reset + MIN_RESET_DELAY;
637 if (host->resetting && time_before(jiffies, timeout)) {
638 int ticks_remaining = timeout - jiffies;
640 * NOTE: This may be executed from within an interrupt
641 * handler! This is bad, but for now, it'll do. The irq
642 * level of the interrupt handler has been masked out by the
643 * platform dependent interrupt handling code already, so the
644 * sti() here will not cause another call to the SCSI host's
645 * interrupt handler (assuming there is one irq-level per
646 * host).
648 while (--ticks_remaining >= 0)
649 mdelay(1 + 999 / HZ);
650 host->resetting = 0;
652 if (host->hostt->use_new_eh_code) {
653 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
654 } else {
655 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
656 scsi_old_times_out);
660 * We will use a queued command if possible, otherwise we will emulate the
661 * queuing and calling of completion function ourselves.
663 SCSI_LOG_MLQUEUE(3, printk("scsi_dispatch_cmnd (host = %d, channel = %d, target = %d, "
664 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
665 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
666 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
668 SCpnt->state = SCSI_STATE_QUEUED;
669 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
670 if (host->can_queue) {
671 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
672 host->hostt->queuecommand));
674 * Use the old error handling code if we haven't converted the driver
675 * to use the new one yet. Note - only the new queuecommand variant
676 * passes a meaningful return value.
678 if (host->hostt->use_new_eh_code) {
679 spin_lock_irqsave(&io_request_lock, flags);
680 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
681 spin_unlock_irqrestore(&io_request_lock, flags);
682 if (rtn != 0) {
683 scsi_delete_timer(SCpnt);
684 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
685 SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n"));
687 } else {
688 spin_lock_irqsave(&io_request_lock, flags);
689 host->hostt->queuecommand(SCpnt, scsi_old_done);
690 spin_unlock_irqrestore(&io_request_lock, flags);
692 } else {
693 int temp;
695 SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
696 spin_lock_irqsave(&io_request_lock, flags);
697 temp = host->hostt->command(SCpnt);
698 SCpnt->result = temp;
699 #ifdef DEBUG_DELAY
700 spin_unlock_irqrestore(&io_request_lock, flags);
701 clock = jiffies + 4 * HZ;
702 while (time_before(jiffies, clock))
703 barrier();
704 printk("done(host = %d, result = %04x) : routine at %p\n",
705 host->host_no, temp, host->hostt->command);
706 spin_lock_irqsave(&io_request_lock, flags);
707 #endif
708 if (host->hostt->use_new_eh_code) {
709 scsi_done(SCpnt);
710 } else {
711 scsi_old_done(SCpnt);
713 spin_unlock_irqrestore(&io_request_lock, flags);
715 SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
716 return rtn;
719 devfs_handle_t scsi_devfs_handle;
722 * scsi_do_cmd sends all the commands out to the low-level driver. It
723 * handles the specifics required for each low level driver - ie queued
724 * or non queued. It also prevents conflicts when different high level
725 * drivers go for the same host at the same time.
728 void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
729 void *buffer, unsigned bufflen,
730 int timeout, int retries)
732 DECLARE_MUTEX_LOCKED(sem);
734 SRpnt->sr_request.sem = &sem;
735 SRpnt->sr_request.rq_status = RQ_SCSI_BUSY;
736 scsi_do_req (SRpnt, (void *) cmnd,
737 buffer, bufflen, scsi_wait_done, timeout, retries);
738 down (&sem);
739 SRpnt->sr_request.sem = NULL;
740 if( SRpnt->sr_command != NULL )
742 scsi_release_command(SRpnt->sr_command);
743 SRpnt->sr_command = NULL;
749 * Function: scsi_do_req
751 * Purpose: Queue a SCSI request
753 * Arguments: SRpnt - command descriptor.
754 * cmnd - actual SCSI command to be performed.
755 * buffer - data buffer.
756 * bufflen - size of data buffer.
757 * done - completion function to be run.
758 * timeout - how long to let it run before timeout.
759 * retries - number of retries we allow.
761 * Lock status: With the new queueing code, this is SMP-safe, and no locks
762 * need be held upon entry. The old queueing code the lock was
763 * assumed to be held upon entry.
765 * Returns: Nothing.
767 * Notes: Prior to the new queue code, this function was not SMP-safe.
768 * Also, this function is now only used for queueing requests
769 * for things like ioctls and character device requests - this
770 * is because we essentially just inject a request into the
771 * queue for the device. Normal block device handling manipulates
772 * the queue directly.
774 void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
775 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
776 int timeout, int retries)
778 Scsi_Device * SDpnt = SRpnt->sr_device;
779 struct Scsi_Host *host = SDpnt->host;
781 ASSERT_LOCK(&io_request_lock, 0);
783 SCSI_LOG_MLQUEUE(4,
785 int i;
786 int target = SDpnt->id;
787 printk("scsi_do_req (host = %d, channel = %d target = %d, "
788 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
789 "retries = %d)\n"
790 "command : ", host->host_no, SDpnt->channel, target, buffer,
791 bufflen, done, timeout, retries);
792 for (i = 0; i < 10; ++i)
793 printk("%02x ", ((unsigned char *) cmnd)[i]);
794 printk("\n");
797 if (!host) {
798 panic("Invalid or not present host.\n");
802 * If the upper level driver is reusing these things, then
803 * we should release the low-level block now. Another one will
804 * be allocated later when this request is getting queued.
806 if( SRpnt->sr_command != NULL )
808 scsi_release_command(SRpnt->sr_command);
809 SRpnt->sr_command = NULL;
813 * We must prevent reentrancy to the lowlevel host driver. This prevents
814 * it - we enter a loop until the host we want to talk to is not busy.
815 * Race conditions are prevented, as interrupts are disabled in between the
816 * time we check for the host being not busy, and the time we mark it busy
817 * ourselves.
822 * Our own function scsi_done (which marks the host as not busy, disables
823 * the timeout counter, etc) will be called by us or by the
824 * scsi_hosts[host].queuecommand() function needs to also call
825 * the completion function for the high level driver.
828 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
829 sizeof(SRpnt->sr_cmnd));
830 SRpnt->sr_bufflen = bufflen;
831 SRpnt->sr_buffer = buffer;
832 SRpnt->sr_allowed = retries;
833 SRpnt->sr_done = done;
834 SRpnt->sr_timeout_per_command = timeout;
836 memcpy((void *) SRpnt->sr_cmnd, (const void *) cmnd,
837 sizeof(SRpnt->sr_cmnd));
839 if (SRpnt->sr_cmd_len == 0)
840 SRpnt->sr_cmd_len = COMMAND_SIZE(SRpnt->sr_cmnd[0]);
843 * At this point, we merely set up the command, stick it in the normal
844 * request queue, and return. Eventually that request will come to the
845 * top of the list, and will be dispatched.
847 scsi_insert_special_req(SRpnt, 0);
849 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
853 * Function: scsi_init_cmd_from_req
855 * Purpose: Queue a SCSI command
856 * Purpose: Initialize a Scsi_Cmnd from a Scsi_Request
858 * Arguments: SCpnt - command descriptor.
859 * SRpnt - Request from the queue.
861 * Lock status: None needed.
863 * Returns: Nothing.
865 * Notes: Mainly transfer data from the request structure to the
866 * command structure. The request structure is allocated
867 * using the normal memory allocator, and requests can pile
868 * up to more or less any depth. The command structure represents
869 * a consumable resource, as these are allocated into a pool
870 * when the SCSI subsystem initializes. The preallocation is
871 * required so that in low-memory situations a disk I/O request
872 * won't cause the memory manager to try and write out a page.
873 * The request structure is generally used by ioctls and character
874 * devices.
876 void scsi_init_cmd_from_req(Scsi_Cmnd * SCpnt, Scsi_Request * SRpnt)
878 struct Scsi_Host *host = SCpnt->host;
880 ASSERT_LOCK(&io_request_lock, 0);
882 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
883 SRpnt->sr_command = SCpnt;
885 if (!host) {
886 panic("Invalid or not present host.\n");
889 SCpnt->cmd_len = SRpnt->sr_cmd_len;
890 SCpnt->use_sg = SRpnt->sr_use_sg;
892 memcpy((void *) &SCpnt->request, (const void *) &SRpnt->sr_request,
893 sizeof(SRpnt->sr_request));
894 memcpy((void *) SCpnt->data_cmnd, (const void *) SRpnt->sr_cmnd,
895 sizeof(SCpnt->data_cmnd));
896 SCpnt->reset_chain = NULL;
897 SCpnt->serial_number = 0;
898 SCpnt->serial_number_at_timeout = 0;
899 SCpnt->bufflen = SRpnt->sr_bufflen;
900 SCpnt->buffer = SRpnt->sr_buffer;
901 SCpnt->flags = 0;
902 SCpnt->retries = 0;
903 SCpnt->allowed = SRpnt->sr_allowed;
904 SCpnt->done = SRpnt->sr_done;
905 SCpnt->timeout_per_command = SRpnt->sr_timeout_per_command;
907 SCpnt->sc_data_direction = SRpnt->sr_data_direction;
909 SCpnt->sglist_len = SRpnt->sr_sglist_len;
910 SCpnt->underflow = SRpnt->sr_underflow;
912 SCpnt->sc_request = SRpnt;
914 memcpy((void *) SCpnt->cmnd, (const void *) SRpnt->sr_cmnd,
915 sizeof(SCpnt->cmnd));
916 /* Zero the sense buffer. Some host adapters automatically request
917 * sense on error. 0 is not a valid sense code.
919 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
920 SCpnt->request_buffer = SRpnt->sr_buffer;
921 SCpnt->request_bufflen = SRpnt->sr_bufflen;
922 SCpnt->old_use_sg = SCpnt->use_sg;
923 if (SCpnt->cmd_len == 0)
924 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
925 SCpnt->old_cmd_len = SCpnt->cmd_len;
926 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
927 SCpnt->old_underflow = SCpnt->underflow;
929 /* Start the timer ticking. */
931 SCpnt->internal_timeout = NORMAL_TIMEOUT;
932 SCpnt->abort_reason = 0;
933 SCpnt->result = 0;
935 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
939 * Function: scsi_do_cmd
941 * Purpose: Queue a SCSI command
943 * Arguments: SCpnt - command descriptor.
944 * cmnd - actual SCSI command to be performed.
945 * buffer - data buffer.
946 * bufflen - size of data buffer.
947 * done - completion function to be run.
948 * timeout - how long to let it run before timeout.
949 * retries - number of retries we allow.
951 * Lock status: With the new queueing code, this is SMP-safe, and no locks
952 * need be held upon entry. The old queueing code the lock was
953 * assumed to be held upon entry.
955 * Returns: Nothing.
957 * Notes: Prior to the new queue code, this function was not SMP-safe.
958 * Also, this function is now only used for queueing requests
959 * for things like ioctls and character device requests - this
960 * is because we essentially just inject a request into the
961 * queue for the device. Normal block device handling manipulates
962 * the queue directly.
964 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
965 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
966 int timeout, int retries)
968 struct Scsi_Host *host = SCpnt->host;
970 ASSERT_LOCK(&io_request_lock, 0);
972 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
974 SCSI_LOG_MLQUEUE(4,
976 int i;
977 int target = SCpnt->target;
978 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
979 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
980 "retries = %d)\n"
981 "command : ", host->host_no, SCpnt->channel, target, buffer,
982 bufflen, done, timeout, retries);
983 for (i = 0; i < 10; ++i)
984 printk("%02x ", ((unsigned char *) cmnd)[i]);
985 printk("\n");
988 if (!host) {
989 panic("Invalid or not present host.\n");
992 * We must prevent reentrancy to the lowlevel host driver. This prevents
993 * it - we enter a loop until the host we want to talk to is not busy.
994 * Race conditions are prevented, as interrupts are disabled in between the
995 * time we check for the host being not busy, and the time we mark it busy
996 * ourselves.
1001 * Our own function scsi_done (which marks the host as not busy, disables
1002 * the timeout counter, etc) will be called by us or by the
1003 * scsi_hosts[host].queuecommand() function needs to also call
1004 * the completion function for the high level driver.
1007 memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd,
1008 sizeof(SCpnt->data_cmnd));
1009 SCpnt->reset_chain = NULL;
1010 SCpnt->serial_number = 0;
1011 SCpnt->serial_number_at_timeout = 0;
1012 SCpnt->bufflen = bufflen;
1013 SCpnt->buffer = buffer;
1014 SCpnt->flags = 0;
1015 SCpnt->retries = 0;
1016 SCpnt->allowed = retries;
1017 SCpnt->done = done;
1018 SCpnt->timeout_per_command = timeout;
1020 memcpy((void *) SCpnt->cmnd, (const void *) cmnd,
1021 sizeof(SCpnt->cmnd));
1022 /* Zero the sense buffer. Some host adapters automatically request
1023 * sense on error. 0 is not a valid sense code.
1025 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1026 SCpnt->request_buffer = buffer;
1027 SCpnt->request_bufflen = bufflen;
1028 SCpnt->old_use_sg = SCpnt->use_sg;
1029 if (SCpnt->cmd_len == 0)
1030 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1031 SCpnt->old_cmd_len = SCpnt->cmd_len;
1032 SCpnt->sc_old_data_direction = SCpnt->sc_data_direction;
1033 SCpnt->old_underflow = SCpnt->underflow;
1035 /* Start the timer ticking. */
1037 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1038 SCpnt->abort_reason = 0;
1039 SCpnt->result = 0;
1042 * At this point, we merely set up the command, stick it in the normal
1043 * request queue, and return. Eventually that request will come to the
1044 * top of the list, and will be dispatched.
1046 scsi_insert_special_cmd(SCpnt, 0);
1048 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1052 * This function is the mid-level interrupt routine, which decides how
1053 * to handle error conditions. Each invocation of this function must
1054 * do one and *only* one of the following:
1056 * 1) Insert command in BH queue.
1057 * 2) Activate error handler for host.
1059 * FIXME(eric) - I am concerned about stack overflow (still). An
1060 * interrupt could come while we are processing the bottom queue,
1061 * which would cause another command to be stuffed onto the bottom
1062 * queue, and it would in turn be processed as that interrupt handler
1063 * is returning. Given a sufficiently steady rate of returning
1064 * commands, this could cause the stack to overflow. I am not sure
1065 * what is the most appropriate solution here - we should probably
1066 * keep a depth count, and not process any commands while we still
1067 * have a bottom handler active higher in the stack.
1069 * There is currently code in the bottom half handler to monitor
1070 * recursion in the bottom handler and report if it ever happens. If
1071 * this becomes a problem, it won't be hard to engineer something to
1072 * deal with it so that only the outer layer ever does any real
1073 * processing.
1075 void scsi_done(Scsi_Cmnd * SCpnt)
1077 unsigned long flags;
1078 int tstatus;
1081 * We don't have to worry about this one timing out any more.
1083 tstatus = scsi_delete_timer(SCpnt);
1086 * If we are unable to remove the timer, it means that the command
1087 * has already timed out. In this case, we have no choice but to
1088 * let the timeout function run, as we have no idea where in fact
1089 * that function could really be. It might be on another processor,
1090 * etc, etc.
1092 if (!tstatus) {
1093 SCpnt->done_late = 1;
1094 return;
1096 /* Set the serial numbers back to zero */
1097 SCpnt->serial_number = 0;
1100 * First, see whether this command already timed out. If so, we ignore
1101 * the response. We treat it as if the command never finished.
1103 * Since serial_number is now 0, the error handler cound detect this
1104 * situation and avoid to call the the low level driver abort routine.
1105 * (DB)
1107 * FIXME(eric) - I believe that this test is now redundant, due to
1108 * the test of the return status of del_timer().
1110 if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1111 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1112 return;
1114 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1116 SCpnt->serial_number_at_timeout = 0;
1117 SCpnt->state = SCSI_STATE_BHQUEUE;
1118 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1119 SCpnt->bh_next = NULL;
1122 * Next, put this command in the BH queue.
1124 * We need a spinlock here, or compare and exchange if we can reorder incoming
1125 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1126 * before bh is serviced. -jj
1128 * We already have the io_request_lock here, since we are called from the
1129 * interrupt handler or the error handler. (DB)
1131 * This may be true at the moment, but I would like to wean all of the low
1132 * level drivers away from using io_request_lock. Technically they should
1133 * all use their own locking. I am adding a small spinlock to protect
1134 * this datastructure to make it safe for that day. (ERY)
1136 if (!scsi_bh_queue_head) {
1137 scsi_bh_queue_head = SCpnt;
1138 scsi_bh_queue_tail = SCpnt;
1139 } else {
1140 scsi_bh_queue_tail->bh_next = SCpnt;
1141 scsi_bh_queue_tail = SCpnt;
1144 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1146 * Mark the bottom half handler to be run.
1148 mark_bh(SCSI_BH);
1152 * Procedure: scsi_bottom_half_handler
1154 * Purpose: Called after we have finished processing interrupts, it
1155 * performs post-interrupt handling for commands that may
1156 * have completed.
1158 * Notes: This is called with all interrupts enabled. This should reduce
1159 * interrupt latency, stack depth, and reentrancy of the low-level
1160 * drivers.
1162 * The io_request_lock is required in all the routine. There was a subtle
1163 * race condition when scsi_done is called after a command has already
1164 * timed out but before the time out is processed by the error handler.
1165 * (DB)
1167 * I believe I have corrected this. We simply monitor the return status of
1168 * del_timer() - if this comes back as 0, it means that the timer has fired
1169 * and that a timeout is in progress. I have modified scsi_done() such
1170 * that in this instance the command is never inserted in the bottom
1171 * half queue. Thus the only time we hold the lock here is when
1172 * we wish to atomically remove the contents of the queue.
1174 void scsi_bottom_half_handler(void)
1176 Scsi_Cmnd *SCpnt;
1177 Scsi_Cmnd *SCnext;
1178 unsigned long flags;
1181 while (1 == 1) {
1182 spin_lock_irqsave(&scsi_bhqueue_lock, flags);
1183 SCpnt = scsi_bh_queue_head;
1184 scsi_bh_queue_head = NULL;
1185 spin_unlock_irqrestore(&scsi_bhqueue_lock, flags);
1187 if (SCpnt == NULL) {
1188 return;
1190 SCnext = SCpnt->bh_next;
1192 for (; SCpnt; SCpnt = SCnext) {
1193 SCnext = SCpnt->bh_next;
1195 switch (scsi_decide_disposition(SCpnt)) {
1196 case SUCCESS:
1198 * Add to BH queue.
1200 SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1201 SCpnt->host->host_failed,
1202 SCpnt->result));
1204 scsi_finish_command(SCpnt);
1205 break;
1206 case NEEDS_RETRY:
1208 * We only come in here if we want to retry a command. The
1209 * test to see whether the command should be retried should be
1210 * keeping track of the number of tries, so we don't end up looping,
1211 * of course.
1213 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1214 SCpnt->host->host_failed, SCpnt->result));
1216 scsi_retry_command(SCpnt);
1217 break;
1218 case ADD_TO_MLQUEUE:
1220 * This typically happens for a QUEUE_FULL message -
1221 * typically only when the queue depth is only
1222 * approximate for a given device. Adding a command
1223 * to the queue for the device will prevent further commands
1224 * from being sent to the device, so we shouldn't end up
1225 * with tons of things being sent down that shouldn't be.
1227 SCSI_LOG_MLCOMPLETE(3, printk("Command rejected as device queue full, put on ml queue %p\n",
1228 SCpnt));
1229 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1230 break;
1231 default:
1233 * Here we have a fatal error of some sort. Turn it over to
1234 * the error handler.
1236 SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1237 SCpnt, SCpnt->result,
1238 atomic_read(&SCpnt->host->host_active),
1239 SCpnt->host->host_busy,
1240 SCpnt->host->host_failed));
1243 * Dump the sense information too.
1245 if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1246 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1248 if (SCpnt->host->eh_wait != NULL) {
1249 SCpnt->host->host_failed++;
1250 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1251 SCpnt->state = SCSI_STATE_FAILED;
1252 SCpnt->host->in_recovery = 1;
1254 * If the host is having troubles, then look to see if this was the last
1255 * command that might have failed. If so, wake up the error handler.
1257 if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1258 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1259 atomic_read(&SCpnt->host->eh_wait->count)));
1260 up(SCpnt->host->eh_wait);
1262 } else {
1264 * We only get here if the error recovery thread has died.
1266 scsi_finish_command(SCpnt);
1269 } /* for(; SCpnt...) */
1271 } /* while(1==1) */
1276 * Function: scsi_retry_command
1278 * Purpose: Send a command back to the low level to be retried.
1280 * Notes: This command is always executed in the context of the
1281 * bottom half handler, or the error handler thread. Low
1282 * level drivers should not become re-entrant as a result of
1283 * this.
1285 int scsi_retry_command(Scsi_Cmnd * SCpnt)
1287 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1288 sizeof(SCpnt->data_cmnd));
1289 SCpnt->request_buffer = SCpnt->buffer;
1290 SCpnt->request_bufflen = SCpnt->bufflen;
1291 SCpnt->use_sg = SCpnt->old_use_sg;
1292 SCpnt->cmd_len = SCpnt->old_cmd_len;
1293 SCpnt->sc_data_direction = SCpnt->sc_old_data_direction;
1294 SCpnt->underflow = SCpnt->old_underflow;
1297 * Zero the sense information from the last time we tried
1298 * this command.
1300 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1302 return scsi_dispatch_cmd(SCpnt);
1306 * Function: scsi_finish_command
1308 * Purpose: Pass command off to upper layer for finishing of I/O
1309 * request, waking processes that are waiting on results,
1310 * etc.
1312 void scsi_finish_command(Scsi_Cmnd * SCpnt)
1314 struct Scsi_Host *host;
1315 Scsi_Device *device;
1316 Scsi_Request * SRpnt;
1317 unsigned long flags;
1319 ASSERT_LOCK(&io_request_lock, 0);
1321 host = SCpnt->host;
1322 device = SCpnt->device;
1325 * We need to protect the decrement, as otherwise a race condition
1326 * would exist. Fiddling with SCpnt isn't a problem as the
1327 * design only allows a single SCpnt to be active in only
1328 * one execution context, but the device and host structures are
1329 * shared.
1331 spin_lock_irqsave(&io_request_lock, flags);
1332 host->host_busy--; /* Indicate that we are free */
1333 device->device_busy--; /* Decrement device usage counter. */
1334 spin_unlock_irqrestore(&io_request_lock, flags);
1337 * Clear the flags which say that the device/host is no longer
1338 * capable of accepting new commands. These are set in scsi_queue.c
1339 * for both the queue full condition on a device, and for a
1340 * host full condition on the host.
1342 host->host_blocked = FALSE;
1343 device->device_blocked = FALSE;
1346 * If we have valid sense information, then some kind of recovery
1347 * must have taken place. Make a note of this.
1349 if (scsi_sense_valid(SCpnt)) {
1350 SCpnt->result |= (DRIVER_SENSE << 24);
1352 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1353 SCpnt->device->id, SCpnt->result));
1355 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1356 SCpnt->state = SCSI_STATE_FINISHED;
1358 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1359 SCpnt->use_sg = SCpnt->old_use_sg;
1362 * If there is an associated request structure, copy the data over before we call the
1363 * completion function.
1365 SRpnt = SCpnt->sc_request;
1366 if( SRpnt != NULL ) {
1367 SRpnt->sr_result = SRpnt->sr_command->result;
1368 if( SRpnt->sr_result != 0 ) {
1369 memcpy(SRpnt->sr_sense_buffer,
1370 SRpnt->sr_command->sense_buffer,
1371 sizeof(SRpnt->sr_sense_buffer));
1375 SCpnt->done(SCpnt);
1378 static int scsi_register_host(Scsi_Host_Template *);
1379 static void scsi_unregister_host(Scsi_Host_Template *);
1382 * Function: scsi_release_commandblocks()
1384 * Purpose: Release command blocks associated with a device.
1386 * Arguments: SDpnt - device
1388 * Returns: Nothing
1390 * Lock status: No locking assumed or required.
1392 * Notes:
1394 void scsi_release_commandblocks(Scsi_Device * SDpnt)
1396 Scsi_Cmnd *SCpnt, *SCnext;
1397 unsigned long flags;
1399 spin_lock_irqsave(&device_request_lock, flags);
1400 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCnext) {
1401 SDpnt->device_queue = SCnext = SCpnt->next;
1402 kfree((char *) SCpnt);
1404 SDpnt->has_cmdblocks = 0;
1405 SDpnt->queue_depth = 0;
1406 spin_unlock_irqrestore(&device_request_lock, flags);
1410 * Function: scsi_build_commandblocks()
1412 * Purpose: Allocate command blocks associated with a device.
1414 * Arguments: SDpnt - device
1416 * Returns: Nothing
1418 * Lock status: No locking assumed or required.
1420 * Notes:
1422 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1424 unsigned long flags;
1425 struct Scsi_Host *host = SDpnt->host;
1426 int j;
1427 Scsi_Cmnd *SCpnt;
1429 spin_lock_irqsave(&device_request_lock, flags);
1431 if (SDpnt->queue_depth == 0)
1433 SDpnt->queue_depth = host->cmd_per_lun;
1434 if (SDpnt->queue_depth == 0)
1435 SDpnt->queue_depth = 1; /* live to fight another day */
1437 SDpnt->device_queue = NULL;
1439 for (j = 0; j < SDpnt->queue_depth; j++) {
1440 SCpnt = (Scsi_Cmnd *)
1441 kmalloc(sizeof(Scsi_Cmnd),
1442 GFP_ATOMIC |
1443 (host->unchecked_isa_dma ? GFP_DMA : 0));
1444 if (NULL == SCpnt)
1445 break; /* If not, the next line will oops ... */
1446 memset(SCpnt, 0, sizeof(Scsi_Cmnd));
1447 SCpnt->host = host;
1448 SCpnt->device = SDpnt;
1449 SCpnt->target = SDpnt->id;
1450 SCpnt->lun = SDpnt->lun;
1451 SCpnt->channel = SDpnt->channel;
1452 SCpnt->request.rq_status = RQ_INACTIVE;
1453 SCpnt->use_sg = 0;
1454 SCpnt->old_use_sg = 0;
1455 SCpnt->old_cmd_len = 0;
1456 SCpnt->underflow = 0;
1457 SCpnt->old_underflow = 0;
1458 SCpnt->transfersize = 0;
1459 SCpnt->resid = 0;
1460 SCpnt->serial_number = 0;
1461 SCpnt->serial_number_at_timeout = 0;
1462 SCpnt->host_scribble = NULL;
1463 SCpnt->next = SDpnt->device_queue;
1464 SDpnt->device_queue = SCpnt;
1465 SCpnt->state = SCSI_STATE_UNUSED;
1466 SCpnt->owner = SCSI_OWNER_NOBODY;
1468 if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1469 printk(KERN_WARNING "scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1470 SDpnt->queue_depth, j);
1471 SDpnt->queue_depth = j;
1472 SDpnt->has_cmdblocks = (0 != j);
1473 } else {
1474 SDpnt->has_cmdblocks = 1;
1476 spin_unlock_irqrestore(&device_request_lock, flags);
1479 static int proc_scsi_gen_write(struct file * file, const char * buf,
1480 unsigned long length, void *data);
1482 void __init scsi_host_no_insert(char *str, int n)
1484 Scsi_Host_Name *shn, *shn2;
1485 int len;
1487 len = strlen(str);
1488 if (len && (shn = (Scsi_Host_Name *) kmalloc(sizeof(Scsi_Host_Name), GFP_ATOMIC))) {
1489 if ((shn->name = kmalloc(len+1, GFP_ATOMIC))) {
1490 strncpy(shn->name, str, len);
1491 shn->name[len] = 0;
1492 shn->host_no = n;
1493 shn->host_registered = 0;
1494 shn->loaded_as_module = 1; /* numbers shouldn't be freed in any case */
1495 shn->next = NULL;
1496 if (scsi_host_no_list) {
1497 for (shn2 = scsi_host_no_list;shn2->next;shn2 = shn2->next)
1499 shn2->next = shn;
1501 else
1502 scsi_host_no_list = shn;
1503 max_scsi_hosts = n+1;
1505 else
1506 kfree((char *) shn);
1510 #ifdef CONFIG_PROC_FS
1511 static int scsi_proc_info(char *buffer, char **start, off_t offset, int length)
1513 Scsi_Device *scd;
1514 struct Scsi_Host *HBA_ptr;
1515 int size, len = 0;
1516 off_t begin = 0;
1517 off_t pos = 0;
1520 * First, see if there are any attached devices or not.
1522 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1523 if (HBA_ptr->host_queue != NULL) {
1524 break;
1527 size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
1528 len += size;
1529 pos = begin + len;
1530 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1531 #if 0
1532 size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
1533 HBA_ptr->hostt->procname);
1534 len += size;
1535 pos = begin + len;
1536 #endif
1537 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1538 proc_print_scsidevice(scd, buffer, &size, len);
1539 len += size;
1540 pos = begin + len;
1542 if (pos < offset) {
1543 len = 0;
1544 begin = pos;
1546 if (pos > offset + length)
1547 goto stop_output;
1551 stop_output:
1552 *start = buffer + (offset - begin); /* Start of wanted data */
1553 len -= (offset - begin); /* Start slop */
1554 if (len > length)
1555 len = length; /* Ending slop */
1556 return (len);
1559 static int proc_scsi_gen_write(struct file * file, const char * buf,
1560 unsigned long length, void *data)
1562 struct Scsi_Device_Template *SDTpnt;
1563 Scsi_Device *scd;
1564 struct Scsi_Host *HBA_ptr;
1565 char *p;
1566 int host, channel, id, lun;
1567 char * buffer;
1568 int err;
1570 if (!buf || length>PAGE_SIZE)
1571 return -EINVAL;
1573 if (!(buffer = (char *) __get_free_page(GFP_KERNEL)))
1574 return -ENOMEM;
1575 copy_from_user(buffer, buf, length);
1577 err = -EINVAL;
1578 if (length < 11 || strncmp("scsi", buffer, 4))
1579 goto out;
1582 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
1583 * to dump status of all scsi commands. The number is used to specify the level
1584 * of detail in the dump.
1586 if (!strncmp("dump", buffer + 5, 4)) {
1587 unsigned int level;
1589 p = buffer + 10;
1591 if (*p == '\0')
1592 goto out;
1594 level = simple_strtoul(p, NULL, 0);
1595 scsi_dump_status(level);
1598 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
1599 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
1600 * llcomplete,hlqueue,hlcomplete]
1602 #ifdef CONFIG_SCSI_LOGGING /* { */
1604 if (!strncmp("log", buffer + 5, 3)) {
1605 char *token;
1606 unsigned int level;
1608 p = buffer + 9;
1609 token = p;
1610 while (*p != ' ' && *p != '\t' && *p != '\0') {
1611 p++;
1614 if (*p == '\0') {
1615 if (strncmp(token, "all", 3) == 0) {
1617 * Turn on absolutely everything.
1619 scsi_logging_level = ~0;
1620 } else if (strncmp(token, "none", 4) == 0) {
1622 * Turn off absolutely everything.
1624 scsi_logging_level = 0;
1625 } else {
1626 goto out;
1628 } else {
1629 *p++ = '\0';
1631 level = simple_strtoul(p, NULL, 0);
1634 * Now figure out what to do with it.
1636 if (strcmp(token, "error") == 0) {
1637 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
1638 } else if (strcmp(token, "timeout") == 0) {
1639 SCSI_SET_TIMEOUT_LOGGING(level);
1640 } else if (strcmp(token, "scan") == 0) {
1641 SCSI_SET_SCAN_BUS_LOGGING(level);
1642 } else if (strcmp(token, "mlqueue") == 0) {
1643 SCSI_SET_MLQUEUE_LOGGING(level);
1644 } else if (strcmp(token, "mlcomplete") == 0) {
1645 SCSI_SET_MLCOMPLETE_LOGGING(level);
1646 } else if (strcmp(token, "llqueue") == 0) {
1647 SCSI_SET_LLQUEUE_LOGGING(level);
1648 } else if (strcmp(token, "llcomplete") == 0) {
1649 SCSI_SET_LLCOMPLETE_LOGGING(level);
1650 } else if (strcmp(token, "hlqueue") == 0) {
1651 SCSI_SET_HLQUEUE_LOGGING(level);
1652 } else if (strcmp(token, "hlcomplete") == 0) {
1653 SCSI_SET_HLCOMPLETE_LOGGING(level);
1654 } else if (strcmp(token, "ioctl") == 0) {
1655 SCSI_SET_IOCTL_LOGGING(level);
1656 } else {
1657 goto out;
1661 printk(KERN_INFO "scsi logging level set to 0x%8.8x\n", scsi_logging_level);
1663 #endif /* CONFIG_SCSI_LOGGING */ /* } */
1666 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
1667 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1668 * Consider this feature BETA.
1669 * CAUTION: This is not for hotplugging your peripherals. As
1670 * SCSI was not designed for this you could damage your
1671 * hardware !
1672 * However perhaps it is legal to switch on an
1673 * already connected device. It is perhaps not
1674 * guaranteed this device doesn't corrupt an ongoing data transfer.
1676 if (!strncmp("add-single-device", buffer + 5, 17)) {
1677 p = buffer + 23;
1679 host = simple_strtoul(p, &p, 0);
1680 channel = simple_strtoul(p + 1, &p, 0);
1681 id = simple_strtoul(p + 1, &p, 0);
1682 lun = simple_strtoul(p + 1, &p, 0);
1684 printk(KERN_INFO "scsi singledevice %d %d %d %d\n", host, channel,
1685 id, lun);
1687 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1688 if (HBA_ptr->host_no == host) {
1689 break;
1692 err = -ENXIO;
1693 if (!HBA_ptr)
1694 goto out;
1696 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1697 if ((scd->channel == channel
1698 && scd->id == id
1699 && scd->lun == lun)) {
1700 break;
1704 err = -ENOSYS;
1705 if (scd)
1706 goto out; /* We do not yet support unplugging */
1708 scan_scsis(HBA_ptr, 1, channel, id, lun);
1710 /* FIXME (DB) This assumes that the queue_depth routines can be used
1711 in this context as well, while they were all designed to be
1712 called only once after the detect routine. (DB) */
1713 /* queue_depth routine moved to inside scan_scsis(,1,,,) so
1714 it is called before build_commandblocks() */
1716 err = length;
1717 goto out;
1720 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
1721 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
1723 * Consider this feature pre-BETA.
1725 * CAUTION: This is not for hotplugging your peripherals. As
1726 * SCSI was not designed for this you could damage your
1727 * hardware and thoroughly confuse the SCSI subsystem.
1730 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
1731 p = buffer + 26;
1733 host = simple_strtoul(p, &p, 0);
1734 channel = simple_strtoul(p + 1, &p, 0);
1735 id = simple_strtoul(p + 1, &p, 0);
1736 lun = simple_strtoul(p + 1, &p, 0);
1739 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
1740 if (HBA_ptr->host_no == host) {
1741 break;
1744 err = -ENODEV;
1745 if (!HBA_ptr)
1746 goto out;
1748 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
1749 if ((scd->channel == channel
1750 && scd->id == id
1751 && scd->lun == lun)) {
1752 break;
1756 if (scd == NULL)
1757 goto out; /* there is no such device attached */
1759 err = -EBUSY;
1760 if (scd->access_count)
1761 goto out;
1763 SDTpnt = scsi_devicelist;
1764 while (SDTpnt != NULL) {
1765 if (SDTpnt->detach)
1766 (*SDTpnt->detach) (scd);
1767 SDTpnt = SDTpnt->next;
1770 if (scd->attached == 0) {
1772 * Nobody is using this device any more.
1773 * Free all of the command structures.
1775 if (HBA_ptr->hostt->revoke)
1776 HBA_ptr->hostt->revoke(scd);
1777 devfs_unregister (scd->de);
1778 scsi_release_commandblocks(scd);
1780 /* Now we can remove the device structure */
1781 if (scd->next != NULL)
1782 scd->next->prev = scd->prev;
1784 if (scd->prev != NULL)
1785 scd->prev->next = scd->next;
1787 if (HBA_ptr->host_queue == scd) {
1788 HBA_ptr->host_queue = scd->next;
1790 blk_cleanup_queue(&scd->request_queue);
1791 kfree((char *) scd);
1792 } else {
1793 goto out;
1795 err = 0;
1797 out:
1799 free_page((unsigned long) buffer);
1800 return err;
1802 #endif
1805 * This entry point should be called by a driver if it is trying
1806 * to add a low level scsi driver to the system.
1808 static int scsi_register_host(Scsi_Host_Template * tpnt)
1810 int pcount;
1811 struct Scsi_Host *shpnt;
1812 Scsi_Device *SDpnt;
1813 struct Scsi_Device_Template *sdtpnt;
1814 const char *name;
1815 unsigned long flags;
1816 int out_of_space = 0;
1818 if (tpnt->next || !tpnt->detect)
1819 return 1; /* Must be already loaded, or
1820 * no detect routine available
1822 pcount = next_scsi_host;
1824 /* The detect routine must carefully spinunlock/spinlock if
1825 it enables interrupts, since all interrupt handlers do
1826 spinlock as well.
1827 All lame drivers are going to fail due to the following
1828 spinlock. For the time beeing let's use it only for drivers
1829 using the new scsi code. NOTE: the detect routine could
1830 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
1832 if (tpnt->use_new_eh_code) {
1833 spin_lock_irqsave(&io_request_lock, flags);
1834 tpnt->present = tpnt->detect(tpnt);
1835 spin_unlock_irqrestore(&io_request_lock, flags);
1836 } else
1837 tpnt->present = tpnt->detect(tpnt);
1839 if (tpnt->present) {
1840 if (pcount == next_scsi_host) {
1841 if (tpnt->present > 1) {
1842 printk(KERN_ERR "scsi: Failure to register low-level scsi driver");
1843 scsi_unregister_host(tpnt);
1844 return 1;
1847 * The low-level driver failed to register a driver.
1848 * We can do this now.
1850 if(scsi_register(tpnt, 0)==NULL)
1852 printk(KERN_ERR "scsi: register failed.\n");
1853 scsi_unregister_host(tpnt);
1854 return 1;
1857 tpnt->next = scsi_hosts; /* Add to the linked list */
1858 scsi_hosts = tpnt;
1860 /* Add the new driver to /proc/scsi */
1861 #ifdef CONFIG_PROC_FS
1862 build_proc_dir_entries(tpnt);
1863 #endif
1867 * Add the kernel threads for each host adapter that will
1868 * handle error correction.
1870 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1871 if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
1872 DECLARE_MUTEX_LOCKED(sem);
1874 shpnt->eh_notify = &sem;
1875 kernel_thread((int (*)(void *)) scsi_error_handler,
1876 (void *) shpnt, 0);
1879 * Now wait for the kernel error thread to initialize itself
1880 * as it might be needed when we scan the bus.
1882 down(&sem);
1883 shpnt->eh_notify = NULL;
1887 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1888 if (shpnt->hostt == tpnt) {
1889 if (tpnt->info) {
1890 name = tpnt->info(shpnt);
1891 } else {
1892 name = tpnt->name;
1894 printk(KERN_INFO "scsi%d : %s\n", /* And print a little message */
1895 shpnt->host_no, name);
1899 /* The next step is to call scan_scsis here. This generates the
1900 * Scsi_Devices entries
1902 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1903 if (shpnt->hostt == tpnt) {
1904 scan_scsis(shpnt, 0, 0, 0, 0);
1905 if (shpnt->select_queue_depths != NULL) {
1906 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
1911 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1912 if (sdtpnt->init && sdtpnt->dev_noticed)
1913 (*sdtpnt->init) ();
1917 * Next we create the Scsi_Cmnd structures for this host
1919 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1920 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
1921 if (SDpnt->host->hostt == tpnt) {
1922 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
1923 if (sdtpnt->attach)
1924 (*sdtpnt->attach) (SDpnt);
1925 if (SDpnt->attached) {
1926 scsi_build_commandblocks(SDpnt);
1927 if (0 == SDpnt->has_cmdblocks)
1928 out_of_space = 1;
1934 * Now that we have all of the devices, resize the DMA pool,
1935 * as required. */
1936 if (!out_of_space)
1937 scsi_resize_dma_pool();
1940 /* This does any final handling that is required. */
1941 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
1942 if (sdtpnt->finish && sdtpnt->nr_dev) {
1943 (*sdtpnt->finish) ();
1947 #if defined(USE_STATIC_SCSI_MEMORY)
1948 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
1949 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
1950 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
1951 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
1952 #endif
1954 MOD_INC_USE_COUNT;
1956 if (out_of_space) {
1957 scsi_unregister_host(tpnt); /* easiest way to clean up?? */
1958 return 1;
1959 } else
1960 return 0;
1964 * Similarly, this entry point should be called by a loadable module if it
1965 * is trying to remove a low level scsi driver from the system.
1967 * Note - there is a fatal flaw in the deregister module function.
1968 * There is no way to return a code that says 'I cannot be unloaded now'.
1969 * The system relies entirely upon usage counts that are maintained,
1970 * and the assumption is that if the usage count is 0, then the module
1971 * can be unloaded.
1973 static void scsi_unregister_host(Scsi_Host_Template * tpnt)
1975 int online_status;
1976 int pcount0, pcount;
1977 Scsi_Cmnd *SCpnt;
1978 Scsi_Device *SDpnt;
1979 Scsi_Device *SDpnt1;
1980 struct Scsi_Device_Template *sdtpnt;
1981 struct Scsi_Host *sh1;
1982 struct Scsi_Host *shpnt;
1983 char name[10]; /* host_no>=10^9? I don't think so. */
1986 * First verify that this host adapter is completely free with no pending
1987 * commands
1989 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
1990 for (SDpnt = shpnt->host_queue; SDpnt;
1991 SDpnt = SDpnt->next) {
1992 if (SDpnt->host->hostt == tpnt
1993 && SDpnt->host->hostt->module
1994 && GET_USE_COUNT(SDpnt->host->hostt->module))
1995 return;
1997 * FIXME(eric) - We need to find a way to notify the
1998 * low level driver that we are shutting down - via the
1999 * special device entry that still needs to get added.
2001 * Is detach interface below good enough for this?
2007 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2008 * to help prevent race conditions where other hosts/processors could try and
2009 * get in and queue a command.
2011 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2012 for (SDpnt = shpnt->host_queue; SDpnt;
2013 SDpnt = SDpnt->next) {
2014 if (SDpnt->host->hostt == tpnt)
2015 SDpnt->online = FALSE;
2020 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2021 if (shpnt->hostt != tpnt) {
2022 continue;
2024 for (SDpnt = shpnt->host_queue; SDpnt;
2025 SDpnt = SDpnt->next) {
2027 * Loop over all of the commands associated with the device. If any of
2028 * them are busy, then set the state back to inactive and bail.
2030 for (SCpnt = SDpnt->device_queue; SCpnt;
2031 SCpnt = SCpnt->next) {
2032 online_status = SDpnt->online;
2033 SDpnt->online = FALSE;
2034 if (SCpnt->request.rq_status != RQ_INACTIVE) {
2035 printk(KERN_ERR "SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2036 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2037 SCpnt->state, SCpnt->owner);
2038 for (SDpnt1 = shpnt->host_queue; SDpnt1;
2039 SDpnt1 = SDpnt1->next) {
2040 for (SCpnt = SDpnt1->device_queue; SCpnt;
2041 SCpnt = SCpnt->next)
2042 if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2043 SCpnt->request.rq_status = RQ_INACTIVE;
2045 SDpnt->online = online_status;
2046 printk(KERN_ERR "Device busy???\n");
2047 return;
2050 * No, this device is really free. Mark it as such, and
2051 * continue on.
2053 SCpnt->state = SCSI_STATE_DISCONNECTING;
2054 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2058 /* Next we detach the high level drivers from the Scsi_Device structures */
2060 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2061 if (shpnt->hostt != tpnt) {
2062 continue;
2064 for (SDpnt = shpnt->host_queue; SDpnt;
2065 SDpnt = SDpnt->next) {
2066 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2067 if (sdtpnt->detach)
2068 (*sdtpnt->detach) (SDpnt);
2070 /* If something still attached, punt */
2071 if (SDpnt->attached) {
2072 printk(KERN_ERR "Attached usage count = %d\n", SDpnt->attached);
2073 return;
2075 devfs_unregister (SDpnt->de);
2080 * Next, kill the kernel error recovery thread for this host.
2082 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2083 if (shpnt->hostt == tpnt
2084 && shpnt->hostt->use_new_eh_code
2085 && shpnt->ehandler != NULL) {
2086 DECLARE_MUTEX_LOCKED(sem);
2088 shpnt->eh_notify = &sem;
2089 send_sig(SIGHUP, shpnt->ehandler, 1);
2090 down(&sem);
2091 shpnt->eh_notify = NULL;
2095 /* Next we free up the Scsi_Cmnd structures for this host */
2097 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2098 if (shpnt->hostt != tpnt) {
2099 continue;
2101 for (SDpnt = shpnt->host_queue; SDpnt;
2102 SDpnt = shpnt->host_queue) {
2103 scsi_release_commandblocks(SDpnt);
2105 blk_cleanup_queue(&SDpnt->request_queue);
2106 /* Next free up the Scsi_Device structures for this host */
2107 shpnt->host_queue = SDpnt->next;
2108 kfree((char *) SDpnt);
2113 /* Next we go through and remove the instances of the individual hosts
2114 * that were detected */
2116 pcount0 = next_scsi_host;
2117 for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2118 sh1 = shpnt->next;
2119 if (shpnt->hostt != tpnt)
2120 continue;
2121 pcount = next_scsi_host;
2122 /* Remove the /proc/scsi directory entry */
2123 sprintf(name,"%d",shpnt->host_no);
2124 remove_proc_entry(name, tpnt->proc_dir);
2125 if (tpnt->release)
2126 (*tpnt->release) (shpnt);
2127 else {
2128 /* This is the default case for the release function.
2129 * It should do the right thing for most correctly
2130 * written host adapters.
2132 if (shpnt->irq)
2133 free_irq(shpnt->irq, NULL);
2134 if (shpnt->dma_channel != 0xff)
2135 free_dma(shpnt->dma_channel);
2136 if (shpnt->io_port && shpnt->n_io_port)
2137 release_region(shpnt->io_port, shpnt->n_io_port);
2139 if (pcount == next_scsi_host)
2140 scsi_unregister(shpnt);
2141 tpnt->present--;
2145 * If there are absolutely no more hosts left, it is safe
2146 * to completely nuke the DMA pool. The resize operation will
2147 * do the right thing and free everything.
2149 if (!scsi_hosts)
2150 scsi_resize_dma_pool();
2152 if (pcount0 != next_scsi_host)
2153 printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
2154 (next_scsi_host == 1) ? "" : "s");
2156 #if defined(USE_STATIC_SCSI_MEMORY)
2157 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2158 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2159 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2160 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2161 #endif
2164 * Remove it from the linked list and /proc if all
2165 * hosts were successfully removed (ie preset == 0)
2167 if (!tpnt->present) {
2168 Scsi_Host_Template **SHTp = &scsi_hosts;
2169 Scsi_Host_Template *SHT;
2171 while ((SHT = *SHTp) != NULL) {
2172 if (SHT == tpnt) {
2173 *SHTp = SHT->next;
2174 remove_proc_entry(tpnt->proc_name, proc_scsi);
2175 break;
2177 SHTp = &SHT->next;
2180 MOD_DEC_USE_COUNT;
2183 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
2186 * This entry point should be called by a loadable module if it is trying
2187 * add a high level scsi driver to the system.
2189 static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2191 Scsi_Device *SDpnt;
2192 struct Scsi_Host *shpnt;
2193 int out_of_space = 0;
2195 if (tpnt->next)
2196 return 1;
2198 scsi_register_device(tpnt);
2200 * First scan the devices that we know about, and see if we notice them.
2203 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2204 for (SDpnt = shpnt->host_queue; SDpnt;
2205 SDpnt = SDpnt->next) {
2206 if (tpnt->detect)
2207 SDpnt->attached += (*tpnt->detect) (SDpnt);
2212 * If any of the devices would match this driver, then perform the
2213 * init function.
2215 if (tpnt->init && tpnt->dev_noticed)
2216 if ((*tpnt->init) ())
2217 return 1;
2220 * Now actually connect the devices to the new driver.
2222 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2223 for (SDpnt = shpnt->host_queue; SDpnt;
2224 SDpnt = SDpnt->next) {
2225 if (tpnt->attach)
2226 (*tpnt->attach) (SDpnt);
2228 * If this driver attached to the device, and don't have any
2229 * command blocks for this device, allocate some.
2231 if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
2232 SDpnt->online = TRUE;
2233 scsi_build_commandblocks(SDpnt);
2234 if (0 == SDpnt->has_cmdblocks)
2235 out_of_space = 1;
2241 * This does any final handling that is required.
2243 if (tpnt->finish && tpnt->nr_dev)
2244 (*tpnt->finish) ();
2245 if (!out_of_space)
2246 scsi_resize_dma_pool();
2247 MOD_INC_USE_COUNT;
2249 if (out_of_space) {
2250 scsi_unregister_device(tpnt); /* easiest way to clean up?? */
2251 return 1;
2252 } else
2253 return 0;
2256 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
2258 Scsi_Device *SDpnt;
2259 struct Scsi_Host *shpnt;
2260 struct Scsi_Device_Template *spnt;
2261 struct Scsi_Device_Template *prev_spnt;
2264 * If we are busy, this is not going to fly.
2266 if (GET_USE_COUNT(tpnt->module) != 0)
2267 return 0;
2270 * Next, detach the devices from the driver.
2273 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2274 for (SDpnt = shpnt->host_queue; SDpnt;
2275 SDpnt = SDpnt->next) {
2276 if (tpnt->detach)
2277 (*tpnt->detach) (SDpnt);
2278 if (SDpnt->attached == 0) {
2279 SDpnt->online = FALSE;
2282 * Nobody is using this device any more. Free all of the
2283 * command structures.
2285 scsi_release_commandblocks(SDpnt);
2290 * Extract the template from the linked list.
2292 spnt = scsi_devicelist;
2293 prev_spnt = NULL;
2294 while (spnt != tpnt) {
2295 prev_spnt = spnt;
2296 spnt = spnt->next;
2298 if (prev_spnt == NULL)
2299 scsi_devicelist = tpnt->next;
2300 else
2301 prev_spnt->next = spnt->next;
2303 MOD_DEC_USE_COUNT;
2305 * Final cleanup for the driver is done in the driver sources in the
2306 * cleanup function.
2308 return 0;
2312 /* This function should be called by drivers which needs to register
2313 * with the midlevel scsi system. As of 2.4.0-test9pre3 this is our
2314 * main device/hosts register function /mathiasen
2316 int scsi_register_module(int module_type, void *ptr)
2318 switch (module_type) {
2319 case MODULE_SCSI_HA:
2320 return scsi_register_host((Scsi_Host_Template *) ptr);
2322 /* Load upper level device handler of some kind */
2323 case MODULE_SCSI_DEV:
2324 #ifdef CONFIG_KMOD
2325 if (scsi_hosts == NULL)
2326 request_module("scsi_hostadapter");
2327 #endif
2328 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
2329 /* The rest of these are not yet implemented */
2331 /* Load constants.o */
2332 case MODULE_SCSI_CONST:
2334 /* Load specialized ioctl handler for some device. Intended for
2335 * cdroms that have non-SCSI2 audio command sets. */
2336 case MODULE_SCSI_IOCTL:
2338 default:
2339 return 1;
2343 /* Reverse the actions taken above
2345 void scsi_unregister_module(int module_type, void *ptr)
2347 switch (module_type) {
2348 case MODULE_SCSI_HA:
2349 scsi_unregister_host((Scsi_Host_Template *) ptr);
2350 break;
2351 case MODULE_SCSI_DEV:
2352 scsi_unregister_device((struct Scsi_Device_Template *) ptr);
2353 break;
2354 /* The rest of these are not yet implemented. */
2355 case MODULE_SCSI_CONST:
2356 case MODULE_SCSI_IOCTL:
2357 break;
2358 default:
2360 return;
2363 #ifdef CONFIG_PROC_FS
/*
 * Function:    scsi_dump_status
 *
 * Purpose:     Brain dump of scsi system, used for problem solving.
 *
 * Arguments:   level - used to indicate level of detail.
 *
 * Notes:       The level isn't used at all yet, but we need to find some way
 *              of sensibly logging varying degrees of information.  A quick
 *              one-line display of each command, plus the status would be
 *              most useful.
 *
 *              This does depend upon CONFIG_SCSI_LOGGING - I do want some way
 *              of turning it all off if the user wants a lean and mean kernel.
 *              It would probably also be useful to allow the user to specify
 *              one single host to be dumped.  A second argument to the
 *              function would be useful for that purpose.
 *
 *              FIXME - some formatting of the output into tables would be
 *              very handy.
 */
static void scsi_dump_status(int level)
{
#ifdef CONFIG_SCSI_LOGGING	/* { */
	int i;
	struct Scsi_Host *shpnt;
	Scsi_Cmnd *SCpnt;
	Scsi_Device *SDpnt;

	/* First pass: one line of counters per registered host. */
	printk(KERN_INFO "Dump of scsi host parameters:\n");
	i = 0;
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO " %d %d %d : %d %d\n",
		       shpnt->host_failed,
		       shpnt->host_busy,
		       atomic_read(&shpnt->host_active),
		       shpnt->host_blocked,
		       shpnt->host_self_blocked);
	}

	/* Second pass: every command on every device queue of every host. */
	printk(KERN_INFO "\n\n");
	printk(KERN_INFO "Dump of scsi command parameters:\n");
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		printk(KERN_INFO "h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
				/* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
				printk(KERN_INFO "(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
				       i++,
				       SCpnt->host->host_no,
				       SCpnt->channel,
				       SCpnt->target,
				       SCpnt->lun,
				       kdevname(SCpnt->request.rq_dev),
				       SCpnt->request.sector,
				       SCpnt->request.nr_sectors,
				       SCpnt->request.current_nr_sectors,
				       SCpnt->request.rq_status,
				       SCpnt->use_sg,
				       SCpnt->retries,
				       SCpnt->allowed,
				       SCpnt->flags,
				       SCpnt->timeout_per_command,
				       SCpnt->timeout,
				       SCpnt->internal_timeout,
				       SCpnt->cmnd[0],
				       SCpnt->sense_buffer[2],
				       SCpnt->result);
			}
		}
	}

	/*
	 * NOTE(review): the block-device dump below does not depend on
	 * shpnt/SDpnt, so it is repeated once per SCSI device - looks
	 * redundant, but kept as-is since this is debug output only.
	 */
	for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
		for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
			/* Now dump the request lists for each block device */
			printk(KERN_INFO "Dump of pending block device requests\n");
			for (i = 0; i < MAX_BLKDEV; i++) {
				struct list_head *queue_head;

				queue_head = &blk_dev[i].request_queue.queue_head;
				if (!list_empty(queue_head)) {
					struct request *req;
					struct list_head *entry;

					printk(KERN_INFO "%d: ", i);
					entry = queue_head->next;
					do {
						req = blkdev_entry_to_request(entry);
						printk("(%s %d %ld %ld %ld) ",
						       kdevname(req->rq_dev),
						       req->cmd,
						       req->sector,
						       req->nr_sectors,
						       req->current_nr_sectors);
					} while ((entry = entry->next) != queue_head);
					printk("\n");
				}
			}
		}
	}
#endif	/* CONFIG_SCSI_LOGGING */	/* } */
}
2467 #endif /* CONFIG_PROC_FS */
2469 static int __init scsi_host_no_init (char *str)
2471 static int next_no = 0;
2472 char *temp;
2474 while (str) {
2475 temp = str;
2476 while (*temp && (*temp != ':') && (*temp != ','))
2477 temp++;
2478 if (!*temp)
2479 temp = NULL;
2480 else
2481 *temp++ = 0;
2482 scsi_host_no_insert(str, next_no);
2483 str = temp;
2484 next_no++;
2486 return 1;
2489 static char *scsihosts;
2491 MODULE_PARM(scsihosts, "s");
2492 MODULE_DESCRIPTION("SCSI core");
2494 #ifndef MODULE
2495 int __init scsi_setup(char *str)
2497 scsihosts = str;
2498 return 1;
2501 __setup("scsihosts=", scsi_setup);
2502 #endif
2504 static int __init init_scsi(void)
2506 struct proc_dir_entry *generic;
2508 printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
2510 if( scsi_init_minimal_dma_pool() != 0 )
2512 return 1;
2516 * This makes /proc/scsi and /proc/scsi/scsi visible.
2518 #ifdef CONFIG_PROC_FS
2519 proc_scsi = proc_mkdir("scsi", 0);
2520 if (!proc_scsi) {
2521 printk (KERN_ERR "cannot init /proc/scsi\n");
2522 return -ENOMEM;
2524 generic = create_proc_info_entry ("scsi/scsi", 0, 0, scsi_proc_info);
2525 if (!generic) {
2526 printk (KERN_ERR "cannot init /proc/scsi/scsi\n");
2527 remove_proc_entry("scsi", 0);
2528 return -ENOMEM;
2530 generic->write_proc = proc_scsi_gen_write;
2531 #endif
2533 scsi_devfs_handle = devfs_mk_dir (NULL, "scsi", NULL);
2534 if (scsihosts)
2535 printk(KERN_INFO "scsi: host order: %s\n", scsihosts);
2536 scsi_host_no_init (scsihosts);
2538 * This is where the processing takes place for most everything
2539 * when commands are completed.
2541 init_bh(SCSI_BH, scsi_bottom_half_handler);
2543 return 0;
2546 static void __exit exit_scsi(void)
2548 Scsi_Host_Name *shn, *shn2 = NULL;
2550 remove_bh(SCSI_BH);
2552 devfs_unregister (scsi_devfs_handle);
2553 for (shn = scsi_host_no_list;shn;shn = shn->next) {
2554 if (shn->name)
2555 kfree(shn->name);
2556 if (shn2)
2557 kfree (shn2);
2558 shn2 = shn;
2560 if (shn2)
2561 kfree (shn2);
2563 #ifdef CONFIG_PROC_FS
2564 /* No, we're not here anymore. Don't show the /proc/scsi files. */
2565 remove_proc_entry ("scsi/scsi", 0);
2566 remove_proc_entry ("scsi", 0);
2567 #endif
2570 * Free up the DMA pool.
2572 scsi_resize_dma_pool();
2576 module_init(init_scsi);
2577 module_exit(exit_scsi);
2580 * Function: scsi_get_host_dev()
2582 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2584 * Arguments: SHpnt - Host that needs a Scsi_Device
2586 * Lock status: None assumed.
2588 * Returns: The Scsi_Device or NULL
2590 * Notes:
2592 Scsi_Device * scsi_get_host_dev(struct Scsi_Host * SHpnt)
2594 Scsi_Device * SDpnt;
2597 * Attach a single Scsi_Device to the Scsi_Host - this should
2598 * be made to look like a "pseudo-device" that points to the
2599 * HA itself. For the moment, we include it at the head of
2600 * the host_queue itself - I don't think we want to show this
2601 * to the HA in select_queue_depths(), as this would probably confuse
2602 * matters.
2603 * Note - this device is not accessible from any high-level
2604 * drivers (including generics), which is probably not
2605 * optimal. We can add hooks later to attach
2607 SDpnt = (Scsi_Device *) kmalloc(sizeof(Scsi_Device),
2608 GFP_ATOMIC);
2609 if(SDpnt == NULL)
2610 return NULL;
2612 memset(SDpnt, 0, sizeof(Scsi_Device));
2614 SDpnt->host = SHpnt;
2615 SDpnt->id = SHpnt->this_id;
2616 SDpnt->type = -1;
2617 SDpnt->queue_depth = 1;
2619 scsi_build_commandblocks(SDpnt);
2621 scsi_initialize_queue(SDpnt, SHpnt);
2623 SDpnt->online = TRUE;
2626 * Initialize the object that we will use to wait for command blocks.
2628 init_waitqueue_head(&SDpnt->scpnt_wait);
2629 return SDpnt;
2633 * Function: scsi_free_host_dev()
2635 * Purpose: Create a Scsi_Device that points to the host adapter itself.
2637 * Arguments: SHpnt - Host that needs a Scsi_Device
2639 * Lock status: None assumed.
2641 * Returns: Nothing
2643 * Notes:
2645 void scsi_free_host_dev(Scsi_Device * SDpnt)
2647 if( (unsigned char) SDpnt->id != (unsigned char) SDpnt->host->this_id )
2649 panic("Attempt to delete wrong device\n");
2652 blk_cleanup_queue(&SDpnt->request_queue);
2655 * We only have a single SCpnt attached to this device. Free
2656 * it now.
2658 scsi_release_commandblocks(SDpnt);
2659 kfree(SDpnt);
2663 * Overrides for Emacs so that we follow Linus's tabbing style.
2664 * Emacs will notice this stuff at the end of the file and automatically
2665 * adjust the settings for this buffer only. This must remain at the end
2666 * of the file.
2667 * ---------------------------------------------------------------------------
2668 * Local variables:
2669 * c-indent-level: 4
2670 * c-brace-imaginary-offset: 0
2671 * c-brace-offset: -4
2672 * c-argdecl-indent: 4
2673 * c-label-offset: -4
2674 * c-continued-statement-offset: 4
2675 * c-continued-brace-offset: 0
2676 * indent-tabs-mode: nil
2677 * tab-width: 8
2678 * End: