Import 2.3.18pre1
[davej-history.git] / drivers / scsi / scsi.c
blob7c44cdf73537a69d165c26d6c99a2f067d8974bd
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995 Eric Youngdale
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
9 * <drew@colorado.edu>
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * Modified by Eric Youngdale eric@andante.jic.com or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding request, and other
18 * enhancements.
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/conf.modules)
25 * Bjorn Ekwall <bj0rn@blox.se>
26 * (changed to kmod)
28 * Major improvements to the timeout, abort, and reset processing,
29 * as well as performance modifications for large queue depths by
30 * Leonard N. Zubkoff <lnz@dandelion.com>
32 * Converted cli() code to spinlocks, Ingo Molnar
34 * Jiffies wrap fixes (host->resetting), 3 Dec 1998 Andrea Arcangeli
36 * out_of_space hacks, D. Gilbert (dpg) 990608
39 #include <linux/config.h>
40 #include <linux/module.h>
42 #include <linux/sched.h>
43 #include <linux/timer.h>
44 #include <linux/string.h>
45 #include <linux/malloc.h>
46 #include <linux/ioport.h>
47 #include <linux/kernel.h>
48 #include <linux/stat.h>
49 #include <linux/blk.h>
50 #include <linux/interrupt.h>
51 #include <linux/delay.h>
52 #include <linux/init.h>
54 #define __KERNEL_SYSCALLS__
56 #include <linux/unistd.h>
57 #include <linux/spinlock.h>
59 #include <asm/system.h>
60 #include <asm/irq.h>
61 #include <asm/dma.h>
63 #include "scsi.h"
64 #include "hosts.h"
65 #include "constants.h"
67 #ifdef CONFIG_KMOD
68 #include <linux/kmod.h>
69 #endif
71 #undef USE_STATIC_SCSI_MEMORY
74 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
78 * Definitions and constants.
80 #define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
83 * PAGE_SIZE must be a multiple of the sector size (512). True
84 * for all reasonably recent architectures (even the VAX...).
86 #define SECTOR_SIZE 512
87 #define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
89 #if SECTORS_PER_PAGE <= 8
90 typedef unsigned char FreeSectorBitmap;
91 #elif SECTORS_PER_PAGE <= 32
92 typedef unsigned int FreeSectorBitmap;
93 #else
94 #error You lose.
95 #endif
97 #define MIN_RESET_DELAY (2*HZ)
99 /* Do not call reset on error if we just did a reset within 15 sec. */
100 #define MIN_RESET_PERIOD (15*HZ)
102 /* The following devices are known not to tolerate a lun != 0 scan for
103 * one reason or another. Some will respond to all luns, others will
104 * lock up.
107 #define BLIST_NOLUN 0x01
108 #define BLIST_FORCELUN 0x02
109 #define BLIST_BORKEN 0x04
110 #define BLIST_KEY 0x08
111 #define BLIST_SINGLELUN 0x10
112 #define BLIST_NOTQ 0x20
113 #define BLIST_SPARSELUN 0x40
114 #define BLIST_MAX5LUN 0x80
117 * Data declarations.
119 unsigned long scsi_pid = 0;
120 Scsi_Cmnd *last_cmnd = NULL;
121 /* Command groups 3 and 4 are reserved and should never be used. */
122 const unsigned char scsi_command_size[8] = {
123 6, 10, 10, 12,
124 12, 12, 10, 10
126 static unsigned long serial_number = 0;
127 static Scsi_Cmnd *scsi_bh_queue_head = NULL;
128 static Scsi_Cmnd *scsi_bh_queue_tail = NULL;
129 static FreeSectorBitmap *dma_malloc_freelist = NULL;
130 static int need_isa_bounce_buffers;
131 static unsigned int dma_sectors = 0;
132 unsigned int scsi_dma_free_sectors = 0;
133 unsigned int scsi_need_isa_buffer = 0;
134 static unsigned char **dma_malloc_pages = NULL;
137 * Note - the initial logging level can be set here to log events at boot time.
138 * After the system is up, you may enable logging via the /proc interface.
140 unsigned int scsi_logging_level = 0;
142 volatile struct Scsi_Host *host_active = NULL;
144 #if CONFIG_PROC_FS
146 * This is the pointer to the /proc/scsi code.
147 * It is only initialized to !=0 if the scsi code is present
149 struct proc_dir_entry proc_scsi_scsi =
151 PROC_SCSI_SCSI, 4, "scsi",
152 S_IFREG | S_IRUGO | S_IWUSR, 1, 0, 0, 0,
153 NULL,
154 NULL, NULL,
155 NULL, NULL, NULL
157 #endif
/*
 * Printable names for the SCSI peripheral device type codes
 * (INQUIRY data byte 0, low five bits), indexed by type code.
 * Each string is padded to a fixed width so /proc and boot-time
 * listings line up in columns.
 */
const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
{
	"Direct-Access    ",	/* 0x00: disk */
	"Sequential-Access",	/* 0x01: tape */
	"Printer          ",	/* 0x02 */
	"Processor        ",	/* 0x03 */
	"WORM             ",	/* 0x04 */
	"CD-ROM           ",	/* 0x05 */
	"Scanner          ",	/* 0x06 */
	"Optical Device   ",	/* 0x07 */
	"Medium Changer   ",	/* 0x08 */
	"Communications   ",	/* 0x09 */
	"Unknown          ",	/* 0x0a: reserved */
	"Unknown          ",	/* 0x0b: reserved */
	"Unknown          ",	/* 0x0c: reserved */
	"Enclosure        ",	/* 0x0d */
};
179 * Function prototypes.
181 static void resize_dma_pool(void);
182 static void print_inquiry(unsigned char *data);
183 extern void scsi_times_out(Scsi_Cmnd * SCpnt);
184 static int scan_scsis_single(int channel, int dev, int lun, int *max_scsi_dev,
185 int *sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt,
186 struct Scsi_Host *shpnt, char *scsi_result);
187 void scsi_build_commandblocks(Scsi_Device * SDpnt);
188 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt);
191 * These are the interface to the old error handling code. It should go away
192 * someday soon.
194 extern void scsi_old_done(Scsi_Cmnd * SCpnt);
195 extern void scsi_old_times_out(Scsi_Cmnd * SCpnt);
197 #if CONFIG_PROC_FS
198 extern int (*dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
199 off_t offset, int length, int inout);
200 extern int dispatch_scsi_info(int ino, char *buffer, char **start,
201 off_t offset, int length, int inout);
202 #endif
/*
 * True when a command for DEVICE must NOT be issued to HOST right now:
 * either another host in the mutual-exclusion "block" ring is active,
 * the host has reached its can_queue limit, or the host/device has been
 * explicitly blocked.  All macro arguments are fully parenthesized so
 * expression arguments (e.g. device->host) expand safely.
 */
#define SCSI_BLOCK(DEVICE, HOST)                                          \
	(((HOST)->block && host_active && (HOST) != host_active)          \
	 || ((HOST)->can_queue && (HOST)->host_busy >= (HOST)->can_queue) \
	 || ((HOST)->host_blocked)                                        \
	 || ((DEVICE) != NULL && (DEVICE)->device_blocked) )
210 static void scsi_dump_status(int level);
/*
 * One exception-table entry: vendor/model strings as they appear in
 * INQUIRY data (bytes 8-15 / 16-31), and the BLIST_* flags to apply.
 * A "*" model matches any model via the prefix compare done in
 * get_device_flags().
 */
struct dev_info {
	const char *vendor;
	const char *model;
	const char *revision;	/* Latest revision known to be bad.  Not used yet */
	unsigned flags;
};

/*
 * This is what was previously known as the blacklist.  The concept
 * has been expanded so that we can specify other types of things we
 * need to be aware of.
 *
 * NOTE(review): get_device_flags() returns the flags of the FIRST
 * matching entry, so the later duplicate entries below ("TEXEL CD-ROM
 * 1.06" with BLIST_BORKEN, the second "YAMAHA CDR102") are never
 * reached -- verify intent.
 */
static struct dev_info device_list[] =
{
	{"Aashima", "IMAGERY 2400SP", "1.03", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"CHINON", "CD-ROM CDS-431", "H42", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"CHINON", "CD-ROM CDS-535", "Q14", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"DENON", "DRD-25X", "V", BLIST_NOLUN},			/* Locks up if probed for lun != 0 */
	{"HITACHI", "DK312C", "CM81", BLIST_NOLUN},		/* Responds to all lun - dtg */
	{"HITACHI", "DK314C", "CR21", BLIST_NOLUN},		/* responds to all lun */
	{"IMS", "CDD521/10", "2.06", BLIST_NOLUN},		/* Locks-up when LUN>0 polled. */
	{"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN},		/* Locks-up when LUN>0 polled. */
	{"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN},		/* Locks-up when LUN>0 polled. */
	{"MAXTOR", "MXT-1240S", "I1.2", BLIST_NOLUN},		/* Locks up when LUN>0 polled */
	{"MAXTOR", "XT-4170S", "B5A", BLIST_NOLUN},		/* Locks-up sometimes when LUN>0 polled. */
	{"MAXTOR", "XT-8760S", "B7B", BLIST_NOLUN},		/* guess what? */
	{"MEDIAVIS", "RENO CD-ROMX2A", "2.03", BLIST_NOLUN},	/* Responds to all lun */
	{"MICROP", "4110", "*", BLIST_NOTQ},			/* Buggy Tagged Queuing */
	{"NEC", "CD-ROM DRIVE:841", "1.0", BLIST_NOLUN},	/* Locks-up when LUN>0 polled. */
	{"PHILIPS", "PCA80SC", "V4-2", BLIST_NOLUN},		/* Responds to all lun */
	{"RODIME", "RO3000S", "2.33", BLIST_NOLUN},		/* Locks up if polled for lun != 0 */
	{"SANYO", "CRD-250S", "1.20", BLIST_NOLUN},	/* causes failed REQUEST SENSE on lun 1
							 * for aha152x controller, which causes
							 * SCSI code to reset bus.*/
	{"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN},	/* causes failed REQUEST SENSE on lun 1
							 * for aha152x controller, which causes
							 * SCSI code to reset bus.*/
	{"SEAGATE", "ST296", "921", BLIST_NOLUN},	/* Responds to all lun */
	{"SEAGATE", "ST1581", "6538", BLIST_NOLUN},	/* Responds to all lun */
	{"SONY", "CD-ROM CDU-541", "4.3d", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-55S", "1.0i", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-561", "1.7x", BLIST_NOLUN},
	{"SONY", "CD-ROM CDU-8012", "*", BLIST_NOLUN},
	{"TANDBERG", "TDC 3600", "U07", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"TEAC", "CD-R55S", "1.0H", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"TEAC", "CD-ROM", "1.06", BLIST_NOLUN},	/* causes failed REQUEST SENSE on lun 1
							 * for seagate controller, which causes
							 * SCSI code to reset bus.*/
	{"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},	/* causes failed REQUEST SENSE on lun 1
							 * for seagate controller, which causes
							 * SCSI code to reset bus.*/
	{"QUANTUM", "LPS525S", "3110", BLIST_NOLUN},	/* Locks sometimes if polled for lun != 0 */
	{"QUANTUM", "PD1225S", "3110", BLIST_NOLUN},	/* Locks sometimes if polled for lun != 0 */
	{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"SANKYO", "CP525", "6.64", BLIST_NOLUN},	/* causes failed REQ SENSE, extra reset */
	{"HP", "C1750A", "3226", BLIST_NOLUN},		/* scanjet iic */
	{"HP", "C1790A", "", BLIST_NOLUN},		/* scanjet iip */
	{"HP", "C2500A", "", BLIST_NOLUN},		/* scanjet iicx */
	{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN},	/* extra reset */
	{"RELISYS", "Scorpio", "*", BLIST_NOLUN},	/* responds to all LUN */

	/*
	 * Other types of devices that have special flags.
	 */
	{"SONY", "CD-ROM CDU-8001", "*", BLIST_BORKEN},
	{"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},	/* NOTE(review): shadowed by the
							 * BLIST_NOLUN entry above */
	{"IOMEGA", "Io20S *F", "*", BLIST_KEY},
	{"INSITE", "Floptical F*8I", "*", BLIST_KEY},
	{"INSITE", "I325VM", "*", BLIST_KEY},
	{"NRC", "MBR-7", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NRC", "MBR-7.4", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"REGAL", "CDC-4X", "*", BLIST_MAX5LUN | BLIST_SINGLELUN},
	{"NAKAMICH", "MJ-4.8S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"NAKAMICH", "MJ-5.16S", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-600", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-602X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"PIONEER", "CD-ROM DRM-604X", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"EMULEX", "MD21/S2 ESDI", "*", BLIST_SINGLELUN},
	{"CANON", "IPUBJD", "*", BLIST_SPARSELUN},
	{"nCipher", "Fastness Crypto", "*", BLIST_FORCELUN},
	{"NEC", "PD-1 ODX654P", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"MATSHITA", "PD", "*", BLIST_FORCELUN | BLIST_SINGLELUN},
	{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN},	/* Locks up if polled for lun != 0 */
	{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN},	/* NOTE(review): duplicate of entry above */
	{"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN},

	/*
	 * Must be at end of list...  NULL vendor terminates the scan.
	 */
	{NULL, NULL, NULL}
};
305 static int get_device_flags(unsigned char *response_data)
307 int i = 0;
308 unsigned char *pnt;
309 for (i = 0; 1; i++) {
310 if (device_list[i].vendor == NULL)
311 return 0;
312 pnt = &response_data[8];
313 while (*pnt && *pnt == ' ')
314 pnt++;
315 if (memcmp(device_list[i].vendor, pnt,
316 strlen(device_list[i].vendor)))
317 continue;
318 pnt = &response_data[16];
319 while (*pnt && *pnt == ' ')
320 pnt++;
321 if (memcmp(device_list[i].model, pnt,
322 strlen(device_list[i].model)))
323 continue;
324 return device_list[i].flags;
326 return 0;
330 * Function: scsi_make_blocked_list
332 * Purpose: Build linked list of hosts that require blocking.
334 * Arguments: None.
336 * Returns: Nothing
338 * Notes: Blocking is sort of a hack that is used to prevent more than one
339 * host adapter from being active at one time. This is used in cases
340 * where the ISA bus becomes unreliable if you have more than one
341 * host adapter really pumping data through.
343 * We spent a lot of time examining the problem, and I *believe* that
344 * the problem is bus related as opposed to being a driver bug.
346 * The blocked list is used as part of the synchronization object
347 * that we use to ensure that only one host is active at one time.
348 * I (ERY) would like to make this go away someday, but this would
349 * require that we have a recursive mutex object.
352 void scsi_make_blocked_list(void)
354 int block_count = 0, index;
355 struct Scsi_Host *sh[128], *shpnt;
358 * Create a circular linked list from the scsi hosts which have
359 * the "wish_block" field in the Scsi_Host structure set.
360 * The blocked list should include all the scsi hosts using ISA DMA.
361 * In some systems, using two dma channels simultaneously causes
362 * unpredictable results.
363 * Among the scsi hosts in the blocked list, only one host at a time
364 * is allowed to have active commands queued. The transition from
365 * one active host to the next one is allowed only when host_busy == 0
366 * for the active host (which implies host_busy == 0 for all the hosts
367 * in the list). Moreover for block devices the transition to a new
368 * active host is allowed only when a request is completed, since a
369 * block device request can be divided into multiple scsi commands
370 * (when there are few sg lists or clustering is disabled).
372 * (DB, 4 Feb 1995)
376 host_active = NULL;
378 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
380 #if 0
382 * Is this is a candidate for the blocked list?
383 * Useful to put into the blocked list all the hosts whose driver
384 * does not know about the host->block feature.
386 if (shpnt->unchecked_isa_dma)
387 shpnt->wish_block = 1;
388 #endif
390 if (shpnt->wish_block)
391 sh[block_count++] = shpnt;
394 if (block_count == 1)
395 sh[0]->block = NULL;
397 else if (block_count > 1) {
399 for (index = 0; index < block_count - 1; index++) {
400 sh[index]->block = sh[index + 1];
401 printk("scsi%d : added to blocked host list.\n",
402 sh[index]->host_no);
405 sh[block_count - 1]->block = sh[0];
406 printk("scsi%d : added to blocked host list.\n",
407 sh[index]->host_no);
411 static void scan_scsis_done(Scsi_Cmnd * SCpnt)
414 SCSI_LOG_MLCOMPLETE(1, printk("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result));
415 SCpnt->request.rq_status = RQ_SCSI_DONE;
417 if (SCpnt->request.sem != NULL)
418 up(SCpnt->request.sem);
421 MODULE_PARM(scsi_logging_level, "i");
422 MODULE_PARM_DESC(scsi_logging_level, "SCSI logging level; should be zero or nonzero");
424 #ifndef MODULE
426 static int __init scsi_logging_setup(char *str)
428 int tmp;
430 if (get_option(&str, &tmp) == 1) {
431 scsi_logging_level = (tmp ? ~0 : 0);
432 return 1;
433 } else {
434 printk("scsi_logging_setup : usage scsi_logging_level=n "
435 "(n should be 0 or non-zero)\n");
436 return 0;
440 __setup("scsi_logging=", scsi_logging_setup);
442 #endif
444 #ifdef CONFIG_SCSI_MULTI_LUN
445 static int max_scsi_luns = 8;
446 #else
447 static int max_scsi_luns = 1;
448 #endif
450 MODULE_PARM(max_scsi_luns, "i");
451 MODULE_PARM_DESC(max_scsi_luns, "last scsi LUN (should be between 1 and 8)");
453 #ifndef MODULE
455 static int __init scsi_luns_setup(char *str)
457 int tmp;
459 if (get_option(&str, &tmp) == 1) {
460 max_scsi_luns = tmp;
461 return 1;
462 } else {
463 printk("scsi_luns_setup : usage max_scsi_luns=n "
464 "(n should be between 1 and 8)\n");
465 return 0;
469 __setup("max_scsi_luns=", scsi_luns_setup);
471 #endif
474 * Detecting SCSI devices :
475 * We scan all present host adapter's busses, from ID 0 to ID (max_id).
476 * We use the INQUIRY command, determine device type, and pass the ID /
477 * lun address of all sequential devices to the tape driver, all random
478 * devices to the disk driver.
/*
 * Scan one host adapter for devices.
 *
 * When hardcoded == 1 only the single (hchannel, hid, hlun) address is
 * probed (used for hot-add); otherwise every channel/id/lun within the
 * host's limits is scanned via scan_scsis_single().
 *
 * A scratch Scsi_Cmnd and Scsi_Device are allocated here and chained
 * into shpnt->host_queue so probe commands can time out; both are
 * unchained and freed again before returning.
 */
static void scan_scsis(struct Scsi_Host *shpnt,
		       unchar hardcoded,
		       unchar hchannel,
		       unchar hid,
		       unchar hlun)
{
	int channel;
	int dev;
	int lun;
	int max_dev_lun;
	Scsi_Cmnd *SCpnt;
	unsigned char *scsi_result;
	unsigned char scsi_result0[256];	/* on-stack INQUIRY buffer (non-ISA-DMA hosts) */
	Scsi_Device *SDpnt;
	Scsi_Device *SDtail;
	int sparse_lun;

	scsi_result = NULL;
	SCpnt = (Scsi_Cmnd *) scsi_init_malloc(sizeof(Scsi_Cmnd),
					       GFP_ATOMIC | GFP_DMA);
	if (SCpnt) {
		SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device),
							 GFP_ATOMIC);
		if (SDpnt) {
			/* Make sure we have something that is valid for DMA purposes */
			scsi_result = ((!shpnt->unchecked_isa_dma)
				       ? &scsi_result0[0] : scsi_init_malloc(512, GFP_DMA));
		}
	}

	/* scsi_result still NULL means one of the allocations above failed. */
	if (scsi_result == NULL) {
		printk("Unable to obtain scsi_result buffer\n");
		/* NOTE(review): if the SCpnt allocation failed, SDpnt is
		 * still uninitialized here and is compared/freed at
		 * "leave" below -- verify this path. */
		goto leave;
	}
	/*
	 * We must chain ourself in the host_queue, so commands can time out
	 */
	SCpnt->next = NULL;
	SDpnt->device_queue = SCpnt;
	SDpnt->host = shpnt;
	SDpnt->online = TRUE;

	init_waitqueue_head(&SDpnt->device_wait);

	/*
	 * Next, hook the device to the host in question.
	 */
	SDpnt->prev = NULL;
	SDpnt->next = NULL;
	if (shpnt->host_queue != NULL) {
		/* Walk to the tail and append. */
		SDtail = shpnt->host_queue;
		while (SDtail->next != NULL)
			SDtail = SDtail->next;

		SDtail->next = SDpnt;
		SDpnt->prev = SDtail;
	} else {
		shpnt->host_queue = SDpnt;
	}

	/*
	 * We need to increment the counter for this one device so we can track when
	 * things are quiet.
	 */
	atomic_inc(&shpnt->host_active);

	if (hardcoded == 1) {
		Scsi_Device *oldSDpnt = SDpnt;
		struct Scsi_Device_Template *sdtpnt;
		channel = hchannel;
		/* NOTE(review): these three early exits jump past the
		 * atomic_dec below, leaving host_active bumped -- verify. */
		if (channel > shpnt->max_channel)
			goto leave;
		dev = hid;
		if (dev >= shpnt->max_id)
			goto leave;
		lun = hlun;
		if (lun >= shpnt->max_lun)
			goto leave;
		scan_scsis_single(channel, dev, lun, &max_dev_lun, &sparse_lun,
				  &SDpnt, SCpnt, shpnt, scsi_result);
		/* SDpnt changed means scan_scsis_single() found a device
		 * and allocated a fresh scratch Scsi_Device. */
		if (SDpnt != oldSDpnt) {

			/* it could happen the blockdevice hasn't yet been inited */
			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
				if (sdtpnt->init && sdtpnt->dev_noticed)
					(*sdtpnt->init) ();

			oldSDpnt->scsi_request_fn = NULL;
			/* Offer the new device to every upper-level driver. */
			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
				if (sdtpnt->attach) {
					(*sdtpnt->attach) (oldSDpnt);
					if (oldSDpnt->attached) {
						scsi_build_commandblocks(oldSDpnt);
						if (0 == oldSDpnt->has_cmdblocks) {
							printk("scan_scsis: DANGER, no command blocks\n");
							/* What to do now ?? */
						}
					}
				}
			}
			resize_dma_pool();

			for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
				if (sdtpnt->finish && sdtpnt->nr_dev) {
					(*sdtpnt->finish) ();
				}
			}
		}
	} else {
		/* Actual LUN. PC ordering is 0->n IBM/spec ordering is n->0 */
		int order_dev;

		for (channel = 0; channel <= shpnt->max_channel; channel++) {
			for (dev = 0; dev < shpnt->max_id; ++dev) {
				if (shpnt->reverse_ordering)
					/* Shift to scanning 15,14,13... or 7,6,5,4, */
					order_dev = shpnt->max_id - dev - 1;
				else
					order_dev = dev;

				/* Never probe the adapter's own id. */
				if (shpnt->this_id != order_dev) {

					/*
					 * We need the for so our continue, etc. work fine. We put this in
					 * a variable so that we can override it during the scan if we
					 * detect a device *KNOWN* to have multiple logical units.
					 */
					max_dev_lun = (max_scsi_luns < shpnt->max_lun ?
						       max_scsi_luns : shpnt->max_lun);
					sparse_lun = 0;
					for (lun = 0; lun < max_dev_lun; ++lun) {
						if (!scan_scsis_single(channel, order_dev, lun, &max_dev_lun,
								       &sparse_lun, &SDpnt, SCpnt, shpnt,
								       scsi_result)
						    && !sparse_lun)
							break;	/* break means don't probe further for luns!=0 */
					}	/* for lun ends */
				}	/* if this_id != id ends */
			}	/* for dev ends */
		}		/* for channel ends */
	}			/* if/else hardcoded */

	/*
	 * We need to decrement the counter for this one device
	 * so we know when everything is quiet.
	 */
	atomic_dec(&shpnt->host_active);

      leave:

	{			/* Unchain SCpnt from host_queue */
		Scsi_Device *prev, *next;
		Scsi_Device *dqptr;

		/* Find the scratch Scsi_Device (SDpnt) in the list, if present. */
		for (dqptr = shpnt->host_queue; dqptr != SDpnt; dqptr = dqptr->next)
			continue;
		if (dqptr) {
			prev = dqptr->prev;
			next = dqptr->next;
			if (prev)
				prev->next = next;
			else
				shpnt->host_queue = next;
			if (next)
				next->prev = prev;
		}
	}

	/* Last device block does not exist. Free memory. */
	if (SDpnt != NULL)
		scsi_init_free((char *) SDpnt, sizeof(Scsi_Device));

	if (SCpnt != NULL)
		scsi_init_free((char *) SCpnt, sizeof(Scsi_Cmnd));

	/* If we allocated a buffer so we could do DMA, free it now */
	if (scsi_result != &scsi_result0[0] && scsi_result != NULL) {
		scsi_init_free(scsi_result, 512);
	}

	{
		/* Debug dump of the final host/device/command chains. */
		Scsi_Device *sdev;
		Scsi_Cmnd *scmd;

		SCSI_LOG_SCAN_BUS(4, printk("Host status for host %p:\n", shpnt));
		for (sdev = shpnt->host_queue; sdev; sdev = sdev->next) {
			SCSI_LOG_SCAN_BUS(4, printk("Device %d %p: ", sdev->id, sdev));
			for (scmd = sdev->device_queue; scmd; scmd = scmd->next) {
				SCSI_LOG_SCAN_BUS(4, printk("%p ", scmd));
			}
			SCSI_LOG_SCAN_BUS(4, printk("\n"));
		}
	}
}
673 * The worker for scan_scsis.
674 * Returning 0 means Please don't ask further for lun!=0, 1 means OK go on.
675 * Global variables used : scsi_devices(linked list)
/*
 * Probe a single (channel, id, lun) address.
 *
 * Issues TEST UNIT READY and then INQUIRY through the scratch command
 * block SCpnt.  On success the scratch Scsi_Device *SDpnt2 is turned
 * into the real device entry and a fresh scratch Scsi_Device is
 * allocated and appended to shpnt->host_queue for further probing.
 *
 * Returns 0 when the caller should stop probing higher LUNs on this
 * target, 1 to continue.  *max_dev_lun / *sparse_lun may be raised for
 * devices with BLIST_FORCELUN / BLIST_SPARSELUN / BLIST_MAX5LUN flags.
 */
int scan_scsis_single(int channel, int dev, int lun, int *max_dev_lun,
		      int *sparse_lun, Scsi_Device ** SDpnt2, Scsi_Cmnd * SCpnt,
		      struct Scsi_Host *shpnt, char *scsi_result)
{
	unsigned char scsi_cmd[12];
	struct Scsi_Device_Template *sdtpnt;
	Scsi_Device *SDtail, *SDpnt = *SDpnt2;
	int bflags, type = -1;

	SDpnt->host = shpnt;
	SDpnt->id = dev;
	SDpnt->lun = lun;
	SDpnt->channel = channel;
	SDpnt->online = TRUE;

	/* Some low level driver could use device->type (DB) */
	SDpnt->type = -1;

	/*
	 * Assume that the device will have handshaking problems, and then fix this
	 * field later if it turns out it doesn't
	 */
	SDpnt->borken = 1;
	SDpnt->was_reset = 0;
	SDpnt->expecting_cc_ua = 0;

	/* 6-byte TEST UNIT READY; pre-SCSI-2 devices need the LUN in byte 1. */
	scsi_cmd[0] = TEST_UNIT_READY;
	scsi_cmd[1] = lun << 5;
	scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0;

	SCpnt->host = SDpnt->host;
	SCpnt->device = SDpnt;
	SCpnt->target = SDpnt->id;
	SCpnt->lun = SDpnt->lun;
	SCpnt->channel = SDpnt->channel;
	{
		/* Issue the command and sleep until scan_scsis_done() ups us. */
		DECLARE_MUTEX_LOCKED(sem);
		SCpnt->request.sem = &sem;
		SCpnt->request.rq_status = RQ_SCSI_BUSY;
		spin_lock_irq(&io_request_lock);
		scsi_do_cmd(SCpnt, (void *) scsi_cmd,
			    (void *) NULL,
			    0, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
		spin_unlock_irq(&io_request_lock);
		down(&sem);
		SCpnt->request.sem = NULL;
	}

	SCSI_LOG_SCAN_BUS(3, printk("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n",
				    dev, lun, SCpnt->result));
	SCSI_LOG_SCAN_BUS(3, print_driverbyte(SCpnt->result));
	SCSI_LOG_SCAN_BUS(3, print_hostbyte(SCpnt->result));
	SCSI_LOG_SCAN_BUS(3, printk("\n"));

	if (SCpnt->result) {
		/*
		 * A failing TEST UNIT READY is still "something there" when
		 * we got valid sense data (response code 7x).  NOT READY,
		 * UNIT ATTENTION, and ILLEGAL REQUEST on lun 0 fall through
		 * to the INQUIRY; any other sense key aborts this lun but
		 * keeps scanning (return 1).
		 */
		if (((driver_byte(SCpnt->result) & DRIVER_SENSE) ||
		     (status_byte(SCpnt->result) & CHECK_CONDITION)) &&
		    ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
			if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
			    ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
			    ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
				return 1;
		} else
			return 0;
	}
	SCSI_LOG_SCAN_BUS(3, printk("scsi: performing INQUIRY\n"));
	/*
	 * Build an INQUIRY command block.
	 */
	scsi_cmd[0] = INQUIRY;
	scsi_cmd[1] = (lun << 5) & 0xe0;
	scsi_cmd[2] = 0;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 255;	/* allocation length: full 255-byte response */
	scsi_cmd[5] = 0;
	SCpnt->cmd_len = 0;
	{
		DECLARE_MUTEX_LOCKED(sem);
		SCpnt->request.sem = &sem;
		SCpnt->request.rq_status = RQ_SCSI_BUSY;
		spin_lock_irq(&io_request_lock);
		scsi_do_cmd(SCpnt, (void *) scsi_cmd,
			    (void *) scsi_result,
			    256, scan_scsis_done, SCSI_TIMEOUT, 3);
		spin_unlock_irq(&io_request_lock);
		down(&sem);
		SCpnt->request.sem = NULL;
	}

	SCSI_LOG_SCAN_BUS(3, printk("scsi: INQUIRY %s with code 0x%x\n",
				    SCpnt->result ? "failed" : "successful", SCpnt->result));

	if (SCpnt->result)
		return 0;	/* assume no peripheral if any sort of error */

	/*
	 * Check the peripheral qualifier field - this tells us whether LUNS
	 * are supported here or not.
	 */
	if ((scsi_result[0] >> 5) == 3) {
		return 0;	/* assume no peripheral if any sort of error */
	}
	/*
	 * It would seem some TOSHIBA CDROM gets things wrong
	 */
	if (!strncmp(scsi_result + 8, "TOSHIBA", 7) &&
	    !strncmp(scsi_result + 16, "CD-ROM", 6) &&
	    scsi_result[0] == TYPE_DISK) {
		scsi_result[0] = TYPE_ROM;
		scsi_result[1] |= 0x80;		/* removable */
	}
	/* Capture the identification strings from the INQUIRY data. */
	memcpy(SDpnt->vendor, scsi_result + 8, 8);
	memcpy(SDpnt->model, scsi_result + 16, 16);
	memcpy(SDpnt->rev, scsi_result + 32, 4);

	SDpnt->removable = (0x80 & scsi_result[1]) >> 7;
	SDpnt->online = TRUE;
	SDpnt->lockable = SDpnt->removable;
	SDpnt->changed = 0;
	SDpnt->access_count = 0;
	SDpnt->busy = 0;
	SDpnt->has_cmdblocks = 0;
	/*
	 * Currently, all sequential devices are assumed to be tapes, all random
	 * devices disk, with the appropriate read only flags set for ROM / WORM
	 * treated as RO.
	 */
	switch (type = (scsi_result[0] & 0x1f)) {
	case TYPE_TAPE:
	case TYPE_DISK:
	case TYPE_MOD:
	case TYPE_PROCESSOR:
	case TYPE_SCANNER:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		SDpnt->writeable = 1;
		break;
	case TYPE_WORM:
	case TYPE_ROM:
		SDpnt->writeable = 0;
		break;
	default:
		printk("scsi: unknown type %d\n", type);
	}

	SDpnt->device_blocked = FALSE;
	SDpnt->device_busy = 0;
	SDpnt->single_lun = 0;
	/* soft_reset: device claims soft-reset support (INQUIRY byte 7 bit 0
	 * with response data format 2). */
	SDpnt->soft_reset =
	    (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2);
	SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
	SDpnt->type = (type & 0x1f);

	print_inquiry(scsi_result);

	/* Let each upper-level driver claim the device. */
	for (sdtpnt = scsi_devicelist; sdtpnt;
	     sdtpnt = sdtpnt->next)
		if (sdtpnt->detect)
			SDpnt->attached +=
			    (*sdtpnt->detect) (SDpnt);

	/* ANSI version from INQUIRY byte 2; bump by one so the stored value
	 * compares naturally against the SCSI_1/SCSI_2 constants. */
	SDpnt->scsi_level = scsi_result[2] & 0x07;
	if (SDpnt->scsi_level >= 2 ||
	    (SDpnt->scsi_level == 1 &&
	     (scsi_result[3] & 0x0f) == 1))
		SDpnt->scsi_level++;

	/*
	 * Accommodate drivers that want to sleep when they should be in a polling
	 * loop.
	 */
	SDpnt->disconnect = 0;

	/*
	 * Get any flags for this device.
	 */
	bflags = get_device_flags(scsi_result);

	/*
	 * Set the tagged_queue flag for SCSI-II devices that purport to support
	 * tagged queuing in the INQUIRY data.
	 */
	SDpnt->tagged_queue = 0;
	if ((SDpnt->scsi_level >= SCSI_2) &&
	    (scsi_result[7] & 2) &&
	    !(bflags & BLIST_NOTQ)) {
		SDpnt->tagged_supported = 1;
		SDpnt->current_tag = 0;
	}
	/*
	 * Some revisions of the Texel CD ROM drives have handshaking problems when
	 * used with the Seagate controllers. Before we know what type of device
	 * we're talking to, we assume it's borken and then change it here if it
	 * turns out that it isn't a TEXEL drive.
	 */
	if ((bflags & BLIST_BORKEN) == 0)
		SDpnt->borken = 0;

	/*
	 * If we want to only allow I/O to one of the luns attached to this device
	 * at a time, then we set this flag.
	 */
	if (bflags & BLIST_SINGLELUN)
		SDpnt->single_lun = 1;

	/*
	 * These devices need this "key" to unlock the devices so we can use it
	 */
	if ((bflags & BLIST_KEY) != 0) {
		printk("Unlocked floptical drive.\n");
		SDpnt->lockable = 0;
		/* Vendor-specific MODE SENSE page 0x2e acts as the unlock key. */
		scsi_cmd[0] = MODE_SENSE;
		scsi_cmd[1] = (lun << 5) & 0xe0;
		scsi_cmd[2] = 0x2e;
		scsi_cmd[3] = 0;
		scsi_cmd[4] = 0x2a;
		scsi_cmd[5] = 0;
		SCpnt->cmd_len = 0;
		{
			DECLARE_MUTEX_LOCKED(sem);
			SCpnt->request.rq_status = RQ_SCSI_BUSY;
			SCpnt->request.sem = &sem;
			spin_lock_irq(&io_request_lock);
			scsi_do_cmd(SCpnt, (void *) scsi_cmd,
				    (void *) scsi_result, 0x2a,
				    scan_scsis_done, SCSI_TIMEOUT, 3);
			spin_unlock_irq(&io_request_lock);
			down(&sem);
			SCpnt->request.sem = NULL;
		}
	}
	/*
	 * Detach the command from the device. It was just a temporary to be used while
	 * scanning the bus - the real ones will be allocated later.
	 */
	SDpnt->device_queue = NULL;

	/*
	 * This device was already hooked up to the host in question,
	 * so at this point we just let go of it and it should be fine. We do need to
	 * allocate a new one and attach it to the host so that we can further scan the bus.
	 */
	SDpnt = (Scsi_Device *) scsi_init_malloc(sizeof(Scsi_Device), GFP_ATOMIC);
	*SDpnt2 = SDpnt;
	if (!SDpnt) {
		printk("scsi: scan_scsis_single: Cannot malloc\n");
		return 0;
	}
	/*
	 * And hook up our command block to the new device we will be testing
	 * for.
	 */
	SDpnt->device_queue = SCpnt;
	SDpnt->online = TRUE;

	init_waitqueue_head(&SDpnt->device_wait);

	/*
	 * Since we just found one device, there had damn well better be one in the list
	 * already.
	 */
	if (shpnt->host_queue == NULL)
		panic("scan_scsis_single: Host queue == NULL\n");

	SDtail = shpnt->host_queue;
	while (SDtail->next) {
		SDtail = SDtail->next;
	}

	/* Add this device to the linked list at the end */
	SDtail->next = SDpnt;
	SDpnt->prev = SDtail;
	SDpnt->next = NULL;

	/*
	 * Some scsi devices cannot be polled for lun != 0 due to firmware bugs
	 */
	if (bflags & BLIST_NOLUN)
		return 0;	/* break; */

	/*
	 * If this device is known to support sparse multiple units, override the
	 * other settings, and scan all of them.
	 */
	if (bflags & BLIST_SPARSELUN) {
		*max_dev_lun = 8;
		*sparse_lun = 1;
		return 1;
	}
	/*
	 * If this device is known to support multiple units, override the other
	 * settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN) {
		*max_dev_lun = 8;
		return 1;
	}
	/*
	 * REGAL CDC-4X: avoid hang after LUN 4
	 */
	if (bflags & BLIST_MAX5LUN) {
		*max_dev_lun = 5;
		return 1;
	}
	/*
	 * We assume the device can't handle lun!=0 if: - it reports scsi-0 (ANSI
	 * SCSI Revision 0) (old drives like MAXTOR XT-3280) or - it reports scsi-1
	 * (ANSI SCSI Revision 1) and Response Data Format 0
	 */
	if (((scsi_result[2] & 0x07) == 0)
	    ||
	    ((scsi_result[2] & 0x07) == 1 &&
	     (scsi_result[3] & 0x0f) == 0))
		return 0;

	return 1;
}
/*
 * Flag bits for the internal_timeout array.
 * These record which error-recovery phase a command is in; presumably
 * IN_RESET/IN_RESET2/IN_RESET3 mark successive reset escalation stages
 * -- TODO confirm against the old error-handling code that uses them.
 */
#define NORMAL_TIMEOUT 0
#define IN_ABORT 1
#define IN_RESET 2
#define IN_RESET2 4
#define IN_RESET3 8
1004 /* This function takes a quick look at a request, and decides if it
1005 * can be queued now, or if there would be a stall while waiting for
1006 * something else to finish. This routine assumes that interrupts are
1007 * turned off when entering the routine. It is the responsibility
1008 * of the calling code to ensure that this is the case.
/*
 * Try to claim a free command block for `device` and (when req != NULL)
 * load it from the block-layer request, splitting the request when it
 * exceeds the host's scatter-gather table size.
 *
 * Returns the prepared Scsi_Cmnd, or NULL when no command block is free
 * or the host/device is currently blocked.  Caller must hold interrupts
 * off (see comment above the function).
 */
Scsi_Cmnd *scsi_request_queueable(struct request * req, Scsi_Device * device)
{
	Scsi_Cmnd *SCpnt = NULL;
	int tablesize;
	Scsi_Cmnd *found = NULL;
	struct buffer_head *bh, *bhp;

	if (!device)
		panic("No device passed to scsi_request_queueable().\n");

	if (req && req->rq_status == RQ_INACTIVE)
		panic("Inactive in scsi_request_queueable");

	/*
	 * Look for a free command block. If we have been instructed not to queue
	 * multiple commands to multi-lun devices, then check to see what else is
	 * going for this device first.
	 */

	if (!device->single_lun) {
		/* Simple case: first inactive command block wins. */
		SCpnt = device->device_queue;
		while (SCpnt) {
			if (SCpnt->request.rq_status == RQ_INACTIVE)
				break;
			SCpnt = SCpnt->next;
		}
	} else {
		/*
		 * single_lun: refuse if ANY command is active on this
		 * channel/target (any lun); otherwise use the inactive
		 * block found for our own lun.
		 */
		SCpnt = device->device_queue;
		while (SCpnt) {
			if (SCpnt->channel == device->channel
			    && SCpnt->target == device->id) {
				if (SCpnt->lun == device->lun) {
					if (found == NULL
					    && SCpnt->request.rq_status == RQ_INACTIVE) {
						found = SCpnt;
					}
				}
				if (SCpnt->request.rq_status != RQ_INACTIVE) {
					/*
					 * I think that we should really limit things to one
					 * outstanding command per device - this is what tends
					 * to trip up buggy firmware.
					 */
					return NULL;
				}
			}
			SCpnt = SCpnt->next;
		}
		SCpnt = found;
	}

	if (!SCpnt)
		return NULL;

	if (SCSI_BLOCK(device, device->host))
		return NULL;

	if (req) {
		memcpy(&SCpnt->request, req, sizeof(struct request));
		tablesize = device->host->sg_tablesize;
		bhp = bh = req->bh;
		if (!tablesize)
			bh = NULL;	/* host can't do sg: treat as one segment */
		/* Take a quick look through the table to see how big it is.
		 * We already have our copy of req, so we can mess with that
		 * if we want to.
		 */
		while (req->nr_sectors && bh) {
			bhp = bhp->b_reqnext;
			/* A new sg entry is needed only when buffers are not
			 * physically contiguous. */
			if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
				tablesize--;
			req->nr_sectors -= bh->b_size >> 9;
			req->sector += bh->b_size >> 9;
			if (!tablesize)
				break;
			bh = bhp;
		}
		if (req->nr_sectors && bh && bh->b_reqnext) {	/* Any leftovers? */
			SCpnt->request.bhtail = bh;
			req->bh = bh->b_reqnext;	/* Divide request */
			bh->b_reqnext = NULL;
			bh = req->bh;

			/* Now reset things so that req looks OK */
			SCpnt->request.nr_sectors -= req->nr_sectors;
			req->current_nr_sectors = bh->b_size >> 9;
			req->buffer = bh->b_data;
			SCpnt->request.sem = NULL;	/* Wait until whole thing done */
		} else {
			/* Whole request consumed: release the request slot. */
			req->rq_status = RQ_INACTIVE;
			wake_up(&wait_for_request);
		}
	} else {
		SCpnt->request.rq_status = RQ_SCSI_BUSY;	/* Busy, but no request */
		SCpnt->request.sem = NULL;	/* And no one is waiting for the device
						 * either */
	}

	atomic_inc(&SCpnt->host->host_active);
	SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target,
				   atomic_read(&SCpnt->host->host_active)));
	SCpnt->use_sg = 0;	/* Reset the scatter-gather flag */
	SCpnt->old_use_sg = 0;
	SCpnt->transfersize = 0;
	SCpnt->underflow = 0;
	SCpnt->cmd_len = 0;

	/*
	 * Since not everyone seems to set the device info correctly
	 * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
	 */
	SCpnt->channel = device->channel;
	SCpnt->lun = device->lun;
	SCpnt->target = device->id;
	SCpnt->state = SCSI_STATE_INITIALIZING;
	SCpnt->owner = SCSI_OWNER_HIGHLEVEL;

	return SCpnt;
}
1132 /* This function returns a structure pointer that will be valid for
1133 * the device. The wait parameter tells us whether we should wait for
1134 * the unit to become free or not. We are also able to tell this routine
1135 * not to return a descriptor if the host is unable to accept any more
1136 * commands for the time being. We need to keep in mind that there is no
1137 * guarantee that the host remains not busy. Keep in mind the
1138 * scsi_request_queueable function also knows the internal allocation scheme
1139 * of the packets for each device
1142 Scsi_Cmnd *scsi_allocate_device(struct request ** reqp, Scsi_Device * device,
1143 int wait)
1145 kdev_t dev;
1146 struct request *req = NULL;
1147 int tablesize;
1148 struct buffer_head *bh, *bhp;
1149 struct Scsi_Host *host;
1150 Scsi_Cmnd *SCpnt = NULL;
1151 Scsi_Cmnd *SCwait = NULL;
1152 Scsi_Cmnd *found = NULL;
1154 if (!device)
1155 panic("No device passed to scsi_allocate_device().\n");
1157 if (reqp)
1158 req = *reqp;
1161 * See if this request has already been queued by an
1162 * interrupt routine
1165 if (req) {
1166 if (req->rq_status == RQ_INACTIVE)
1167 return NULL;
1168 dev = req->rq_dev;
1169 } else
1170 dev = 0; /* unused */
1172 host = device->host;
/* Never sleep in interrupt context: if the host is blocked, give up. */
1174 if (in_interrupt() && SCSI_BLOCK(device, host))
1175 return NULL;
/* Loop until we either claim an inactive command block or decide to
 * bail out; when wait is set we sleep on device_wait and retry. */
1177 while (1 == 1) {
1178 if (!device->single_lun) {
1179 SCpnt = device->device_queue;
1180 while (SCpnt) {
1181 SCwait = SCpnt;
1182 if (SCpnt->request.rq_status == RQ_INACTIVE)
1183 break;
1184 SCpnt = SCpnt->next;
1186 } else {
/* single_lun: same scan as scsi_request_queueable — any active
 * command on another lun of this target vetoes the allocation. */
1187 SCpnt = device->device_queue;
1188 while (SCpnt) {
1189 if (SCpnt->channel == device->channel
1190 && SCpnt->target == device->id) {
1191 if (SCpnt->lun == device->lun) {
1192 SCwait = SCpnt;
1193 if (found == NULL
1194 && SCpnt->request.rq_status == RQ_INACTIVE) {
1195 found = SCpnt;
1198 if (SCpnt->request.rq_status != RQ_INACTIVE) {
1200 * I think that we should really limit things to one
1201 * outstanding command per device - this is what tends
1202 * to trip up buggy firmware.
1204 found = NULL;
1205 break;
1208 SCpnt = SCpnt->next;
1210 SCpnt = found;
1213 /* See if this request has already been queued by an interrupt routine
1215 if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
1216 return NULL;
1218 if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) { /* Might have changed */
1219 if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE) {
1220 spin_unlock(&io_request_lock); /* FIXME!!!! */
1221 sleep_on(&device->device_wait);
1222 spin_lock_irq(&io_request_lock); /* FIXME!!!! */
1223 } else {
1224 if (!wait)
1225 return NULL;
1226 if (!SCwait) {
1227 printk("Attempt to allocate device channel %d,"
1228 " target %d, lun %d\n", device->channel,
1229 device->id, device->lun);
1230 panic("No device found in scsi_allocate_device\n");
1233 } else {
1234 if (req) {
/* Same request-splitting logic as scsi_request_queueable: clip the
 * request to sg_tablesize segments, divide the leftovers back onto
 * the original request. */
1235 memcpy(&SCpnt->request, req, sizeof(struct request));
1236 tablesize = device->host->sg_tablesize;
1237 bhp = bh = req->bh;
1238 if (!tablesize)
1239 bh = NULL;
1240 /* Take a quick look through the table to see how big it is.
1241 * We already have our copy of req, so we can mess with that
1242 * if we want to.
1244 while (req->nr_sectors && bh) {
1245 bhp = bhp->b_reqnext;
1246 if (!bhp || !CONTIGUOUS_BUFFERS(bh, bhp))
1247 tablesize--;
1248 req->nr_sectors -= bh->b_size >> 9;
1249 req->sector += bh->b_size >> 9;
1250 if (!tablesize)
1251 break;
1252 bh = bhp;
1254 if (req->nr_sectors && bh && bh->b_reqnext) { /* Any leftovers? */
1255 SCpnt->request.bhtail = bh;
1256 req->bh = bh->b_reqnext; /* Divide request */
1257 bh->b_reqnext = NULL;
1258 bh = req->bh;
1259 /* Now reset things so that req looks OK */
1260 SCpnt->request.nr_sectors -= req->nr_sectors;
1261 req->current_nr_sectors = bh->b_size >> 9;
1262 req->buffer = bh->b_data;
1263 SCpnt->request.sem = NULL; /* Wait until whole thing done */
1264 } else {
/* Whole request consumed: advance the caller's queue head too. */
1265 req->rq_status = RQ_INACTIVE;
1266 *reqp = req->next;
1267 wake_up(&wait_for_request);
1269 } else {
1270 SCpnt->request.rq_status = RQ_SCSI_BUSY;
1271 SCpnt->request.sem = NULL; /* And no one is waiting for this
1272 * to complete */
1274 atomic_inc(&SCpnt->host->host_active);
1275 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
1276 SCpnt->target,
1277 atomic_read(&SCpnt->host->host_active)));
1278 break;
1282 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
1283 SCpnt->old_use_sg = 0;
1284 SCpnt->transfersize = 0; /* No default transfer size */
1285 SCpnt->cmd_len = 0;
1287 SCpnt->underflow = 0; /* Do not flag underflow conditions */
1289 /* Since not everyone seems to set the device info correctly
1290 * before Scsi_Cmnd gets send out to scsi_do_command, we do it here.
1291 * FIXME(eric) This doesn't make any sense.
1293 SCpnt->channel = device->channel;
1294 SCpnt->lun = device->lun;
1295 SCpnt->target = device->id;
1296 SCpnt->state = SCSI_STATE_INITIALIZING;
1297 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1299 return SCpnt;
1303 * Function: scsi_release_command
1305 * Purpose: Release a command block.
1307 * Arguments: SCpnt - command block we are releasing.
1309 * Notes: The command block can no longer be used by the caller once
1310 * this function is called. This is in effect the inverse
1311 * of scsi_allocate_device/scsi_request_queueable.
1313 void scsi_release_command(Scsi_Cmnd * SCpnt)
/* Mark the block inactive/unused and drop the host's active count. */
1315 SCpnt->request.rq_status = RQ_INACTIVE;
1316 SCpnt->state = SCSI_STATE_UNUSED;
1317 SCpnt->owner = SCSI_OWNER_NOBODY;
1318 atomic_dec(&SCpnt->host->host_active);
1320 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
1321 SCpnt->target,
1322 atomic_read(&SCpnt->host->host_active),
1323 SCpnt->host->host_failed));
1324 if (SCpnt->host->host_failed != 0) {
1325 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
1326 SCpnt->host->in_recovery,
1327 SCpnt->host->eh_active));
1330 * If the host is having troubles, then look to see if this was the last
1331 * command that might have failed. If so, wake up the error handler.
1333 if (SCpnt->host->in_recovery
1334 && !SCpnt->host->eh_active
1335 && SCpnt->host->host_busy == SCpnt->host->host_failed) {
1336 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1337 atomic_read(&SCpnt->host->eh_wait->count)));
1338 up(SCpnt->host->eh_wait);
1343 * This is inline because we have stack problems if we recurse too deeply.
*
* internal_cmnd() hands a fully-prepared Scsi_Cmnd to the low-level
* driver: it assigns a serial number, waits out any post-reset settle
* delay, starts the command timer, then either calls queuecommand()
* (queueing hosts) or the synchronous command() entry point.  The return
* value is only meaningful for new-eh queueing hosts (non-zero when the
* command was re-queued via scsi_mlqueue_insert).
1346 inline int internal_cmnd(Scsi_Cmnd * SCpnt)
1348 #ifdef DEBUG_DELAY
1349 unsigned long clock;
1350 #endif
1351 struct Scsi_Host *host;
1352 int rtn = 0;
1353 unsigned long timeout;
1355 #if DEBUG
1356 unsigned long *ret = 0;
1357 #ifdef __mips__
1358 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
1359 #else
1360 ret = __builtin_return_address(0);
1361 #endif
1362 #endif
1364 host = SCpnt->host;
1366 /* Assign a unique nonzero serial_number. */
1367 if (++serial_number == 0)
1368 serial_number = 1;
1369 SCpnt->serial_number = serial_number;
1372 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
1373 * we can avoid the drive not being ready.
1375 timeout = host->last_reset + MIN_RESET_DELAY;
1377 if (host->resetting && time_before(jiffies, timeout)) {
1378 int ticks_remaining = timeout - jiffies;
1380 * NOTE: This may be executed from within an interrupt
1381 * handler! This is bad, but for now, it'll do. The irq
1382 * level of the interrupt handler has been masked out by the
1383 * platform dependent interrupt handling code already, so the
1384 * sti() here will not cause another call to the SCSI host's
1385 * interrupt handler (assuming there is one irq-level per
1386 * host).
1388 spin_unlock_irq(&io_request_lock);
/* Busy-wait the remaining ticks; mdelay(1 + 999/HZ) is roughly one
 * jiffy per iteration. */
1389 while (--ticks_remaining >= 0)
1390 mdelay(1 + 999 / HZ);
1391 host->resetting = 0;
1392 spin_lock_irq(&io_request_lock);
/* Arm the command timer with the timeout handler matching the host's
 * error-handling style (new vs. old eh). */
1394 if (host->hostt->use_new_eh_code) {
1395 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
1396 } else {
1397 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
1398 scsi_old_times_out);
1402 * We will use a queued command if possible, otherwise we will emulate the
1403 * queuing and calling of completion function ourselves.
1405 SCSI_LOG_MLQUEUE(3, printk("internal_cmnd (host = %d, channel = %d, target = %d, "
1406 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
1407 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
1408 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
1410 SCpnt->state = SCSI_STATE_QUEUED;
1411 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
1412 if (host->can_queue) {
1413 SCSI_LOG_MLQUEUE(3, printk("queuecommand : routine at %p\n",
1414 host->hostt->queuecommand));
1416 * Use the old error handling code if we haven't converted the driver
1417 * to use the new one yet. Note - only the new queuecommand variant
1418 * passes a meaningful return value.
1420 if (host->hostt->use_new_eh_code) {
1421 rtn = host->hostt->queuecommand(SCpnt, scsi_done);
1422 if (rtn != 0) {
/* Host refused the command: park it on the mid-level queue. */
1423 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
1425 } else {
1426 host->hostt->queuecommand(SCpnt, scsi_old_done);
1428 } else {
/* Non-queueing host: run the command synchronously and complete it
 * ourselves. */
1429 int temp;
1431 SCSI_LOG_MLQUEUE(3, printk("command() : routine at %p\n", host->hostt->command));
1432 temp = host->hostt->command(SCpnt);
1433 SCpnt->result = temp;
1434 #ifdef DEBUG_DELAY
1435 clock = jiffies + 4 * HZ;
1436 spin_unlock_irq(&io_request_lock);
1437 while (time_before(jiffies, clock))
1438 barrier();
1439 spin_lock_irq(&io_request_lock);
1440 printk("done(host = %d, result = %04x) : routine at %p\n",
1441 host->host_no, temp, host->hostt->command);
1442 #endif
1443 if (host->hostt->use_new_eh_code) {
1444 scsi_done(SCpnt);
1445 } else {
1446 scsi_old_done(SCpnt);
1449 SCSI_LOG_MLQUEUE(3, printk("leaving internal_cmnd()\n"));
1450 return rtn;
1454 * scsi_do_cmd sends all the commands out to the low-level driver. It
1455 * handles the specifics required for each low level driver - ie queued
1456 * or non queued. It also prevents conflicts when different high level
1457 * drivers go for the same host at the same time.
*
* Arguments: SCpnt   - command block to fill in and send
*            cmnd    - 12-byte CDB to copy into the command
*            buffer/bufflen - data buffer for the transfer
*            done    - completion callback invoked by the mid level
*            timeout - per-command timeout in ticks
*            retries - number of retries allowed
1460 void scsi_do_cmd(Scsi_Cmnd * SCpnt, const void *cmnd,
1461 void *buffer, unsigned bufflen, void (*done) (Scsi_Cmnd *),
1462 int timeout, int retries)
1464 struct Scsi_Host *host = SCpnt->host;
1465 Scsi_Device *device = SCpnt->device;
1467 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
1469 SCSI_LOG_MLQUEUE(4,
1471 int i;
1472 int target = SCpnt->target;
1473 printk("scsi_do_cmd (host = %d, channel = %d target = %d, "
1474 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1475 "retries = %d)\n"
1476 "command : ", host->host_no, SCpnt->channel, target, buffer,
1477 bufflen, done, timeout, retries);
1478 for (i = 0; i < 10; ++i)
1479 printk("%02x ", ((unsigned char *) cmnd)[i]);
1480 printk("\n");
1483 if (!host) {
1484 panic("Invalid or not present host.\n");
1487 * We must prevent reentrancy to the lowlevel host driver. This prevents
1488 * it - we enter a loop until the host we want to talk to is not busy.
1489 * Race conditions are prevented, as interrupts are disabled in between the
1490 * time we check for the host being not busy, and the time we mark it busy
1491 * ourselves.
1494 SCpnt->pid = scsi_pid++;
/* Sleep until the host is no longer blocked (drops io_request_lock
 * around the sleep — see the FIXME notes). */
1496 while (SCSI_BLOCK((Scsi_Device *) NULL, host)) {
1497 spin_unlock(&io_request_lock); /* FIXME!!! */
1498 SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host));
1499 spin_lock_irq(&io_request_lock); /* FIXME!!! */
1502 if (host->block)
1503 host_active = host;
1505 host->host_busy++;
1506 device->device_busy++;
1509 * Our own function scsi_done (which marks the host as not busy, disables
1510 * the timeout counter, etc) will be called by us or by the
1511 * scsi_hosts[host].queuecommand() function needs to also call
1512 * the completion function for the high level driver.
1515 memcpy((void *) SCpnt->data_cmnd, (const void *) cmnd, 12);
1516 SCpnt->reset_chain = NULL;
1517 SCpnt->serial_number = 0;
1518 SCpnt->serial_number_at_timeout = 0;
1519 SCpnt->bufflen = bufflen;
1520 SCpnt->buffer = buffer;
1521 SCpnt->flags = 0;
1522 SCpnt->retries = 0;
1523 SCpnt->allowed = retries;
1524 SCpnt->done = done;
1525 SCpnt->timeout_per_command = timeout;
1527 memcpy((void *) SCpnt->cmnd, (const void *) cmnd, 12);
1528 /* Zero the sense buffer. Some host adapters automatically request
1529 * sense on error. 0 is not a valid sense code.
1531 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1532 SCpnt->request_buffer = buffer;
1533 SCpnt->request_bufflen = bufflen;
1534 SCpnt->old_use_sg = SCpnt->use_sg;
/* Derive the CDB length from the opcode group if the caller left it 0. */
1535 if (SCpnt->cmd_len == 0)
1536 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1537 SCpnt->old_cmd_len = SCpnt->cmd_len;
1539 /* Start the timer ticking. */
1541 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1542 SCpnt->abort_reason = 0;
1543 SCpnt->result = 0;
1544 internal_cmnd(SCpnt);
1546 SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_do_cmd()\n"));
1549 /* This function is the mid-level interrupt routine, which decides how
1550 * to handle error conditions. Each invocation of this function must
1551 * do one and *only* one of the following:
1553 * 1) Insert command in BH queue.
1554 * 2) Activate error handler for host.
1556 * FIXME(eric) - I am concerned about stack overflow (still). An interrupt could
1557 * come while we are processing the bottom queue, which would cause another command
1558 * to be stuffed onto the bottom queue, and it would in turn be processed as that
1559 * interrupt handler is returning. Given a sufficiently steady rate of returning
1560 * commands, this could cause the stack to overflow. I am not sure what is the most
1561 * appropriate solution here - we should probably keep a depth count, and not process
1562 * any commands while we still have a bottom handler active higher in the stack.
1564 * There is currently code in the bottom half handler to monitor recursion in the bottom
1565 * handler and report if it ever happens. If this becomes a problem, it won't be hard to
1566 * engineer something to deal with it so that only the outer layer ever does any real
1567 * processing.
1569 void scsi_done(Scsi_Cmnd * SCpnt)
1573 * We don't have to worry about this one timing out any more.
1575 scsi_delete_timer(SCpnt);
1577 /* Set the serial numbers back to zero */
1578 SCpnt->serial_number = 0;
1581 * First, see whether this command already timed out. If so, we ignore
1582 * the response. We treat it as if the command never finished.
1584 * Since serial_number is now 0, the error handler could detect this
1585 * situation and avoid calling the low level driver abort routine.
1586 * (DB)
1588 if (SCpnt->state == SCSI_STATE_TIMEOUT) {
1589 SCSI_LOG_MLCOMPLETE(1, printk("Ignoring completion of %p due to timeout status", SCpnt));
1590 return;
1592 SCpnt->serial_number_at_timeout = 0;
1593 SCpnt->state = SCSI_STATE_BHQUEUE;
1594 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1595 SCpnt->bh_next = NULL;
1598 * Next, put this command in the BH queue.
1600 * We need a spinlock here, or compare and exchange if we can reorder incoming
1601 * Scsi_Cmnds, as it happens pretty often scsi_done is called multiple times
1602 * before bh is serviced. -jj
1604 * We already have the io_request_lock here, since we are called from the
1605 * interrupt handler or the error handler. (DB)
/* Append to the singly-linked BH queue (head/tail pointers). */
1608 if (!scsi_bh_queue_head) {
1609 scsi_bh_queue_head = SCpnt;
1610 scsi_bh_queue_tail = SCpnt;
1611 } else {
1612 scsi_bh_queue_tail->bh_next = SCpnt;
1613 scsi_bh_queue_tail = SCpnt;
1617 * Mark the bottom half handler to be run.
1619 mark_bh(SCSI_BH);
1623 * Procedure: scsi_bottom_half_handler
1625 * Purpose: Called after we have finished processing interrupts, it
1626 * performs post-interrupt handling for commands that may
1627 * have completed.
1629 * Notes: This is called with all interrupts enabled. This should reduce
1630 * interrupt latency, stack depth, and reentrancy of the low-level
1631 * drivers.
1633 * The io_request_lock is required in all the routine. There was a subtle
1634 * race condition when scsi_done is called after a command has already
1635 * timed out but before the time out is processed by the error handler.
1636 * (DB)
1638 void scsi_bottom_half_handler(void)
1640 Scsi_Cmnd *SCpnt;
1641 Scsi_Cmnd *SCnext;
1642 unsigned long flags;
1644 spin_lock_irqsave(&io_request_lock, flags);
/* Keep draining the BH queue until it stays empty; scsi_done can
 * refill it (under the lock) while we process entries. */
1646 while (1 == 1) {
1647 SCpnt = scsi_bh_queue_head;
1648 scsi_bh_queue_head = NULL;
1650 if (SCpnt == NULL) {
1651 spin_unlock_irqrestore(&io_request_lock, flags);
1652 return;
1654 SCnext = SCpnt->bh_next;
1656 for (; SCpnt; SCpnt = SCnext) {
1657 SCnext = SCpnt->bh_next;
/* Dispatch on what the mid level decides should happen to this
 * completed command. */
1659 switch (scsi_decide_disposition(SCpnt)) {
1660 case SUCCESS:
1662 * Add to BH queue.
1664 SCSI_LOG_MLCOMPLETE(3, printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1665 SCpnt->host->host_failed,
1666 SCpnt->result));
1668 scsi_finish_command(SCpnt);
1669 break;
1670 case NEEDS_RETRY:
1672 * We only come in here if we want to retry a command. The
1673 * test to see whether the command should be retried should be
1674 * keeping track of the number of tries, so we don't end up looping,
1675 * of course.
1677 SCSI_LOG_MLCOMPLETE(3, printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1678 SCpnt->host->host_failed, SCpnt->result));
1680 scsi_retry_command(SCpnt);
1681 break;
1682 case ADD_TO_MLQUEUE:
1684 * This typically happens for a QUEUE_FULL message -
1685 * typically only when the queue depth is only
1686 * approximate for a given device. Adding a command
1687 * to the queue for the device will prevent further commands
1688 * from being sent to the device, so we shouldn't end up
1689 * with tons of things being sent down that shouldn't be.
1691 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1692 break;
1693 default:
1695 * Here we have a fatal error of some sort. Turn it over to
1696 * the error handler.
1698 SCSI_LOG_MLCOMPLETE(3, printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1699 SCpnt, SCpnt->result,
1700 atomic_read(&SCpnt->host->host_active),
1701 SCpnt->host->host_busy,
1702 SCpnt->host->host_failed));
1705 * Dump the sense information too.
1707 if ((status_byte(SCpnt->result) & CHECK_CONDITION) != 0) {
1708 SCSI_LOG_MLCOMPLETE(3, print_sense("bh", SCpnt));
1710 if (SCpnt->host->eh_wait != NULL) {
/* Hand the command to the error-handler thread; wake it when the
 * last outstanding command on the host has failed. */
1711 SCpnt->host->host_failed++;
1712 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1713 SCpnt->state = SCSI_STATE_FAILED;
1714 SCpnt->host->in_recovery = 1;
1716 * If the host is having troubles, then look to see if this was the last
1717 * command that might have failed. If so, wake up the error handler.
1719 if (SCpnt->host->host_busy == SCpnt->host->host_failed) {
1720 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1721 atomic_read(&SCpnt->host->eh_wait->count)));
1722 up(SCpnt->host->eh_wait);
1724 } else {
1726 * We only get here if the error recovery thread has died.
1728 scsi_finish_command(SCpnt);
1731 } /* for(; SCpnt...) */
1733 } /* while(1==1) */
1735 spin_unlock_irqrestore(&io_request_lock, flags);
1740 * Function: scsi_retry_command
1742 * Purpose: Send a command back to the low level to be retried.
1744 * Notes: This command is always executed in the context of the
1745 * bottom half handler, or the error handler thread. Low
1746 * level drivers should not become re-entrant as a result of
1747 * this.
*
* Restores the command's original CDB/buffer state (the low level may
* have modified them, e.g. for autosense), clears the result and sense
* buffer, then resubmits via internal_cmnd().
1749 int scsi_retry_command(Scsi_Cmnd * SCpnt)
1751 memcpy((void *) SCpnt->cmnd, (void *) SCpnt->data_cmnd,
1752 sizeof(SCpnt->data_cmnd));
1753 SCpnt->request_buffer = SCpnt->buffer;
1754 SCpnt->request_bufflen = SCpnt->bufflen;
1755 SCpnt->use_sg = SCpnt->old_use_sg;
1756 SCpnt->cmd_len = SCpnt->old_cmd_len;
1757 SCpnt->result = 0;
1758 memset((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1759 return internal_cmnd(SCpnt);
1763 * Function: scsi_finish_command
1765 * Purpose: Pass command off to upper layer for finishing of I/O
1766 * request, waking processes that are waiting on results,
1767 * etc.
1769 void scsi_finish_command(Scsi_Cmnd * SCpnt)
1771 struct Scsi_Host *host;
1772 Scsi_Device *device;
1774 host = SCpnt->host;
1775 device = SCpnt->device;
1777 host->host_busy--; /* Indicate that we are free */
1778 device->device_busy--; /* Decrement device usage counter. */
1780 if (host->block && host->host_busy == 0) {
1781 host_active = NULL;
1783 /* For block devices "wake_up" is done in end_scsi_request */
1784 if (!SCSI_BLK_MAJOR(MAJOR(SCpnt->request.rq_dev))) {
1785 struct Scsi_Host *next;
/* Wake every other host in this host's block ring. */
1787 for (next = host->block; next != host; next = next->block)
1788 wake_up(&next->host_wait);
1792 * Now try and drain the mid-level queue if any commands have been
1793 * inserted. Check to see whether the queue even has anything in
1794 * it first, as otherwise this is useless overhead.
1796 if (SCpnt->host->pending_commands != NULL) {
1797 scsi_mlqueue_finish(SCpnt->host, SCpnt->device);
1799 wake_up(&host->host_wait);
1802 * If we have valid sense information, then some kind of recovery
1803 * must have taken place. Make a note of this.
1805 if (scsi_sense_valid(SCpnt)) {
1806 SCpnt->result |= (DRIVER_SENSE << 24);
1808 SCSI_LOG_MLCOMPLETE(3, printk("Notifying upper driver of completion for device %d %x\n",
1809 SCpnt->device->id, SCpnt->result));
1811 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1812 SCpnt->state = SCSI_STATE_FINISHED;
1814 /* We can get here with use_sg=0, causing a panic in the upper level (DB) */
1815 SCpnt->use_sg = SCpnt->old_use_sg;
/* Finally invoke the completion callback supplied to scsi_do_cmd(). */
1817 SCpnt->done(SCpnt);
1820 #ifdef CONFIG_MODULES
1821 static int scsi_register_host(Scsi_Host_Template *);
1822 static void scsi_unregister_host(Scsi_Host_Template *);
1823 #endif
1825 void *scsi_malloc(unsigned int len)
1827 unsigned int nbits, mask;
1828 int i, j;
1829 if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
1830 return NULL;
1832 nbits = len >> 9;
1833 mask = (1 << nbits) - 1;
1835 for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
1836 for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
1837 if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
1838 dma_malloc_freelist[i] |= (mask << j);
1839 scsi_dma_free_sectors -= nbits;
1840 #ifdef DEBUG
1841 SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
1842 printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
1843 #endif
1844 return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
1847 return NULL; /* Nope. No more */
/* scsi_free - return a buffer obtained from scsi_malloc() to the DMA pool.
 * Locates the pool page containing obj, validates alignment and that the
 * sectors were actually allocated (panics otherwise), clears the bitmap
 * bits and credits scsi_dma_free_sectors.  Returns 0 on success. */
1850 int scsi_free(void *obj, unsigned int len)
1852 unsigned int page, sector, nbits, mask;
1854 #ifdef DEBUG
/* NOTE(review): ret is declared unsigned long but assigned a pointer from
 * __builtin_return_address() without a cast — harmless on these targets but
 * worth an explicit cast; DEBUG-only code. */
1855 unsigned long ret = 0;
1857 #ifdef __mips__
1858 __asm__ __volatile__("move\t%0,$31":"=r"(ret));
1859 #else
1860 ret = __builtin_return_address(0);
1861 #endif
1862 printk("scsi_free %p %d\n", obj, len);
1863 SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
1864 #endif
/* Find which pool page the object lives in. */
1866 for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
1867 unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
1868 if ((unsigned long) obj >= page_addr &&
1869 (unsigned long) obj < page_addr + PAGE_SIZE) {
1870 sector = (((unsigned long) obj) - page_addr) >> 9;
1872 nbits = len >> 9;
1873 mask = (1 << nbits) - 1;
/* The freed run must fit inside the page's sector bitmap. */
1875 if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
1876 panic("scsi_free:Bad memory alignment");
/* Every sector being freed must currently be marked allocated. */
1878 if ((dma_malloc_freelist[page] &
1879 (mask << sector)) != (mask << sector)) {
1880 #ifdef DEBUG
1881 printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
1882 obj, len, ret);
1883 #endif
1884 panic("scsi_free:Trying to free unused memory");
1886 scsi_dma_free_sectors += nbits;
1887 dma_malloc_freelist[page] &= ~(mask << sector);
1888 return 0;
1891 panic("scsi_free:Bad offset");
1895 int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
/* scsi_init_malloc - allocate and zero a chunk of kernel memory.
 * Page-multiple sizes come from __get_free_pages with GFP_DMA (the DMA
 * pool assumes page-aligned, DMA-capable pages); everything else comes
 * from kmalloc.  Returns NULL on failure. */
1897 void *scsi_init_malloc(unsigned int size, int gfp_mask)
1899 void *retval;
1902 * For buffers used by the DMA pool, we assume page aligned
1903 * structures.
1905 if ((size % PAGE_SIZE) == 0) {
1906 int order, a_size;
/* Find the smallest power-of-two page order covering size. */
1907 for (order = 0, a_size = PAGE_SIZE;
1908 a_size < size; order++, a_size <<= 1);
1909 retval = (void *) __get_free_pages(gfp_mask | GFP_DMA, order);
1910 } else
1911 retval = kmalloc(size, gfp_mask);
1913 if (retval)
1914 memset(retval, 0, size);
1915 return retval;
/* scsi_init_free - release memory obtained from scsi_init_malloc().
 * Must mirror the allocation choice exactly: page-multiple sizes were
 * allocated with __get_free_pages, the rest with kmalloc. */
1919 void scsi_init_free(char *ptr, unsigned int size)
1922 * We need this special code here because the DMA pool assumes
1923 * page aligned data. Besides, it is wasteful to allocate
1924 * page sized chunks with kmalloc.
1926 if ((size % PAGE_SIZE) == 0) {
1927 int order, a_size;
/* Recompute the same order scsi_init_malloc used for this size. */
1929 for (order = 0, a_size = PAGE_SIZE;
1930 a_size < size; order++, a_size <<= 1);
1931 free_pages((unsigned long) ptr, order);
1932 } else
1933 kfree(ptr);
/* scsi_build_commandblocks - preallocate the per-device Scsi_Cmnd pool.
 * Allocates queue_depth command blocks (defaulting to the host's
 * cmd_per_lun), links them onto SDpnt->device_queue, and sets
 * SDpnt->has_cmdblocks.  On allocation failure the depth is trimmed to
 * however many blocks were actually obtained. */
1936 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1938 struct Scsi_Host *host = SDpnt->host;
1939 int j;
1940 Scsi_Cmnd *SCpnt;
1942 if (SDpnt->queue_depth == 0)
1943 SDpnt->queue_depth = host->cmd_per_lun;
1944 SDpnt->device_queue = NULL;
1946 for (j = 0; j < SDpnt->queue_depth; j++) {
1947 SCpnt = (Scsi_Cmnd *)
1948 scsi_init_malloc(sizeof(Scsi_Cmnd),
1949 GFP_ATOMIC |
1950 (host->unchecked_isa_dma ? GFP_DMA : 0));
1951 if (NULL == SCpnt)
1952 break; /* If not, the next line will oops ... */
1953 memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
1954 SCpnt->host = host;
1955 SCpnt->device = SDpnt;
1956 SCpnt->target = SDpnt->id;
1957 SCpnt->lun = SDpnt->lun;
1958 SCpnt->channel = SDpnt->channel;
1959 SCpnt->request.rq_status = RQ_INACTIVE;
1960 SCpnt->host_wait = FALSE;
1961 SCpnt->device_wait = FALSE;
1962 SCpnt->use_sg = 0;
1963 SCpnt->old_use_sg = 0;
1964 SCpnt->old_cmd_len = 0;
1965 SCpnt->underflow = 0;
1966 SCpnt->transfersize = 0;
1967 SCpnt->serial_number = 0;
1968 SCpnt->serial_number_at_timeout = 0;
1969 SCpnt->host_scribble = NULL;
/* Push onto the head of the device's free-command list. */
1970 SCpnt->next = SDpnt->device_queue;
1971 SDpnt->device_queue = SCpnt;
1972 SCpnt->state = SCSI_STATE_UNUSED;
1973 SCpnt->owner = SCSI_OWNER_NOBODY;
1975 if (j < SDpnt->queue_depth) { /* low on space (D.Gilbert 990424) */
1976 printk("scsi_build_commandblocks: want=%d, space for=%d blocks\n",
1977 SDpnt->queue_depth, j);
1978 SDpnt->queue_depth = j;
1979 SDpnt->has_cmdblocks = (0 != j);
1980 } else
1981 SDpnt->has_cmdblocks = 1;
1984 #ifndef MODULE /* { */
1986 * scsi_dev_init() is our initialization routine, which in turn calls host
1987 * initialization, bus scanning, and sd/st initialization routines.
1988 * This is only used at boot time.
1990 int __init scsi_dev_init(void)
1992 Scsi_Device *SDpnt;
1993 struct Scsi_Host *shpnt;
1994 struct Scsi_Device_Template *sdtpnt;
1995 #ifdef FOO_ON_YOU
1996 return;
1997 #endif
1999 /* Yes we're here... */
/* NOTE(review): "#if CONFIG_PROC_FS" (here and below) relies on an
 * undefined macro evaluating to 0; "#ifdef" is the usual kernel form —
 * confirm against the surrounding config conventions. */
2000 #if CONFIG_PROC_FS
2001 dispatch_scsi_info_ptr = dispatch_scsi_info;
2002 #endif
2004 /* Init a few things so we can "malloc" memory. */
2005 scsi_loadable_module_flag = 0;
2007 /* Register the /proc/scsi/scsi entry */
2008 #if CONFIG_PROC_FS
2009 proc_scsi_register(0, &proc_scsi_scsi);
2010 #endif
2012 /* initialize all hosts */
2013 scsi_init();
2016 * This is where the processing takes place for most everything
2017 * when commands are completed. Until we do this, we will not be able
2018 * to queue any commands.
2020 init_bh(SCSI_BH, scsi_bottom_half_handler);
2022 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2023 scan_scsis(shpnt, 0, 0, 0, 0); /* scan for scsi devices */
2024 if (shpnt->select_queue_depths != NULL)
2025 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
2028 printk("scsi : detected ");
2029 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2030 if (sdtpnt->dev_noticed && sdtpnt->name)
2031 printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
2032 (sdtpnt->dev_noticed != 1) ? "s" : "");
2033 printk("total.\n");
/* Let each upper-level driver (sd, st, sr, ...) initialise itself. */
2035 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2036 if (sdtpnt->init && sdtpnt->dev_noticed)
2037 (*sdtpnt->init) ();
/* Attach upper-level drivers to each discovered device and build its
 * command-block pool. */
2039 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2040 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
2041 /* SDpnt->scsi_request_fn = NULL; */
2042 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2043 if (sdtpnt->attach)
2044 (*sdtpnt->attach) (SDpnt);
2045 if (SDpnt->attached) {
2046 scsi_build_commandblocks(SDpnt);
2047 if (0 == SDpnt->has_cmdblocks) {
2048 printk("scsi_dev_init: DANGER, no command blocks\n");
2049 /* What to do now ?? */
2056 * This should build the DMA pool.
2058 resize_dma_pool();
2061 * OK, now we finish the initialization by doing spin-up, read
2062 * capacity, etc, etc
2064 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2065 if (sdtpnt->finish && sdtpnt->nr_dev)
2066 (*sdtpnt->finish) ();
2068 scsi_loadable_module_flag = 1;
2070 return 0;
2072 #endif /* MODULE */ /* } */
/* print_inquiry - pretty-print a standard INQUIRY response.
 * Prints the vendor (bytes 8-15), model (16-31) and revision (32-35)
 * fields, the peripheral device type (byte 0 & 0x1f) and the ANSI SCSI
 * revision (byte 2 & 0x07).  Bytes past the additional-length field
 * (data[4] + 5) or below 0x20 are shown as spaces. */
2074 static void print_inquiry(unsigned char *data)
2076 int i;
2078 printk(" Vendor: ");
2079 for (i = 8; i < 16; i++) {
2080 if (data[i] >= 0x20 && i < data[4] + 5)
2081 printk("%c", data[i]);
2082 else
2083 printk(" ");
2086 printk(" Model: ");
2087 for (i = 16; i < 32; i++) {
2088 if (data[i] >= 0x20 && i < data[4] + 5)
2089 printk("%c", data[i]);
2090 else
2091 printk(" ");
2094 printk(" Rev: ");
2095 for (i = 32; i < 36; i++) {
2096 if (data[i] >= 0x20 && i < data[4] + 5)
2097 printk("%c", data[i]);
2098 else
2099 printk(" ");
2102 printk("\n");
2104 i = data[0] & 0x1f;
2106 printk(" Type: %s ",
2107 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown ");
2108 printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
/* ANSI level 1 with response data format 1 indicates CCS compliance. */
2109 if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
2110 printk(" CCS\n");
2111 else
2112 printk("\n");
2116 #ifdef CONFIG_PROC_FS
/*
 * scsi_proc_info -- read/write handler for /proc/scsi/scsi.
 *
 * inout == 0 (read): emit the "Attached devices:" listing into `buffer`
 * using the usual /proc windowing protocol (offset/length, *start, begin).
 * inout != 0 (write): parse a command written to the file:
 *   "scsi dump <level>"                          -- dump command state
 *   "scsi log <token> <level>"                   -- adjust logging levels
 *   "scsi add-single-device <h> <c> <i> <l>"     -- scan in one device
 *   "scsi remove-single-device <h> <c> <i> <l>"  -- detach one device
 * Returns the byte count for reads, `length`/0 for accepted writes, or a
 * negative errno.
 *
 * NOTE(review): this chunk is a lossy extraction -- blank lines, closing
 * braces and some comment delimiters of the original file are missing
 * here; all remaining tokens are kept verbatim.
 */
2117 int scsi_proc_info(char *buffer, char **start, off_t offset, int length,
2118 int hostno, int inout)
2120 Scsi_Cmnd *SCpnt;
2121 struct Scsi_Device_Template *SDTpnt;
2122 Scsi_Device *scd;
2123 struct Scsi_Host *HBA_ptr;
2124 char *p;
2125 int host, channel, id, lun;
2126 int size, len = 0;
2127 off_t begin = 0;
2128 off_t pos = 0;
/* ---- Read side: build the attached-devices listing. ---- */
2130 if (inout == 0) {
2132 * First, see if there are any attached devices or not.
2134 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
2135 if (HBA_ptr->host_queue != NULL) {
2136 break;
2139 size = sprintf(buffer + len, "Attached devices: %s\n", (HBA_ptr) ? "" : "none");
2140 len += size;
2141 pos = begin + len;
2142 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
2143 #if 0
2144 size += sprintf(buffer + len, "scsi%2d: %s\n", (int) HBA_ptr->host_no,
2145 HBA_ptr->hostt->procname);
2146 len += size;
2147 pos = begin + len;
2148 #endif
2149 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
2150 proc_print_scsidevice(scd, buffer, &size, len);
2151 len += size;
2152 pos = begin + len;
/* Standard /proc pagination: discard data before `offset`, stop once
 * we have produced data past offset + length. */
2154 if (pos < offset) {
2155 len = 0;
2156 begin = pos;
2158 if (pos > offset + length)
2159 goto stop_output;
2163 stop_output:
2164 *start = buffer + (offset - begin); /* Start of wanted data */
2165 len -= (offset - begin); /* Start slop */
2166 if (len > length)
2167 len = length; /* Ending slop */
2168 return (len);
/* ---- Write side: every command starts with "scsi " and needs at
 * least 11 bytes. ---- */
2170 if (!buffer || length < 11 || strncmp("scsi", buffer, 4))
2171 return (-EINVAL);
2174 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
2175 * to dump status of all scsi commands. The number is used to specify the level
2176 * of detail in the dump.
2178 if (!strncmp("dump", buffer + 5, 4)) {
2179 unsigned int level;
2181 p = buffer + 10;
2183 if (*p == '\0')
2184 return (-EINVAL);
2186 level = simple_strtoul(p, NULL, 0);
2187 scsi_dump_status(level);
2190 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
2191 * where token is one of [error,scan,mlqueue,mlcomplete,llqueue,
2192 * llcomplete,hlqueue,hlcomplete]
2194 #if CONFIG_SCSI_LOGGING /* { */
2196 if (!strncmp("log", buffer + 5, 3)) {
2197 char *token;
2198 unsigned int level;
/* Isolate the token, then (if present) the numeric level after it. */
2200 p = buffer + 9;
2201 token = p;
2202 while (*p != ' ' && *p != '\t' && *p != '\0') {
2203 p++;
/* No level given: only the "all"/"none" shorthands are accepted. */
2206 if (*p == '\0') {
2207 if (strncmp(token, "all", 3) == 0) {
2209 * Turn on absolutely everything.
2211 scsi_logging_level = ~0;
2212 } else if (strncmp(token, "none", 4) == 0) {
2214 * Turn off absolutely everything.
2216 scsi_logging_level = 0;
2217 } else {
2218 return (-EINVAL);
2220 } else {
2221 *p++ = '\0';
2223 level = simple_strtoul(p, NULL, 0);
2226 * Now figure out what to do with it.
2228 if (strcmp(token, "error") == 0) {
2229 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
2230 } else if (strcmp(token, "timeout") == 0) {
2231 SCSI_SET_TIMEOUT_LOGGING(level);
2232 } else if (strcmp(token, "scan") == 0) {
2233 SCSI_SET_SCAN_BUS_LOGGING(level);
2234 } else if (strcmp(token, "mlqueue") == 0) {
2235 SCSI_SET_MLQUEUE_LOGGING(level);
2236 } else if (strcmp(token, "mlcomplete") == 0) {
2237 SCSI_SET_MLCOMPLETE_LOGGING(level);
2238 } else if (strcmp(token, "llqueue") == 0) {
2239 SCSI_SET_LLQUEUE_LOGGING(level);
2240 } else if (strcmp(token, "llcomplete") == 0) {
2241 SCSI_SET_LLCOMPLETE_LOGGING(level);
2242 } else if (strcmp(token, "hlqueue") == 0) {
2243 SCSI_SET_HLQUEUE_LOGGING(level);
2244 } else if (strcmp(token, "hlcomplete") == 0) {
2245 SCSI_SET_HLCOMPLETE_LOGGING(level);
2246 } else if (strcmp(token, "ioctl") == 0) {
2247 SCSI_SET_IOCTL_LOGGING(level);
2248 } else {
2249 return (-EINVAL);
2253 printk("scsi logging level set to 0x%8.8x\n", scsi_logging_level);
2255 #endif /* CONFIG_SCSI_LOGGING */ /* } */
2258 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
2259 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
2260 * Consider this feature BETA.
2261 * CAUTION: This is not for hotplugging your peripherals. As
2262 * SCSI was not designed for this you could damage your
2263 * hardware !
2264 * However perhaps it is legal to switch on an
2265 * already connected device. It is perhaps not
2266 * guaranteed this device doesn't corrupt an ongoing data transfer.
2268 if (!strncmp("add-single-device", buffer + 5, 17)) {
2269 p = buffer + 23;
/* Parse "host channel id lun"; the +1 skips each separator. */
2271 host = simple_strtoul(p, &p, 0);
2272 channel = simple_strtoul(p + 1, &p, 0);
2273 id = simple_strtoul(p + 1, &p, 0);
2274 lun = simple_strtoul(p + 1, &p, 0);
2276 printk("scsi singledevice %d %d %d %d\n", host, channel,
2277 id, lun);
2279 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
2280 if (HBA_ptr->host_no == host) {
2281 break;
2284 if (!HBA_ptr)
2285 return (-ENXIO);
/* Already present?  Re-adding is not supported. */
2287 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
2288 if ((scd->channel == channel
2289 && scd->id == id
2290 && scd->lun == lun)) {
2291 break;
2295 if (scd)
2296 return (-ENOSYS); /* We do not yet support unplugging */
2298 scan_scsis(HBA_ptr, 1, channel, id, lun);
2300 /* FIXME (DB) This assumes that the queue_depth routines can be used
2301 in this context as well, while they were all designed to be
2302 called only once after the detect routine. (DB) */
2303 if (HBA_ptr->select_queue_depths != NULL)
2304 (HBA_ptr->select_queue_depths) (HBA_ptr, HBA_ptr->host_queue);
2306 return (length);
2310 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
2311 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
2313 * Consider this feature pre-BETA.
2315 * CAUTION: This is not for hotplugging your peripherals. As
2316 * SCSI was not designed for this you could damage your
2317 * hardware and thoroughly confuse the SCSI subsystem.
2320 else if (!strncmp("remove-single-device", buffer + 5, 20)) {
2321 p = buffer + 26;
2323 host = simple_strtoul(p, &p, 0);
2324 channel = simple_strtoul(p + 1, &p, 0);
2325 id = simple_strtoul(p + 1, &p, 0);
2326 lun = simple_strtoul(p + 1, &p, 0);
2329 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next) {
2330 if (HBA_ptr->host_no == host) {
2331 break;
2334 if (!HBA_ptr)
2335 return (-ENODEV);
2337 for (scd = HBA_ptr->host_queue; scd; scd = scd->next) {
2338 if ((scd->channel == channel
2339 && scd->id == id
2340 && scd->lun == lun)) {
2341 break;
2345 if (scd == NULL)
2346 return (-ENODEV); /* there is no such device attached */
2348 if (scd->access_count)
2349 return (-EBUSY);
/* Give every upper-level driver a chance to let go of the device. */
2351 SDTpnt = scsi_devicelist;
2352 while (SDTpnt != NULL) {
2353 if (SDTpnt->detach)
2354 (*SDTpnt->detach) (scd);
2355 SDTpnt = SDTpnt->next;
2358 if (scd->attached == 0) {
2360 * Nobody is using this device any more.
2361 * Free all of the command structures.
/* NOTE(review): the loop increment below reads SCpnt->next after
 * SCpnt has been freed by scsi_init_free() -- use-after-free; a
 * saved-next temporary would be safer (cf. scsi_unregister_device). */
2363 for (SCpnt = scd->device_queue; SCpnt; SCpnt = SCpnt->next) {
2364 scd->device_queue = SCpnt->next;
2365 scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
2367 /* Now we can remove the device structure */
2368 if (scd->next != NULL)
2369 scd->next->prev = scd->prev;
2371 if (scd->prev != NULL)
2372 scd->prev->next = scd->next;
2374 if (HBA_ptr->host_queue == scd) {
2375 HBA_ptr->host_queue = scd->next;
2377 scsi_init_free((char *) scd, sizeof(Scsi_Device));
2378 } else {
2379 return (-EBUSY);
2381 return (0);
2383 return (-EINVAL);
2385 #endif
2388 * Go through the device list and recompute the most appropriate size
2389 * for the dma pool. Then grab more memory (as required).
/*
 * resize_dma_pool -- recompute the required size of the DMA
 * bounce/scatterlist pool from the current host and device lists and
 * grow the pool if needed (the pool is never shrunk).  When no hosts
 * remain at all, the whole pool is freed instead (panicking if the
 * sector accounting shows a leak).  All allocation is GFP_ATOMIC; on
 * failure the request is scaled down by 3/4 up to 20 times before
 * giving up with a warning.
 *
 * NOTE(review): this chunk is a lossy extraction -- blank lines,
 * closing braces and some comment delimiters of the original file are
 * missing here; all remaining tokens are kept verbatim.
 */
2391 static void resize_dma_pool(void)
2393 int i, k;
2394 unsigned long size;
2395 struct Scsi_Host *shpnt;
2396 struct Scsi_Host *host = NULL;
2397 Scsi_Device *SDpnt;
2398 FreeSectorBitmap *new_dma_malloc_freelist = NULL;
2399 unsigned int new_dma_sectors = 0;
2400 unsigned int new_need_isa_buffer = 0;
2401 unsigned char **new_dma_malloc_pages = NULL;
2402 int out_of_space = 0;
/* No hosts left: tear the pool down completely and reset the globals. */
2404 if (!scsi_hostlist) {
2406 * Free up the DMA pool.
2408 if (scsi_dma_free_sectors != dma_sectors)
2409 panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
2411 for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
2412 scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
2413 if (dma_malloc_pages)
2414 scsi_init_free((char *) dma_malloc_pages,
2415 (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages));
2416 dma_malloc_pages = NULL;
2417 if (dma_malloc_freelist)
2418 scsi_init_free((char *) dma_malloc_freelist,
2419 (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_freelist));
2420 dma_malloc_freelist = NULL;
2421 dma_sectors = 0;
2422 scsi_dma_free_sectors = 0;
2423 return;
2425 /* Next, check to see if we need to extend the DMA buffer pool */
2427 new_dma_sectors = 2 * SECTORS_PER_PAGE; /* Base value we use */
/* ISA DMA can only reach memory below ISA_DMA_THRESHOLD; if the
 * machine has memory above it, ISA hosts will need bounce buffers. */
2429 if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
2430 need_isa_bounce_buffers = 1;
2431 else
2432 need_isa_bounce_buffers = 0;
2434 if (scsi_devicelist)
2435 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2436 new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
/* Per-device estimate of pool usage, by device type (rationale below). */
2438 for (host = scsi_hostlist; host; host = host->next) {
2439 for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
2441 * sd and sr drivers allocate scatterlists.
2442 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
2443 * buffers for 2k sector size and 1k fs.
2444 * sg driver allocates buffers < 4k.
2445 * st driver does not need buffers from the dma pool.
2446 * estimate 4k buffer/command for devices of unknown type (should panic).
2448 if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
2449 SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
2450 new_dma_sectors += ((host->sg_tablesize *
2451 sizeof(struct scatterlist) + 511) >> 9) *
2452 SDpnt->queue_depth;
2453 if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
2454 new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
2455 } else if (SDpnt->type == TYPE_SCANNER ||
2456 SDpnt->type == TYPE_PROCESSOR ||
2457 SDpnt->type == TYPE_MEDIUM_CHANGER ||
2458 SDpnt->type == TYPE_ENCLOSURE) {
2459 new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
2460 } else {
2461 if (SDpnt->type != TYPE_TAPE) {
2462 printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
2463 new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
/* ISA hosts needing bounce buffers get a full page per sg entry. */
2467 if (host->unchecked_isa_dma &&
2468 need_isa_bounce_buffers &&
2469 SDpnt->type != TYPE_TAPE) {
2470 new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
2471 SDpnt->queue_depth;
2472 new_need_isa_buffer++;
2477 #ifdef DEBUG_INIT
2478 printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
2479 #endif
2481 /* limit DMA memory to 32MB: */
/* & 0xfff0 both rounds up to a multiple of 16 sectors and truncates
 * the count to at most 0xfff0 sectors (~32MB at 512 bytes/sector). */
2482 new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
2485 * We never shrink the buffers - this leads to
2486 * race conditions that I would rather not even think
2487 * about right now.
2489 #if 0 /* Why do this? No gain and risks out_of_space */
2490 if (new_dma_sectors < dma_sectors)
2491 new_dma_sectors = dma_sectors;
2492 #endif
2493 if (new_dma_sectors <= dma_sectors)
2494 return; /* best to quit while we are in front */
/* Retry loop: allocate the free-sector bitmap, then the page-pointer
 * array, then the pages themselves; scale the request down on failure. */
2496 for (k = 0; k < 20; ++k) { /* just in case */
2497 out_of_space = 0;
2498 size = (new_dma_sectors / SECTORS_PER_PAGE) *
2499 sizeof(FreeSectorBitmap);
2500 new_dma_malloc_freelist = (FreeSectorBitmap *)
2501 scsi_init_malloc(size, GFP_ATOMIC);
2502 if (new_dma_malloc_freelist) {
2503 size = (new_dma_sectors / SECTORS_PER_PAGE) *
2504 sizeof(*new_dma_malloc_pages);
2505 new_dma_malloc_pages = (unsigned char **)
2506 scsi_init_malloc(size, GFP_ATOMIC);
2507 if (!new_dma_malloc_pages) {
2508 size = (new_dma_sectors / SECTORS_PER_PAGE) *
2509 sizeof(FreeSectorBitmap);
2510 scsi_init_free((char *) new_dma_malloc_freelist, size);
2511 out_of_space = 1;
2513 } else
2514 out_of_space = 1;
2516 if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
2517 for (i = dma_sectors / SECTORS_PER_PAGE;
2518 i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
2519 new_dma_malloc_pages[i] = (unsigned char *)
2520 scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
2521 if (!new_dma_malloc_pages[i])
2522 break;
2524 if (i != new_dma_sectors / SECTORS_PER_PAGE) { /* clean up */
2525 int k = i;
2527 out_of_space = 1;
2528 for (i = 0; i < k; ++i)
2529 scsi_init_free(new_dma_malloc_pages[i], PAGE_SIZE);
2532 if (out_of_space) { /* try scaling down new_dma_sectors request */
2533 printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
2534 "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
2535 if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
2536 break; /* pretty well hopeless ... */
2537 new_dma_sectors = (new_dma_sectors * 3) / 4;
2538 new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
2539 if (new_dma_sectors <= dma_sectors)
2540 break; /* stick with what we have got */
2541 } else
2542 break; /* found space ... */
2543 } /* end of for loop */
2544 if (out_of_space) {
2545 scsi_need_isa_buffer = new_need_isa_buffer; /* some useful info */
2546 printk(" WARNING, not enough memory, pool not expanded\n");
2547 return;
2549 /* When we dick with the actual DMA list, we need to
2550 * protect things
/* Carry the old bitmap and page pointers over into the new arrays,
 * then free the old arrays (the pages themselves are reused). */
2552 if (dma_malloc_freelist) {
2553 size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
2554 memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
2555 scsi_init_free((char *) dma_malloc_freelist, size);
2557 dma_malloc_freelist = new_dma_malloc_freelist;
2559 if (dma_malloc_pages) {
2560 size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
2561 memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
2562 scsi_init_free((char *) dma_malloc_pages, size);
2564 scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
2565 dma_malloc_pages = new_dma_malloc_pages;
2566 dma_sectors = new_dma_sectors;
2567 scsi_need_isa_buffer = new_need_isa_buffer;
2569 #ifdef DEBUG_INIT
2570 printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors);
2571 printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
2572 printk("resize_dma_pool: need isa buffers = %d\n", scsi_need_isa_buffer);
2573 #endif
2576 #ifdef CONFIG_MODULES /* a big #ifdef block... */
2579 * This entry point should be called by a loadable module if it is trying
2580 * to add a low level scsi driver to the system.
/*
 * scsi_register_host -- register a loadable low-level host adapter driver.
 *
 * Runs the template's detect() routine (under io_request_lock for
 * drivers using the new error-handling code), links the template into
 * scsi_hosts, creates /proc entries, spawns one error-handler kernel
 * thread per new host, scans the buses, attaches the upper-level
 * drivers, builds command blocks, and resizes the DMA pool.
 * Returns 0 on success, 1 on failure (template already registered, no
 * detect routine, or out of memory -- in which case
 * scsi_unregister_host() is called to back everything out).
 *
 * NOTE(review): this chunk is a lossy extraction -- blank lines,
 * closing braces and some comment delimiters of the original file are
 * missing here; all remaining tokens are kept verbatim.
 */
2582 static int scsi_register_host(Scsi_Host_Template * tpnt)
2584 int pcount;
2585 struct Scsi_Host *shpnt;
2586 Scsi_Device *SDpnt;
2587 struct Scsi_Device_Template *sdtpnt;
2588 const char *name;
2589 unsigned long flags;
2590 int out_of_space = 0;
2592 if (tpnt->next || !tpnt->detect)
2593 return 1; /* Must be already loaded, or
2594 * no detect routine available
/* Remember the host count so we can tell whether detect() registered
 * any hosts itself. */
2596 pcount = next_scsi_host;
2598 /* The detect routine must carefully spinunlock/spinlock if
2599 it enables interrupts, since all interrupt handlers do
2600 spinlock as well.
2601 All lame drivers are going to fail due to the following
2602 spinlock. For the time being let's use it only for drivers
2603 using the new scsi code. NOTE: the detect routine could
2604 redefine the value tpnt->use_new_eh_code. (DB, 13 May 1998) */
2606 if (tpnt->use_new_eh_code) {
2607 spin_lock_irqsave(&io_request_lock, flags);
2608 tpnt->present = tpnt->detect(tpnt);
2609 spin_unlock_irqrestore(&io_request_lock, flags);
2610 } else
2611 tpnt->present = tpnt->detect(tpnt);
2613 if (tpnt->present) {
2614 if (pcount == next_scsi_host) {
2615 if (tpnt->present > 1) {
2616 printk("Failure to register low-level scsi driver");
2617 scsi_unregister_host(tpnt);
2618 return 1;
2621 * The low-level driver failed to register a driver. We
2622 * can do this now.
2624 scsi_register(tpnt, 0);
2626 tpnt->next = scsi_hosts; /* Add to the linked list */
2627 scsi_hosts = tpnt;
2629 /* Add the new driver to /proc/scsi */
2630 #if CONFIG_PROC_FS
2631 build_proc_dir_entries(tpnt);
2632 #endif
2636 * Add the kernel threads for each host adapter that will
2637 * handle error correction.
2639 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2640 if (shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code) {
2641 DECLARE_MUTEX_LOCKED(sem);
2643 shpnt->eh_notify = &sem;
2644 kernel_thread((int (*)(void *)) scsi_error_handler,
2645 (void *) shpnt, 0);
2648 * Now wait for the kernel error thread to initialize itself
2649 * as it might be needed when we scan the bus.
2651 down(&sem);
2652 shpnt->eh_notify = NULL;
/* Announce each newly detected host by name. */
2656 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2657 if (shpnt->hostt == tpnt) {
2658 if (tpnt->info) {
2659 name = tpnt->info(shpnt);
2660 } else {
2661 name = tpnt->name;
2663 printk("scsi%d : %s\n", /* And print a little message */
2664 shpnt->host_no, name);
2668 printk("scsi : %d host%s.\n", next_scsi_host,
2669 (next_scsi_host == 1) ? "" : "s");
2671 scsi_make_blocked_list();
2673 /* The next step is to call scan_scsis here. This generates the
2674 * Scsi_Devices entries
2676 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2677 if (shpnt->hostt == tpnt) {
2678 scan_scsis(shpnt, 0, 0, 0, 0);
2679 if (shpnt->select_queue_depths != NULL) {
2680 (shpnt->select_queue_depths) (shpnt, shpnt->host_queue);
/* Let each upper-level driver initialize for the devices it noticed. */
2685 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2686 if (sdtpnt->init && sdtpnt->dev_noticed)
2687 (*sdtpnt->init) ();
2691 * Next we create the Scsi_Cmnd structures for this host
2693 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2694 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2695 if (SDpnt->host->hostt == tpnt) {
2696 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2697 if (sdtpnt->attach)
2698 (*sdtpnt->attach) (SDpnt);
2699 if (SDpnt->attached) {
2700 scsi_build_commandblocks(SDpnt);
2701 if (0 == SDpnt->has_cmdblocks)
2702 out_of_space = 1;
2708 * Now that we have all of the devices, resize the DMA pool,
2709 * as required. */
2710 if (!out_of_space)
2711 resize_dma_pool();
2714 /* This does any final handling that is required. */
2715 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
2716 if (sdtpnt->finish && sdtpnt->nr_dev) {
2717 (*sdtpnt->finish) ();
2721 #if defined(USE_STATIC_SCSI_MEMORY)
2722 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2723 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2724 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2725 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2726 #endif
2728 MOD_INC_USE_COUNT;
2730 if (out_of_space) {
2731 scsi_unregister_host(tpnt); /* easiest way to clean up?? */
2732 return 1;
2733 } else
2734 return 0;
2738 * Similarly, this entry point should be called by a loadable module if it
2739 * is trying to remove a low level scsi driver from the system.
2741 * Note - there is a fatal flaw in the deregister module function.
2742 * There is no way to return a code that says 'I cannot be unloaded now'.
2743 * The system relies entirely upon usage counts that are maintained,
2744 * and the assumption is that if the usage count is 0, then the module
2745 * can be unloaded.
/*
 * scsi_unregister_host -- back out everything scsi_register_host did.
 *
 * Returns early (silently, as the void return allows no error code) if
 * the module is still in use, a command is still active, or an
 * upper-level driver remains attached.  Otherwise: marks commands as
 * disconnecting, detaches upper-level drivers, kills the per-host
 * error-handler threads, frees Scsi_Cmnd and Scsi_Device structures,
 * releases each host instance (via the template's release() or a
 * default irq/dma/ioport release), frees the DMA pool when no hosts
 * remain, and finally unlinks the template and drops the use count.
 *
 * NOTE(review): this chunk is a lossy extraction -- blank lines,
 * closing braces and some comment delimiters of the original file are
 * missing here; all remaining tokens are kept verbatim.
 */
2747 static void scsi_unregister_host(Scsi_Host_Template * tpnt)
2749 int online_status;
2750 int pcount;
2751 Scsi_Cmnd *SCpnt;
2752 Scsi_Device *SDpnt;
2753 Scsi_Device *SDpnt1;
2754 struct Scsi_Device_Template *sdtpnt;
2755 struct Scsi_Host *sh1;
2756 struct Scsi_Host *shpnt;
2757 Scsi_Host_Template *SHT;
2758 Scsi_Host_Template *SHTp;
2761 * First verify that this host adapter is completely free with no pending
2762 * commands
2764 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2765 for (SDpnt = shpnt->host_queue; SDpnt;
2766 SDpnt = SDpnt->next) {
2767 if (SDpnt->host->hostt == tpnt
2768 && SDpnt->host->hostt->module
2769 && GET_USE_COUNT(SDpnt->host->hostt->module))
2770 return;
2772 * FIXME(eric) - We need to find a way to notify the
2773 * low level driver that we are shutting down - via the
2774 * special device entry that still needs to get added.
2776 * Is detach interface below good enough for this?
2782 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2783 * to help prevent race conditions where other hosts/processors could try and
2784 * get in and queue a command.
2786 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2787 for (SDpnt = shpnt->host_queue; SDpnt;
2788 SDpnt = SDpnt->next) {
2789 if (SDpnt->host->hostt == tpnt)
2790 SDpnt->online = FALSE;
/* Claim every command block by marking it RQ_SCSI_DISCONNECTING; if
 * any is found busy, roll back the markings and abort the unload. */
2795 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2796 if (shpnt->hostt != tpnt) {
2797 continue;
2799 for (SDpnt = shpnt->host_queue; SDpnt;
2800 SDpnt = SDpnt->next) {
2802 * Loop over all of the commands associated with the device. If any of
2803 * them are busy, then set the state back to inactive and bail.
2805 for (SCpnt = SDpnt->device_queue; SCpnt;
2806 SCpnt = SCpnt->next) {
2807 online_status = SDpnt->online;
2808 SDpnt->online = FALSE;
2809 if (SCpnt->request.rq_status != RQ_INACTIVE) {
2810 printk("SCSI device not inactive - rq_status=%d, target=%d, pid=%ld, state=%d, owner=%d.\n",
2811 SCpnt->request.rq_status, SCpnt->target, SCpnt->pid,
2812 SCpnt->state, SCpnt->owner);
2813 for (SDpnt1 = shpnt->host_queue; SDpnt1;
2814 SDpnt1 = SDpnt1->next) {
2815 for (SCpnt = SDpnt1->device_queue; SCpnt;
2816 SCpnt = SCpnt->next)
2817 if (SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2818 SCpnt->request.rq_status = RQ_INACTIVE;
2820 SDpnt->online = online_status;
2821 printk("Device busy???\n");
2822 return;
2825 * No, this device is really free. Mark it as such, and
2826 * continue on.
2828 SCpnt->state = SCSI_STATE_DISCONNECTING;
2829 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2833 /* Next we detach the high level drivers from the Scsi_Device structures */
2835 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2836 if (shpnt->hostt != tpnt) {
2837 continue;
2839 for (SDpnt = shpnt->host_queue; SDpnt;
2840 SDpnt = SDpnt->next) {
2841 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2842 if (sdtpnt->detach)
2843 (*sdtpnt->detach) (SDpnt);
2845 /* If something still attached, punt */
2846 if (SDpnt->attached) {
2847 printk("Attached usage count = %d\n", SDpnt->attached);
2848 return;
2854 * Next, kill the kernel error recovery thread for this host.
2856 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2857 if (shpnt->hostt == tpnt
2858 && shpnt->hostt->use_new_eh_code
2859 && shpnt->ehandler != NULL) {
2860 DECLARE_MUTEX_LOCKED(sem);
2862 shpnt->eh_notify = &sem;
2863 send_sig(SIGKILL, shpnt->ehandler, 1);
/* Wait until the error thread has acknowledged the kill. */
2864 down(&sem);
2865 shpnt->eh_notify = NULL;
2869 /* Next we free up the Scsi_Cmnd structures for this host */
2871 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2872 if (shpnt->hostt != tpnt) {
2873 continue;
/* Note: the outer loop re-reads shpnt->host_queue (not SDpnt->next)
 * because SDpnt itself is freed at the bottom of each iteration. */
2875 for (SDpnt = shpnt->host_queue; SDpnt;
2876 SDpnt = shpnt->host_queue) {
2877 while (SDpnt->device_queue) {
2878 SCpnt = SDpnt->device_queue->next;
2879 scsi_init_free((char *) SDpnt->device_queue, sizeof(Scsi_Cmnd));
2880 SDpnt->device_queue = SCpnt;
2882 SDpnt->has_cmdblocks = 0;
2884 /* Next free up the Scsi_Device structures for this host */
2885 shpnt->host_queue = SDpnt->next;
2886 scsi_init_free((char *) SDpnt, sizeof(Scsi_Device));
2891 /* Next we go through and remove the instances of the individual hosts
2892 * that were detected */
2894 for (shpnt = scsi_hostlist; shpnt; shpnt = sh1) {
2895 sh1 = shpnt->next;
2896 if (shpnt->hostt == tpnt) {
2897 if (shpnt->loaded_as_module) {
2898 pcount = next_scsi_host;
2899 /* Remove the /proc/scsi directory entry */
2900 #if CONFIG_PROC_FS
2901 proc_scsi_unregister(tpnt->proc_dir,
2902 shpnt->host_no + PROC_SCSI_FILE);
2903 #endif
2904 if (tpnt->release)
2905 (*tpnt->release) (shpnt);
2906 else {
2907 /* This is the default case for the release function.
2908 * It should do the right thing for most correctly
2909 * written host adapters.
2911 if (shpnt->irq)
2912 free_irq(shpnt->irq, NULL);
2913 if (shpnt->dma_channel != 0xff)
2914 free_dma(shpnt->dma_channel);
2915 if (shpnt->io_port && shpnt->n_io_port)
2916 release_region(shpnt->io_port, shpnt->n_io_port);
/* Only unregister here if release() did not already do so. */
2918 if (pcount == next_scsi_host)
2919 scsi_unregister(shpnt);
2920 tpnt->present--;
2926 * If there are absolutely no more hosts left, it is safe
2927 * to completely nuke the DMA pool. The resize operation will
2928 * do the right thing and free everything.
2930 if (!scsi_hosts)
2931 resize_dma_pool();
2933 printk("scsi : %d host%s.\n", next_scsi_host,
2934 (next_scsi_host == 1) ? "" : "s");
2936 #if defined(USE_STATIC_SCSI_MEMORY)
2937 printk("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2938 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2939 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2940 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2941 #endif
2943 scsi_make_blocked_list();
2945 /* There were some hosts that were loaded at boot time, so we cannot
2946 do any more than this */
2947 if (tpnt->present)
2948 return;
2950 /* OK, this is the very last step. Remove this host adapter from the
2951 linked list. */
2952 for (SHTp = NULL, SHT = scsi_hosts; SHT; SHTp = SHT, SHT = SHT->next)
2953 if (SHT == tpnt) {
2954 if (SHTp)
2955 SHTp->next = SHT->next;
2956 else
2957 scsi_hosts = SHT->next;
2958 SHT->next = NULL;
2959 break;
2961 /* Rebuild the /proc/scsi directory entries */
2962 #if CONFIG_PROC_FS
2963 proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino);
2964 #endif
2965 MOD_DEC_USE_COUNT;
2969 * This entry point should be called by a loadable module if it is trying
2970 * to add a high level scsi driver to the system.
/*
 * scsi_register_device_module -- register an upper-level (sd/sr/st/sg
 * style) driver template.
 *
 * Links the template in via scsi_register_device(), lets it detect()
 * each known device, runs init(), attach()es the devices, allocates
 * command blocks for newly attached devices, and resizes the DMA pool.
 * Returns 0 on success, 1 on failure (already registered, init()
 * failed, or out of memory -- then scsi_unregister_device() backs out).
 *
 * NOTE(review): lossy extraction -- some braces/comment delimiters
 * are missing below; code tokens kept verbatim.
 */
2972 static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
2974 Scsi_Device *SDpnt;
2975 struct Scsi_Host *shpnt;
2976 int out_of_space = 0;
/* A non-NULL next pointer means the template is already on the list. */
2978 if (tpnt->next)
2979 return 1;
2981 scsi_register_device(tpnt);
2983 * First scan the devices that we know about, and see if we notice them.
2986 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2987 for (SDpnt = shpnt->host_queue; SDpnt;
2988 SDpnt = SDpnt->next) {
2989 if (tpnt->detect)
2990 SDpnt->attached += (*tpnt->detect) (SDpnt);
2995 * If any of the devices would match this driver, then perform the
2996 * init function.
2998 if (tpnt->init && tpnt->dev_noticed)
2999 if ((*tpnt->init) ())
3000 return 1;
3003 * Now actually connect the devices to the new driver.
3005 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
3006 for (SDpnt = shpnt->host_queue; SDpnt;
3007 SDpnt = SDpnt->next) {
3008 if (tpnt->attach)
3009 (*tpnt->attach) (SDpnt);
3011 * If this driver attached to the device, and don't have any
3012 * command blocks for this device, allocate some.
3014 if (SDpnt->attached && SDpnt->has_cmdblocks == 0) {
3015 SDpnt->online = TRUE;
3016 scsi_build_commandblocks(SDpnt);
3017 if (0 == SDpnt->has_cmdblocks)
3018 out_of_space = 1;
3024 * This does any final handling that is required.
3026 if (tpnt->finish && tpnt->nr_dev)
3027 (*tpnt->finish) ();
3028 if (!out_of_space)
3029 resize_dma_pool();
3030 MOD_INC_USE_COUNT;
3032 if (out_of_space) {
3033 scsi_unregister_device(tpnt); /* easiest way to clean up?? */
3034 return 1;
3035 } else
3036 return 0;
3039 static int scsi_unregister_device(struct Scsi_Device_Template *tpnt)
3041 Scsi_Device *SDpnt;
3042 Scsi_Cmnd *SCpnt;
3043 struct Scsi_Host *shpnt;
3044 struct Scsi_Device_Template *spnt;
3045 struct Scsi_Device_Template *prev_spnt;
3048 * If we are busy, this is not going to fly.
3050 if (GET_USE_COUNT(tpnt->module) != 0)
3051 return 0;
3054 * Next, detach the devices from the driver.
3057 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
3058 for (SDpnt = shpnt->host_queue; SDpnt;
3059 SDpnt = SDpnt->next) {
3060 if (tpnt->detach)
3061 (*tpnt->detach) (SDpnt);
3062 if (SDpnt->attached == 0) {
3063 SDpnt->online = FALSE;
3066 * Nobody is using this device any more. Free all of the
3067 * command structures.
3069 for (SCpnt = SDpnt->device_queue; SCpnt;
3070 SCpnt = SCpnt->next) {
3071 if (SCpnt == SDpnt->device_queue)
3072 SDpnt->device_queue = SCpnt->next;
3073 scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
3075 SDpnt->has_cmdblocks = 0;
3080 * Extract the template from the linked list.
3082 spnt = scsi_devicelist;
3083 prev_spnt = NULL;
3084 while (spnt != tpnt) {
3085 prev_spnt = spnt;
3086 spnt = spnt->next;
3088 if (prev_spnt == NULL)
3089 scsi_devicelist = tpnt->next;
3090 else
3091 prev_spnt->next = spnt->next;
3093 MOD_DEC_USE_COUNT;
3095 * Final cleanup for the driver is done in the driver sources in the
3096 * cleanup function.
3098 return 0;
3102 int scsi_register_module(int module_type, void *ptr)
3104 switch (module_type) {
3105 case MODULE_SCSI_HA:
3106 return scsi_register_host((Scsi_Host_Template *) ptr);
3108 /* Load upper level device handler of some kind */
3109 case MODULE_SCSI_DEV:
3110 #ifdef CONFIG_KMOD
3111 if (scsi_hosts == NULL)
3112 request_module("scsi_hostadapter");
3113 #endif
3114 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
3115 /* The rest of these are not yet implemented */
3117 /* Load constants.o */
3118 case MODULE_SCSI_CONST:
3120 /* Load specialized ioctl handler for some device. Intended for
3121 * cdroms that have non-SCSI2 audio command sets. */
3122 case MODULE_SCSI_IOCTL:
3124 default:
3125 return 1;
3129 void scsi_unregister_module(int module_type, void *ptr)
3131 switch (module_type) {
3132 case MODULE_SCSI_HA:
3133 scsi_unregister_host((Scsi_Host_Template *) ptr);
3134 break;
3135 case MODULE_SCSI_DEV:
3136 scsi_unregister_device((struct Scsi_Device_Template *) ptr);
3137 break;
3138 /* The rest of these are not yet implemented. */
3139 case MODULE_SCSI_CONST:
3140 case MODULE_SCSI_IOCTL:
3141 break;
3142 default:
3144 return;
3147 #endif /* CONFIG_MODULES */
3150 * Function: scsi_dump_status
3152 * Purpose: Brain dump of scsi system, used for problem solving.
3154 * Arguments: level - used to indicate level of detail.
3156 * Notes: The level isn't used at all yet, but we need to find some way
3157 * of sensibly logging varying degrees of information. A quick one-line
3158 * display of each command, plus the status would be most useful.
3160 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
3161 * it all off if the user wants a lean and mean kernel. It would probably
3162 * also be useful to allow the user to specify one single host to be dumped.
3163 * A second argument to the function would be useful for that purpose.
3165 * FIXME - some formatting of the output into tables would be very handy.
/*
 * scsi_dump_status -- debugging brain dump, triggered via
 * "echo 'scsi dump <level>' > /proc/scsi/scsi".
 *
 * Prints per-host busy/failed counters, one line per command with its
 * request/timeout/result state, and the pending block-device request
 * lists.  `level` is currently unused (see the comment block above).
 * Compiles to an empty function unless both CONFIG_PROC_FS and
 * CONFIG_SCSI_LOGGING are enabled.
 *
 * NOTE(review): lossy extraction -- some braces/comment delimiters
 * are missing below; code tokens kept verbatim.
 */
3167 static void scsi_dump_status(int level)
3169 #if CONFIG_PROC_FS
3170 #if CONFIG_SCSI_LOGGING /* { */
3171 int i;
3172 struct Scsi_Host *shpnt;
3173 Scsi_Cmnd *SCpnt;
3174 Scsi_Device *SDpnt;
3175 printk("Dump of scsi host parameters:\n");
3176 i = 0;
3177 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
3178 printk(" %d %d %d : %d %p\n",
3179 shpnt->host_failed,
3180 shpnt->host_busy,
3181 atomic_read(&shpnt->host_active),
3182 shpnt->host_blocked,
3183 shpnt->pending_commands);
3187 printk("\n\n");
3188 printk("Dump of scsi command parameters:\n");
3189 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
3190 printk("h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
3191 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
3192 for (SCpnt = SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next) {
3193 /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
3194 printk("(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
3195 i++,
3197 SCpnt->host->host_no,
3198 SCpnt->channel,
3199 SCpnt->target,
3200 SCpnt->lun,
3202 kdevname(SCpnt->request.rq_dev),
3203 SCpnt->request.sector,
3204 SCpnt->request.nr_sectors,
3205 SCpnt->request.current_nr_sectors,
3206 SCpnt->request.rq_status,
3207 SCpnt->use_sg,
3209 SCpnt->retries,
3210 SCpnt->allowed,
3211 SCpnt->flags,
3213 SCpnt->timeout_per_command,
3214 SCpnt->timeout,
3215 SCpnt->internal_timeout,
3217 SCpnt->cmnd[0],
3218 SCpnt->sense_buffer[2],
3219 SCpnt->result);
/* NOTE(review): the block-request dump below is nested inside the
 * per-host/per-device loops, so it repeats once per device -- looks
 * redundant; confirm against the upstream file. */
3224 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
3225 for (SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next) {
3226 /* Now dump the request lists for each block device */
3227 printk("Dump of pending block device requests\n");
3228 for (i = 0; i < MAX_BLKDEV; i++) {
3229 if (blk_dev[i].current_request) {
3230 struct request *req;
3231 printk("%d: ", i);
3232 req = blk_dev[i].current_request;
3233 while (req) {
3234 printk("(%s %d %ld %ld %ld) ",
3235 kdevname(req->rq_dev),
3236 req->cmd,
3237 req->sector,
3238 req->nr_sectors,
3239 req->current_nr_sectors);
3240 req = req->next;
3242 printk("\n");
3247 printk("wait_for_request = %p\n", wait_for_request);
3248 #endif /* CONFIG_SCSI_LOGGING */ /* } */
3249 #endif /* CONFIG_PROC_FS */
3252 #ifdef MODULE
/*
 * init_module -- modular SCSI core start-up.
 *
 * Publishes the /proc/scsi dispatch hook and the /proc/scsi/scsi
 * entry, builds a minimal one-page DMA pool (grown later by
 * resize_dma_pool once hosts and devices appear), and installs the
 * SCSI bottom-half handler.  Returns 0 on success, 1 when the initial
 * pool cannot be allocated.
 *
 * NOTE(review): on the out-of-memory path the /proc registrations and
 * dispatch_scsi_info_ptr are left in place -- looks like a leak on
 * failure; confirm against later kernels.
 *
 * NOTE(review): lossy extraction -- some braces/comment delimiters
 * are missing below; code tokens kept verbatim.
 */
3254 int init_module(void)
3256 unsigned long size;
3257 int has_space = 0;
3260 * This makes /proc/scsi visible.
3262 #if CONFIG_PROC_FS
3263 dispatch_scsi_info_ptr = dispatch_scsi_info;
3264 #endif
3266 scsi_loadable_module_flag = 1;
3268 /* Register the /proc/scsi/scsi entry */
3269 #if CONFIG_PROC_FS
3270 proc_scsi_register(0, &proc_scsi_scsi);
3271 #endif
/* Start with a single page worth of DMA sectors. */
3273 dma_sectors = PAGE_SIZE / SECTOR_SIZE;
3274 scsi_dma_free_sectors = dma_sectors;
3276 * Set up a minimal DMA buffer list - this will be used during scan_scsis
3277 * in some cases.
3280 /* One bit per sector to indicate free/busy */
3281 size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
3282 dma_malloc_freelist = (FreeSectorBitmap *)
3283 scsi_init_malloc(size, GFP_ATOMIC);
3284 if (dma_malloc_freelist) {
3285 /* One pointer per page for the page list */
3286 dma_malloc_pages = (unsigned char **) scsi_init_malloc(
3287 (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
3288 GFP_ATOMIC);
3289 if (dma_malloc_pages) {
3290 dma_malloc_pages[0] = (unsigned char *)
3291 scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
3292 if (dma_malloc_pages[0])
3293 has_space = 1;
/* Partial allocation: release whatever succeeded and bail out. */
3296 if (!has_space) {
3297 if (dma_malloc_freelist) {
3298 scsi_init_free((char *) dma_malloc_freelist, size);
3299 if (dma_malloc_pages)
3300 scsi_init_free((char *) dma_malloc_pages,
3301 (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages));
3303 printk("scsi::init_module: failed, out of memory\n");
3304 return 1;
3307 * This is where the processing takes place for most everything
3308 * when commands are completed.
3310 init_bh(SCSI_BH, scsi_bottom_half_handler);
3312 return 0;
3315 void cleanup_module(void)
3317 remove_bh(SCSI_BH);
3319 #if CONFIG_PROC_FS
3320 proc_scsi_unregister(0, PROC_SCSI_SCSI);
3322 /* No, we're not here anymore. Don't show the /proc/scsi files. */
3323 dispatch_scsi_info_ptr = 0L;
3324 #endif
3327 * Free up the DMA pool.
3329 resize_dma_pool();
3333 #endif /* MODULE */
3336 * Overrides for Emacs so that we follow Linus's tabbing style.
3337 * Emacs will notice this stuff at the end of the file and automatically
3338 * adjust the settings for this buffer only. This must remain at the end
3339 * of the file.
3340 * ---------------------------------------------------------------------------
3341 * Local variables:
3342 * c-indent-level: 4
3343 * c-brace-imaginary-offset: 0
3344 * c-brace-offset: -4
3345 * c-argdecl-indent: 4
3346 * c-label-offset: -4
3347 * c-continued-statement-offset: 4
3348 * c-continued-brace-offset: 0
3349 * indent-tabs-mode: nil
3350 * tab-width: 8
3351 * End: