[davej-history.git] drivers/scsi/scsi.c (import of Linux 2.1.81)
1 /*
2 * scsi.c Copyright (C) 1992 Drew Eckhardt
3 * Copyright (C) 1993, 1994, 1995 Eric Youngdale
5 * generic mid-level SCSI driver
6 * Initial versions: Drew Eckhardt
7 * Subsequent revisions: Eric Youngdale
9 * <drew@colorado.edu>
11 * Bug correction thanks go to :
12 * Rik Faith <faith@cs.unc.edu>
13 * Tommy Thorn <tthorn>
14 * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de>
16 * Modified by Eric Youngdale eric@andante.jic.com or ericy@gnu.ai.mit.edu to
17 * add scatter-gather, multiple outstanding requests, and other
18 * enhancements.
20 * Native multichannel, wide scsi, /proc/scsi and hot plugging
21 * support added by Michael Neuffer <mike@i-connect.net>
23 * Added request_module("scsi_hostadapter") for kerneld:
24 * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/conf.modules)
25 * Bjorn Ekwall <bj0rn@blox.se>
27 * Major improvements to the timeout, abort, and reset processing,
28 * as well as performance modifications for large queue depths by
29 * Leonard N. Zubkoff <lnz@dandelion.com>
32 #include <linux/config.h>
33 #include <linux/module.h>
35 #include <linux/sched.h>
36 #include <linux/timer.h>
37 #include <linux/string.h>
38 #include <linux/malloc.h>
39 #include <linux/ioport.h>
40 #include <linux/kernel.h>
41 #include <linux/stat.h>
42 #include <linux/blk.h>
43 #include <linux/interrupt.h>
44 #include <linux/delay.h>
45 #include <linux/init.h>
47 #define __KERNEL_SYSCALLS__
49 #include <linux/unistd.h>
51 #include <asm/system.h>
52 #include <asm/irq.h>
53 #include <asm/dma.h>
55 #include "scsi.h"
56 #include "hosts.h"
57 #include "constants.h"
59 #ifdef CONFIG_KERNELD
60 #include <linux/kerneld.h>
61 #endif
63 #undef USE_STATIC_SCSI_MEMORY
66 static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
70 * Definitions and constants.
72 #define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
75 * PAGE_SIZE must be a multiple of the sector size (512). True
76 * for all reasonably recent architectures (even the VAX...).
78 #define SECTOR_SIZE 512
79 #define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
81 #if SECTORS_PER_PAGE <= 8
82 typedef unsigned char FreeSectorBitmap;
83 #elif SECTORS_PER_PAGE <= 32
84 typedef unsigned int FreeSectorBitmap;
85 #else
86 # error You lose.
87 #endif
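/* The bitmap type chosen above must provide at least one bit per 512-byte
 * sector in a page: dma_malloc_freelist[] below keeps one FreeSectorBitmap
 * word per DMA-pool page and tracks allocations at sector granularity. */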
89 #define MAX_SCSI_DEVICE_CODE 10
91 #ifdef DEBUG
92 #define SCSI_TIMEOUT (5*HZ)
93 #else
94 #define SCSI_TIMEOUT (2*HZ)
95 #endif
97 #define MIN_RESET_DELAY (2*HZ)
99 /* Do not call reset on error if we just did a reset within 15 sec. */
100 #define MIN_RESET_PERIOD (15*HZ)
102 /* The following devices are known not to tolerate a lun != 0 scan for
103 * one reason or another. Some will respond to all luns, others will
104 * lock up.
107 #define BLIST_NOLUN 0x01
108 #define BLIST_FORCELUN 0x02
109 #define BLIST_BORKEN 0x04
110 #define BLIST_KEY 0x08
111 #define BLIST_SINGLELUN 0x10
112 #define BLIST_NOTQ 0x20
113 #define BLIST_SPARSELUN 0x40
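/* Meaning of the flags above, as used further down in this file:
 * NOLUN      - never probe luns other than 0 on this device
 * FORCELUN   - always scan all 8 luns
 * BORKEN     - leave SDpnt->borken set (handshaking problems)
 * KEY        - send the MODE SENSE "key" command to unlock the drive
 * SINGLELUN  - allow I/O to only one of the device's luns at a time
 * NOTQ       - do not use tagged queuing even if INQUIRY advertises it
 * SPARSELUN  - lun numbering may be sparse, so scan all 8 luns anyway
 */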
116 * Data declarations.
118 unsigned long scsi_pid = 0;
119 Scsi_Cmnd * last_cmnd = NULL;
120 /* Command groups 3 and 4 are reserved and should never be used. */
121 const unsigned char scsi_command_size[8] = { 6, 10, 10, 12,
122 12, 12, 10, 10 };
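/* The command group is the top three bits of the SCSI opcode; the
 * COMMAND_SIZE() macro (used below in scsi_do_cmd) looks the CDB length
 * up in this table by group. */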
123 static unsigned long serial_number = 0;
124 static Scsi_Cmnd * scsi_bh_queue_head = NULL;
125 static FreeSectorBitmap * dma_malloc_freelist = NULL;
126 static int need_isa_bounce_buffers;
127 static unsigned int dma_sectors = 0;
128 unsigned int scsi_dma_free_sectors = 0;
129 unsigned int scsi_need_isa_buffer = 0;
130 static unsigned char ** dma_malloc_pages = NULL;
133 * Note - the initial logging level can be set here to log events at boot time.
134 * After the system is up, you may enable logging via the /proc interface.
136 unsigned int scsi_logging_level = 0;
138 static volatile struct Scsi_Host * host_active = NULL;
140 #if CONFIG_PROC_FS
142 * This is the pointer to the /proc/scsi code.
143 * It is only initialized to !=0 if the scsi code is present
145 struct proc_dir_entry proc_scsi_scsi = {
146 PROC_SCSI_SCSI, 4, "scsi",
147 S_IFREG | S_IRUGO | S_IWUSR, 1, 0, 0, 0,
148 NULL,
149 NULL, NULL,
150 NULL, NULL, NULL
152 #endif
155 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
157 "Direct-Access ",
158 "Sequential-Access",
159 "Printer ",
160 "Processor ",
161 "WORM ",
162 "CD-ROM ",
163 "Scanner ",
164 "Optical Device ",
165 "Medium Changer ",
166 "Communications "
170 * Function prototypes.
172 static void resize_dma_pool(void);
173 static void print_inquiry(unsigned char *data);
174 extern void scsi_times_out (Scsi_Cmnd * SCpnt);
175 static int scan_scsis_single (int channel,int dev,int lun,int * max_scsi_dev ,
176 int * sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt,
177 struct Scsi_Host *shpnt, char * scsi_result);
178 void scsi_build_commandblocks(Scsi_Device * SDpnt);
181 * These are the interface to the old error handling code. It should go away
182 * someday soon.
184 extern void scsi_old_done (Scsi_Cmnd *SCpnt);
185 extern void scsi_old_times_out (Scsi_Cmnd * SCpnt);
187 #if CONFIG_PROC_FS
188 extern int (* dispatch_scsi_info_ptr)(int ino, char *buffer, char **start,
189 off_t offset, int length, int inout);
190 extern int dispatch_scsi_info(int ino, char *buffer, char **start,
191 off_t offset, int length, int inout);
192 #endif
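/* A host (or host/device pair) counts as blocked when another host in the
 * blocked list is currently active, when the host already has can_queue
 * commands outstanding, or when the host or the device has been explicitly
 * marked blocked. */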
194 #define SCSI_BLOCK(DEVICE, HOST) \
195 ((HOST->block && host_active && HOST != host_active) \
196 || ((HOST)->can_queue && HOST->host_busy >= HOST->can_queue) \
197 || ((HOST)->host_blocked) \
198 || ((DEVICE) != NULL && (DEVICE)->device_blocked) )
200 static void scsi_dump_status(int level);
203 struct dev_info{
204 const char * vendor;
205 const char * model;
206 const char * revision; /* Latest revision known to be bad. Not used yet */
207 unsigned flags;
211 * This is what was previously known as the blacklist. The concept
212 * has been expanded so that we can specify other types of things we
213 * need to be aware of.
215 static struct dev_info device_list[] =
217 {"CHINON","CD-ROM CDS-431","H42", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
218 {"CHINON","CD-ROM CDS-535","Q14", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
219 {"DENON","DRD-25X","V", BLIST_NOLUN}, /* Locks up if probed for lun != 0 */
220 {"HITACHI","DK312C","CM81", BLIST_NOLUN}, /* Responds to all lun - dtg */
221 {"HITACHI","DK314C","CR21" , BLIST_NOLUN}, /* responds to all lun */
222 {"IMS", "CDD521/10","2.06", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
223 {"MAXTOR","XT-3280","PR02", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
224 {"MAXTOR","XT-4380S","B3C", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
225 {"MAXTOR","MXT-1240S","I1.2", BLIST_NOLUN}, /* Locks up when LUN>0 polled */
226 {"MAXTOR","XT-4170S","B5A", BLIST_NOLUN}, /* Locks-up sometimes when LUN>0 polled. */
227 {"MAXTOR","XT-8760S","B7B", BLIST_NOLUN}, /* guess what? */
228 {"MEDIAVIS","RENO CD-ROMX2A","2.03",BLIST_NOLUN},/*Responds to all lun */
229 {"MICROP", "4110", "*", BLIST_NOTQ}, /* Buggy Tagged Queuing */
230 {"NEC","CD-ROM DRIVE:841","1.0", BLIST_NOLUN}, /* Locks-up when LUN>0 polled. */
231 {"RODIME","RO3000S","2.33", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
232 {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
233 * for aha152x controller, which causes
234 * SCSI code to reset bus.*/
235 {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
236 * for aha152x controller, which causes
237 * SCSI code to reset bus.*/
238 {"SEAGATE", "ST296","921", BLIST_NOLUN}, /* Responds to all lun */
239 {"SEAGATE","ST1581","6538",BLIST_NOLUN}, /* Responds to all lun */
240 {"SONY","CD-ROM CDU-541","4.3d", BLIST_NOLUN},
241 {"SONY","CD-ROM CDU-55S","1.0i", BLIST_NOLUN},
242 {"SONY","CD-ROM CDU-561","1.7x", BLIST_NOLUN},
243 {"TANDBERG","TDC 3600","U07", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
244 {"TEAC","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
245 * for seagate controller, which causes
246 * SCSI code to reset bus.*/
247 {"TEXEL","CD-ROM","1.06", BLIST_NOLUN}, /* causes failed REQUEST SENSE on lun 1
248 * for seagate controller, which causes
249 * SCSI code to reset bus.*/
250 {"QUANTUM","LPS525S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
251 {"QUANTUM","PD1225S","3110", BLIST_NOLUN}, /* Locks sometimes if polled for lun != 0 */
252 {"MEDIAVIS","CDR-H93MV","1.31", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
253 {"SANKYO", "CP525","6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
254 {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
255 {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
256 {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
257 {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* extra reset */
260 * Other types of devices that have special flags.
262 {"SONY","CD-ROM CDU-8001","*", BLIST_BORKEN},
263 {"TEXEL","CD-ROM","1.06", BLIST_BORKEN},
264 {"IOMEGA","Io20S *F","*", BLIST_KEY},
265 {"INSITE","Floptical F*8I","*", BLIST_KEY},
266 {"INSITE","I325VM","*", BLIST_KEY},
267 {"NRC","MBR-7","*", BLIST_FORCELUN | BLIST_SINGLELUN},
268 {"NRC","MBR-7.4","*", BLIST_FORCELUN | BLIST_SINGLELUN},
269 {"NAKAMICH","MJ-4.8S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
270 {"PIONEER","CD-ROM DRM-602X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
271 {"PIONEER","CD-ROM DRM-604X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
272 {"EMULEX","MD21/S2 ESDI","*", BLIST_SINGLELUN},
273 {"CANON","IPUBJD","*", BLIST_SPARSELUN},
274 {"nCipher","Fastness Crypto","*", BLIST_FORCELUN},
275 {"MATSHITA","PD","*", BLIST_FORCELUN | BLIST_SINGLELUN},
276 {"YAMAHA","CDR100","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
277 {"YAMAHA","CDR102","1.00", BLIST_NOLUN}, /* Locks up if polled for lun != 0 */
279 * Must be at end of list...
281 {NULL, NULL, NULL}
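/* Match the vendor (bytes 8-15) and model (from byte 16) strings of the
 * INQUIRY response data against the table above, and return the flags of
 * the first matching entry (or 0 if the device is not listed). */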
284 static int get_device_flags(unsigned char * response_data){
285 int i = 0;
286 unsigned char * pnt;
287 for(i=0; 1; i++){
288 if(device_list[i].vendor == NULL) return 0;
289 pnt = &response_data[8];
290 while(*pnt && *pnt == ' ') pnt++;
291 if(memcmp(device_list[i].vendor, pnt,
292 strlen(device_list[i].vendor))) continue;
293 pnt = &response_data[16];
294 while(*pnt && *pnt == ' ') pnt++;
295 if(memcmp(device_list[i].model, pnt,
296 strlen(device_list[i].model))) continue;
297 return device_list[i].flags;
299 return 0;
303 * Function: scsi_make_blocked_list
305 * Purpose: Build linked list of hosts that require blocking.
307 * Arguments: None.
309 * Returns: Nothing
311 * Notes: Blocking is sort of a hack that is used to prevent more than one
312 * host adapter from being active at one time. This is used in cases
313 * where the ISA bus becomes unreliable if you have more than one
314 * host adapter really pumping data through.
316 * We spent a lot of time examining the problem, and I *believe* that
317 * the problem is bus related as opposed to being a driver bug.
319 * The blocked list is used as part of the synchronization object
320 * that we use to ensure that only one host is active at one time.
321 * I (ERY) would like to make this go away someday, but this would
322 * require that we have a recursive mutex object.
324 void
325 scsi_make_blocked_list(void)
327 int block_count = 0, index;
328 unsigned long flags;
329 struct Scsi_Host * sh[128], * shpnt;
332 * Create a circular linked list from the scsi hosts which have
333 * the "wish_block" field in the Scsi_Host structure set.
334 * The blocked list should include all the scsi hosts using ISA DMA.
335 * In some systems, using two dma channels simultaneously causes
336 * unpredictable results.
337 * Among the scsi hosts in the blocked list, only one host at a time
338 * is allowed to have active commands queued. The transition from
339 * one active host to the next one is allowed only when host_busy == 0
340 * for the active host (which implies host_busy == 0 for all the hosts
341 * in the list). Moreover for block devices the transition to a new
342 * active host is allowed only when a request is completed, since a
343 * block device request can be divided into multiple scsi commands
344 * (when there are few sg lists or clustering is disabled).
346 * (DB, 4 Feb 1995)
349 save_flags(flags);
350 cli();
351 host_active = NULL;
353 for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) {
355 #if 0
357 * Is this a candidate for the blocked list?
358 * Useful to put into the blocked list all the hosts whose driver
359 * does not know about the host->block feature.
361 if (shpnt->unchecked_isa_dma) shpnt->wish_block = 1;
362 #endif
364 if (shpnt->wish_block) sh[block_count++] = shpnt;
367 if (block_count == 1) sh[0]->block = NULL;
369 else if (block_count > 1) {
371 for(index = 0; index < block_count - 1; index++) {
372 sh[index]->block = sh[index + 1];
373 printk("scsi%d : added to blocked host list.\n",
374 sh[index]->host_no);
377 sh[block_count - 1]->block = sh[0];
378 printk("scsi%d : added to blocked host list.\n",
379 sh[index]->host_no);
382 restore_flags(flags);
385 static void scan_scsis_done (Scsi_Cmnd * SCpnt)
388 SCSI_LOG_MLCOMPLETE(1,printk ("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result));
389 SCpnt->request.rq_status = RQ_SCSI_DONE;
391 if (SCpnt->request.sem != NULL)
392 up(SCpnt->request.sem);
395 void scsi_logging_setup(char *str, int *ints)
397 if (ints[0] != 1) {
398 printk("scsi_logging_setup : usage scsi_logging_level=n "
399 "(n should be 0 or non-zero)\n");
400 } else {
401 scsi_logging_level = (ints[1])? ~0 : 0;
405 #ifdef CONFIG_SCSI_MULTI_LUN
406 static int max_scsi_luns = 8;
407 #else
408 static int max_scsi_luns = 1;
409 #endif
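/* Handler for the "max_scsi_luns=n" boot option (see the usage message in
 * the printk below); e.g. booting with max_scsi_luns=8 makes scan_scsis()
 * probe all eight luns even when CONFIG_SCSI_MULTI_LUN is not set. */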
411 void scsi_luns_setup(char *str, int *ints)
413 if (ints[0] != 1)
414 printk("scsi_luns_setup : usage max_scsi_luns=n (n should be between 1 and 8)\n");
415 else
416 max_scsi_luns = ints[1];
420 * Detecting SCSI devices :
421 * We scan all present host adapter's busses, from ID 0 to ID (max_id).
422 * We use the INQUIRY command, determine device type, and pass the ID /
423 * lun address of all sequential devices to the tape driver, all random
424 * devices to the disk driver.
426 static void scan_scsis (struct Scsi_Host *shpnt,
427 unchar hardcoded,
428 unchar hchannel,
429 unchar hid,
430 unchar hlun)
432 int channel;
433 int dev;
434 int lun;
435 int max_dev_lun;
436 Scsi_Cmnd * SCpnt;
437 unsigned char * scsi_result;
438 unsigned char scsi_result0[256];
439 Scsi_Device * SDpnt;
440 Scsi_Device * SDtail;
441 int sparse_lun;
443 SCpnt = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd), GFP_ATOMIC | GFP_DMA);
444 memset (SCpnt, 0, sizeof (Scsi_Cmnd));
446 SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
447 memset (SDpnt, 0, sizeof (Scsi_Device));
450 /* Make sure we have something that is valid for DMA purposes */
451 scsi_result = ( ( !shpnt->unchecked_isa_dma )
452 ? &scsi_result0[0] : scsi_init_malloc (512, GFP_DMA));
454 if (scsi_result == NULL)
456 printk ("Unable to obtain scsi_result buffer\n");
457 goto leave;
461 * We must chain ourself in the host_queue, so commands can time out
463 SCpnt->next = NULL;
464 SDpnt->device_queue = SCpnt;
465 SDpnt->host = shpnt;
466 SDpnt->online = TRUE;
469 * Next, hook the device to the host in question.
471 SDpnt->prev = NULL;
472 SDpnt->next = NULL;
473 if( shpnt->host_queue != NULL )
475 SDtail = shpnt->host_queue;
476 while( SDtail->next != NULL )
477 SDtail = SDtail->next;
479 SDtail->next = SDpnt;
480 SDpnt->prev = SDtail;
482 else
484 shpnt->host_queue = SDpnt;
488 * We need to increment the counter for this one device so we can track when
489 * things are quiet.
491 atomic_inc(&shpnt->host_active);
493 if (hardcoded == 1) {
494 Scsi_Device *oldSDpnt=SDpnt;
495 struct Scsi_Device_Template * sdtpnt;
496 channel = hchannel;
497 if(channel > shpnt->max_channel) goto leave;
498 dev = hid;
499 if(dev >= shpnt->max_id) goto leave;
500 lun = hlun;
501 if(lun >= shpnt->max_lun) goto leave;
502 scan_scsis_single (channel, dev, lun, &max_dev_lun, &sparse_lun,
503 &SDpnt, SCpnt, shpnt, scsi_result);
504 if(SDpnt!=oldSDpnt) {
506 /* it could happen the blockdevice hasn't yet been inited */
507 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
508 if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
510 oldSDpnt->scsi_request_fn = NULL;
511 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
512 if(sdtpnt->attach) {
513 (*sdtpnt->attach)(oldSDpnt);
514 if(oldSDpnt->attached) scsi_build_commandblocks(oldSDpnt);}
515 resize_dma_pool();
517 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
518 if(sdtpnt->finish && sdtpnt->nr_dev)
519 {(*sdtpnt->finish)();}
524 else {
525 for (channel = 0; channel <= shpnt->max_channel; channel++) {
526 for (dev = 0; dev < shpnt->max_id; ++dev) {
527 if (shpnt->this_id != dev) {
530 * We need the for so our continue, etc. work fine. We put this in
531 * a variable so that we can override it during the scan if we
532 * detect a device *KNOWN* to have multiple logical units.
534 max_dev_lun = (max_scsi_luns < shpnt->max_lun ?
535 max_scsi_luns : shpnt->max_lun);
536 sparse_lun = 0;
537 for (lun = 0; lun < max_dev_lun; ++lun) {
538 if (!scan_scsis_single (channel, dev, lun, &max_dev_lun,
539 &sparse_lun, &SDpnt, SCpnt, shpnt,
540 scsi_result)
541 && !sparse_lun)
542 break; /* break means don't probe further for luns!=0 */
543 } /* for lun ends */
544 } /* if this_id != id ends */
545 } /* for dev ends */
546 } /* for channel ends */
547 } /* if/else hardcoded */
550 * We need to decrement the counter for this one device
551 * so we know when everything is quiet.
553 atomic_dec(&shpnt->host_active);
555 leave:
557 {/* Unchain SCpnt from host_queue */
558 Scsi_Device *prev, *next;
559 Scsi_Device * dqptr;
561 for(dqptr = shpnt->host_queue; dqptr != SDpnt; dqptr = dqptr->next)
562 continue;
563 if(dqptr)
565 prev = dqptr->prev;
566 next = dqptr->next;
567 if(prev)
568 prev->next = next;
569 else
570 shpnt->host_queue = next;
571 if(next) next->prev = prev;
575 /* Last device block does not exist. Free memory. */
576 if (SDpnt != NULL)
577 scsi_init_free ((char *) SDpnt, sizeof (Scsi_Device));
579 if (SCpnt != NULL)
580 scsi_init_free ((char *) SCpnt, sizeof (Scsi_Cmnd));
582 /* If we allocated a buffer so we could do DMA, free it now */
583 if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
585 scsi_init_free (scsi_result, 512);
589 Scsi_Device * sdev;
590 Scsi_Cmnd * scmd;
592 SCSI_LOG_SCAN_BUS(4,printk("Host status for host %p:\n", shpnt));
593 for(sdev = shpnt->host_queue; sdev; sdev = sdev->next)
595 SCSI_LOG_SCAN_BUS(4,printk("Device %d %p: ", sdev->id, sdev));
596 for(scmd=sdev->device_queue; scmd; scmd = scmd->next)
598 SCSI_LOG_SCAN_BUS(4,printk("%p ", scmd));
600 SCSI_LOG_SCAN_BUS(4,printk("\n"));
606 * The worker for scan_scsis.
607 * Returning 0 means Please don't ask further for lun!=0, 1 means OK go on.
608 * Global variables used : scsi_devices(linked list)
610 int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun,
611 int *sparse_lun, Scsi_Device **SDpnt2, Scsi_Cmnd * SCpnt,
612 struct Scsi_Host * shpnt, char *scsi_result)
614 unsigned char scsi_cmd[12];
615 struct Scsi_Device_Template *sdtpnt;
616 Scsi_Device * SDtail, *SDpnt=*SDpnt2;
617 int bflags, type=-1;
619 SDpnt->host = shpnt;
620 SDpnt->id = dev;
621 SDpnt->lun = lun;
622 SDpnt->channel = channel;
623 SDpnt->online = TRUE;
625 /* Some low level driver could use device->type (DB) */
626 SDpnt->type = -1;
629 * Assume that the device will have handshaking problems, and then fix this
630 * field later if it turns out it doesn't
632 SDpnt->borken = 1;
633 SDpnt->was_reset = 0;
634 SDpnt->expecting_cc_ua = 0;
636 scsi_cmd[0] = TEST_UNIT_READY;
637 scsi_cmd[1] = lun << 5;
638 scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0;
640 SCpnt->host = SDpnt->host;
641 SCpnt->device = SDpnt;
642 SCpnt->target = SDpnt->id;
643 SCpnt->lun = SDpnt->lun;
644 SCpnt->channel = SDpnt->channel;
646 struct semaphore sem = MUTEX_LOCKED;
647 SCpnt->request.sem = &sem;
648 SCpnt->request.rq_status = RQ_SCSI_BUSY;
649 scsi_do_cmd (SCpnt, (void *) scsi_cmd,
650 (void *) scsi_result,
651 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
652 down (&sem);
653 SCpnt->request.sem = NULL;
656 SCSI_LOG_SCAN_BUS(3, printk ("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n",
657 dev, lun, SCpnt->result));
658 SCSI_LOG_SCAN_BUS(3,print_driverbyte(SCpnt->result));
659 SCSI_LOG_SCAN_BUS(3,print_hostbyte(SCpnt->result));
660 SCSI_LOG_SCAN_BUS(3,printk("\n"));
662 if (SCpnt->result) {
663 if (((driver_byte (SCpnt->result) & DRIVER_SENSE) ||
664 (status_byte (SCpnt->result) & CHECK_CONDITION)) &&
665 ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
666 if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
667 ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
668 ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
669 return 1;
671 else
672 return 0;
675 SCSI_LOG_SCAN_BUS(3,printk ("scsi: performing INQUIRY\n"));
677 * Build an INQUIRY command block.
679 scsi_cmd[0] = INQUIRY;
680 scsi_cmd[1] = (lun << 5) & 0xe0;
681 scsi_cmd[2] = 0;
682 scsi_cmd[3] = 0;
683 scsi_cmd[4] = 255;
684 scsi_cmd[5] = 0;
685 SCpnt->cmd_len = 0;
687 struct semaphore sem = MUTEX_LOCKED;
688 SCpnt->request.sem = &sem;
689 SCpnt->request.rq_status = RQ_SCSI_BUSY;
690 scsi_do_cmd (SCpnt, (void *) scsi_cmd,
691 (void *) scsi_result,
692 256, scan_scsis_done, SCSI_TIMEOUT, 3);
693 down (&sem);
694 SCpnt->request.sem = NULL;
697 SCSI_LOG_SCAN_BUS(3,printk ("scsi: INQUIRY %s with code 0x%x\n",
698 SCpnt->result ? "failed" : "successful", SCpnt->result));
700 if (SCpnt->result)
701 return 0; /* assume no peripheral if any sort of error */
704 * Check the peripheral qualifier field - this tells us whether LUNS
705 * are supported here or not.
707 if( (scsi_result[0] >> 5) == 3 )
709 return 0; /* assume no peripheral if any sort of error */
713 * It would seem some TOSHIBA CDROM gets things wrong
715 if (!strncmp (scsi_result + 8, "TOSHIBA", 7) &&
716 !strncmp (scsi_result + 16, "CD-ROM", 6) &&
717 scsi_result[0] == TYPE_DISK) {
718 scsi_result[0] = TYPE_ROM;
719 scsi_result[1] |= 0x80; /* removable */
722 memcpy (SDpnt->vendor, scsi_result + 8, 8);
723 memcpy (SDpnt->model, scsi_result + 16, 16);
724 memcpy (SDpnt->rev, scsi_result + 32, 4);
726 SDpnt->removable = (0x80 & scsi_result[1]) >> 7;
727 SDpnt->online = TRUE;
728 SDpnt->lockable = SDpnt->removable;
729 SDpnt->changed = 0;
730 SDpnt->access_count = 0;
731 SDpnt->busy = 0;
732 SDpnt->has_cmdblocks = 0;
734 * Currently, all sequential devices are assumed to be tapes, all random
735 * devices disks, with the appropriate read-only flags set so that ROM / WORM
736 * devices are treated as RO.
738 switch (type = (scsi_result[0] & 0x1f)) {
739 case TYPE_TAPE:
740 case TYPE_DISK:
741 case TYPE_MOD:
742 case TYPE_PROCESSOR:
743 case TYPE_SCANNER:
744 SDpnt->writeable = 1;
745 break;
746 case TYPE_WORM:
747 case TYPE_ROM:
748 SDpnt->writeable = 0;
749 break;
750 default:
751 printk ("scsi: unknown type %d\n", type);
754 SDpnt->device_blocked = FALSE;
755 SDpnt->device_busy = 0;
756 SDpnt->single_lun = 0;
757 SDpnt->soft_reset =
758 (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2);
759 SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
760 SDpnt->type = (type & 0x1f);
762 print_inquiry (scsi_result);
764 for (sdtpnt = scsi_devicelist; sdtpnt;
765 sdtpnt = sdtpnt->next)
766 if (sdtpnt->detect)
767 SDpnt->attached +=
768 (*sdtpnt->detect) (SDpnt);
770 SDpnt->scsi_level = scsi_result[2] & 0x07;
771 if (SDpnt->scsi_level >= 2 ||
772 (SDpnt->scsi_level == 1 &&
773 (scsi_result[3] & 0x0f) == 1))
774 SDpnt->scsi_level++;
777 * Accommodate drivers that want to sleep when they should be in a polling
778 * loop.
780 SDpnt->disconnect = 0;
783 * Get any flags for this device.
785 bflags = get_device_flags (scsi_result);
788 * Set the tagged_queue flag for SCSI-II devices that purport to support
789 * tagged queuing in the INQUIRY data.
791 SDpnt->tagged_queue = 0;
792 if ((SDpnt->scsi_level >= SCSI_2) &&
793 (scsi_result[7] & 2) &&
794 !(bflags & BLIST_NOTQ)) {
795 SDpnt->tagged_supported = 1;
796 SDpnt->current_tag = 0;
800 * Some revisions of the Texel CD ROM drives have handshaking problems when
801 * used with the Seagate controllers. Before we know what type of device
802 * we're talking to, we assume it's borken and then change it here if it
803 * turns out that it isn't a TEXEL drive.
805 if ((bflags & BLIST_BORKEN) == 0)
806 SDpnt->borken = 0;
809 * If we want to only allow I/O to one of the luns attached to this device
810 * at a time, then we set this flag.
812 if (bflags & BLIST_SINGLELUN)
813 SDpnt->single_lun = 1;
816 * These devices need this "key" to unlock the devices so we can use it
818 if ((bflags & BLIST_KEY) != 0) {
819 printk ("Unlocked floptical drive.\n");
820 SDpnt->lockable = 0;
821 scsi_cmd[0] = MODE_SENSE;
822 scsi_cmd[1] = (lun << 5) & 0xe0;
823 scsi_cmd[2] = 0x2e;
824 scsi_cmd[3] = 0;
825 scsi_cmd[4] = 0x2a;
826 scsi_cmd[5] = 0;
827 SCpnt->cmd_len = 0;
829 struct semaphore sem = MUTEX_LOCKED;
830 SCpnt->request.rq_status = RQ_SCSI_BUSY;
831 SCpnt->request.sem = &sem;
832 scsi_do_cmd (SCpnt, (void *) scsi_cmd,
833 (void *) scsi_result, 0x2a,
834 scan_scsis_done, SCSI_TIMEOUT, 3);
835 down (&sem);
836 SCpnt->request.sem = NULL;
841 * Detach the command from the device. It was just a temporary to be used while
842 * scanning the bus - the real ones will be allocated later.
844 SDpnt->device_queue = NULL;
847 * This device was already hooked up to the host in question,
848 * so at this point we just let go of it and it should be fine. We do need to
849 * allocate a new one and attach it to the host so that we can further scan the bus.
851 SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
852 *SDpnt2=SDpnt;
853 if (!SDpnt)
855 printk ("scsi: scan_scsis_single: Cannot malloc\n");
856 return 0;
859 memset (SDpnt, 0, sizeof (Scsi_Device));
862 * And hook up our command block to the new device we will be testing
863 * for.
865 SDpnt->device_queue = SCpnt;
866 SDpnt->online = TRUE;
869 * Since we just found one device, there had damn well better be one in the list
870 * already.
872 if( shpnt->host_queue == NULL )
873 panic("scan_scsis_single: Host queue == NULL\n");
875 SDtail = shpnt->host_queue;
876 while (SDtail->next)
878 SDtail = SDtail->next;
881 /* Add this device to the linked list at the end */
882 SDtail->next = SDpnt;
883 SDpnt->prev = SDtail;
884 SDpnt->next = NULL;
887 * Some scsi devices cannot be polled for lun != 0 due to firmware bugs
889 if (bflags & BLIST_NOLUN)
890 return 0; /* break; */
893 * If this device is known to support sparse multiple units, override the
894 * other settings, and scan all of them.
896 if (bflags & BLIST_SPARSELUN) {
897 *max_dev_lun = 8;
898 *sparse_lun = 1;
899 return 1;
903 * If this device is known to support multiple units, override the other
904 * settings, and scan all of them.
906 if (bflags & BLIST_FORCELUN) {
907 *max_dev_lun = 8;
908 return 1;
911 * We assume the device can't handle lun!=0 if: - it reports scsi-0 (ANSI
912 * SCSI Revision 0) (old drives like MAXTOR XT-3280) or - it reports scsi-1
913 * (ANSI SCSI Revision 1) and Response Data Format 0
915 if (((scsi_result[2] & 0x07) == 0)
917 ((scsi_result[2] & 0x07) == 1 &&
918 (scsi_result[3] & 0x0f) == 0))
919 return 0;
920 return 1;
924 * Flag bits for the internal_timeout array
926 #define NORMAL_TIMEOUT 0
927 #define IN_ABORT 1
928 #define IN_RESET 2
929 #define IN_RESET2 4
930 #define IN_RESET3 8
933 /* This function takes a quick look at a request, and decides if it
934 * can be queued now, or if there would be a stall while waiting for
935 * something else to finish. This routine assumes that interrupts are
936 * turned off when entering the routine. It is the responsibility
937 * of the calling code to ensure that this is the case.
940 Scsi_Cmnd * scsi_request_queueable (struct request * req, Scsi_Device * device)
942 Scsi_Cmnd * SCpnt = NULL;
943 int tablesize;
944 Scsi_Cmnd * found = NULL;
945 struct buffer_head * bh, *bhp;
947 if (!device)
948 panic ("No device passed to scsi_request_queueable().\n");
950 if (req && req->rq_status == RQ_INACTIVE)
951 panic("Inactive in scsi_request_queueable");
954 * Look for a free command block. If we have been instructed not to queue
955 * multiple commands to multi-lun devices, then check to see what else is
956 * going for this device first.
959 if (!device->single_lun) {
960 SCpnt = device->device_queue;
961 while(SCpnt){
962 if(SCpnt->request.rq_status == RQ_INACTIVE) break;
963 SCpnt = SCpnt->next;
965 } else {
966 SCpnt = device->device_queue;
967 while(SCpnt){
968 if(SCpnt->channel == device->channel
969 && SCpnt->target == device->id) {
970 if (SCpnt->lun == device->lun) {
971 if(found == NULL
972 && SCpnt->request.rq_status == RQ_INACTIVE)
974 found=SCpnt;
977 if(SCpnt->request.rq_status != RQ_INACTIVE) {
979 * I think that we should really limit things to one
980 * outstanding command per device - this is what tends
981 * to trip up buggy firmware.
983 return NULL;
986 SCpnt = SCpnt->next;
988 SCpnt = found;
991 if (!SCpnt) return NULL;
993 if (SCSI_BLOCK(device, device->host)) return NULL;
995 if (req) {
996 memcpy(&SCpnt->request, req, sizeof(struct request));
997 tablesize = device->host->sg_tablesize;
998 bhp = bh = req->bh;
999 if(!tablesize) bh = NULL;
1000 /* Take a quick look through the table to see how big it is.
1001 * We already have our copy of req, so we can mess with that
1002 * if we want to.
1004 while(req->nr_sectors && bh){
1005 bhp = bhp->b_reqnext;
1006 if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
1007 req->nr_sectors -= bh->b_size >> 9;
1008 req->sector += bh->b_size >> 9;
1009 if(!tablesize) break;
1010 bh = bhp;
1012 if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */
1013 SCpnt->request.bhtail = bh;
1014 req->bh = bh->b_reqnext; /* Divide request */
1015 bh->b_reqnext = NULL;
1016 bh = req->bh;
1018 /* Now reset things so that req looks OK */
1019 SCpnt->request.nr_sectors -= req->nr_sectors;
1020 req->current_nr_sectors = bh->b_size >> 9;
1021 req->buffer = bh->b_data;
1022 SCpnt->request.sem = NULL; /* Wait until whole thing done */
1023 } else {
1024 req->rq_status = RQ_INACTIVE;
1025 wake_up(&wait_for_request);
1027 } else {
1028 SCpnt->request.rq_status = RQ_SCSI_BUSY; /* Busy, but no request */
1029 SCpnt->request.sem = NULL; /* And no one is waiting for the device
1030 * either */
1033 atomic_inc(&SCpnt->host->host_active);
1034 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n", SCpnt->target,
1035 atomic_read(&SCpnt->host->host_active)));
1036 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
1037 SCpnt->old_use_sg = 0;
1038 SCpnt->transfersize = 0;
1039 SCpnt->underflow = 0;
1040 SCpnt->cmd_len = 0;
1042 /* Since not everyone seems to set the device info correctly
1043 * before Scsi_Cmnd gets sent out to scsi_do_command, we do it here.
1045 SCpnt->channel = device->channel;
1046 SCpnt->lun = device->lun;
1047 SCpnt->target = device->id;
1048 SCpnt->state = SCSI_STATE_INITIALIZING;
1049 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1051 return SCpnt;
1054 /* This function returns a structure pointer that will be valid for
1055 * the device. The wait parameter tells us whether we should wait for
1056 * the unit to become free or not. We are also able to tell this routine
1057 * not to return a descriptor if the host is unable to accept any more
1058 * commands for the time being. We need to keep in mind that there is no
1059 * guarantee that the host remains not busy. Keep in mind that the
1060 * scsi_request_queueable function also knows the internal allocation scheme
1061 * of the packets for each device
1064 Scsi_Cmnd * scsi_allocate_device (struct request ** reqp, Scsi_Device * device,
1065 int wait)
1067 kdev_t dev;
1068 struct request * req = NULL;
1069 int tablesize;
1070 unsigned long flags;
1071 struct buffer_head * bh, *bhp;
1072 struct Scsi_Host * host;
1073 Scsi_Cmnd * SCpnt = NULL;
1074 Scsi_Cmnd * SCwait = NULL;
1075 Scsi_Cmnd * found = NULL;
1077 if (!device)
1078 panic ("No device passed to scsi_allocate_device().\n");
1080 if (reqp) req = *reqp;
1082 /* See if this request has already been queued by an interrupt routine */
1083 if (req) {
1084 if(req->rq_status == RQ_INACTIVE) return NULL;
1085 dev = req->rq_dev;
1086 } else
1087 dev = 0; /* unused */
1089 host = device->host;
1091 if (in_interrupt() && SCSI_BLOCK(device, host)) return NULL;
1093 while (1==1){
1094 if (!device->single_lun) {
1095 SCpnt = device->device_queue;
1096 while(SCpnt){
1097 SCwait = SCpnt;
1098 if(SCpnt->request.rq_status == RQ_INACTIVE) break;
1099 SCpnt = SCpnt->next;
1101 } else {
1102 SCpnt = device->device_queue;
1103 while(SCpnt){
1104 if(SCpnt->channel == device->channel
1105 && SCpnt->target == device->id) {
1106 if (SCpnt->lun == device->lun) {
1107 SCwait = SCpnt;
1108 if(found == NULL
1109 && SCpnt->request.rq_status == RQ_INACTIVE)
1111 found=SCpnt;
1114 if(SCpnt->request.rq_status != RQ_INACTIVE) {
1116 * I think that we should really limit things to one
1117 * outstanding command per device - this is what tends
1118 * to trip up buggy firmware.
1120 found = NULL;
1121 break;
1124 SCpnt = SCpnt->next;
1126 SCpnt = found;
1129 save_flags(flags);
1130 cli();
1131 /* See if this request has already been queued by an interrupt routine
1133 if (req && (req->rq_status == RQ_INACTIVE || req->rq_dev != dev)) {
1134 restore_flags(flags);
1135 return NULL;
1137 if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE) /* Might have changed */
1139 if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE){
1140 sleep_on(&device->device_wait);
1141 restore_flags(flags);
1142 } else {
1143 restore_flags(flags);
1144 if (!wait) return NULL;
1145 if (!SCwait) {
1146 printk("Attempt to allocate device channel %d,"
1147 " target %d, lun %d\n", device->channel,
1148 device->id, device->lun);
1149 panic("No device found in scsi_allocate_device\n");
1152 } else {
1153 if (req) {
1154 memcpy(&SCpnt->request, req, sizeof(struct request));
1155 tablesize = device->host->sg_tablesize;
1156 bhp = bh = req->bh;
1157 if(!tablesize) bh = NULL;
1158 /* Take a quick look through the table to see how big it is.
1159 * We already have our copy of req, so we can mess with that
1160 * if we want to.
1162 while(req->nr_sectors && bh){
1163 bhp = bhp->b_reqnext;
1164 if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
1165 req->nr_sectors -= bh->b_size >> 9;
1166 req->sector += bh->b_size >> 9;
1167 if(!tablesize) break;
1168 bh = bhp;
1170 if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
1171 SCpnt->request.bhtail = bh;
1172 req->bh = bh->b_reqnext; /* Divide request */
1173 bh->b_reqnext = NULL;
1174 bh = req->bh;
1175 /* Now reset things so that req looks OK */
1176 SCpnt->request.nr_sectors -= req->nr_sectors;
1177 req->current_nr_sectors = bh->b_size >> 9;
1178 req->buffer = bh->b_data;
1179 SCpnt->request.sem = NULL; /* Wait until whole thing done*/
1181 else
1183 req->rq_status = RQ_INACTIVE;
1184 *reqp = req->next;
1185 wake_up(&wait_for_request);
1187 } else {
1188 SCpnt->request.rq_status = RQ_SCSI_BUSY;
1189 SCpnt->request.sem = NULL; /* And no one is waiting for this
1190 * to complete */
1192 atomic_inc(&SCpnt->host->host_active);
1193 restore_flags(flags);
1194 SCSI_LOG_MLQUEUE(5, printk("Activating command for device %d (%d)\n",
1195 SCpnt->target,
1196 atomic_read(&SCpnt->host->host_active)));
1197 break;
1201 SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
1202 SCpnt->old_use_sg = 0;
1203 SCpnt->transfersize = 0; /* No default transfer size */
1204 SCpnt->cmd_len = 0;
1206 SCpnt->underflow = 0; /* Do not flag underflow conditions */
1208 /* Since not everyone seems to set the device info correctly
1209 * before Scsi_Cmnd gets sent out to scsi_do_command, we do it here.
1210 * FIXME(eric) This doesn't make any sense.
1212 SCpnt->channel = device->channel;
1213 SCpnt->lun = device->lun;
1214 SCpnt->target = device->id;
1215 SCpnt->state = SCSI_STATE_INITIALIZING;
1216 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1218 return SCpnt;
1222 * Function: scsi_release_command
1224 * Purpose: Release a command block.
1226 * Arguments: SCpnt - command block we are releasing.
1228 * Notes: The command block can no longer be used by the caller once
1229 * this function is called. This is in effect the inverse
1230 * of scsi_allocate_device/scsi_request_queueable.
1232 void
1233 scsi_release_command(Scsi_Cmnd * SCpnt)
1235 SCpnt->request.rq_status = RQ_INACTIVE;
1236 SCpnt->state = SCSI_STATE_UNUSED;
1237 SCpnt->owner = SCSI_OWNER_NOBODY;
1238 atomic_dec(&SCpnt->host->host_active);
1240 SCSI_LOG_MLQUEUE(5, printk("Deactivating command for device %d (active=%d, failed=%d)\n",
1241 SCpnt->target,
1242 atomic_read(&SCpnt->host->host_active),
1243 SCpnt->host->host_failed));
1244 if( SCpnt->host->host_failed != 0 )
1246 SCSI_LOG_ERROR_RECOVERY(5, printk("Error handler thread %d %d\n",
1247 SCpnt->host->in_recovery,
1248 SCpnt->host->eh_active));
1252 * If the host is having troubles, then look to see if this was the last
1253 * command that might have failed. If so, wake up the error handler.
1255 if( SCpnt->host->in_recovery
1256 && !SCpnt->host->eh_active
1257 && atomic_read(&SCpnt->host->host_active) == SCpnt->host->host_failed )
1259 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1260 atomic_read(&SCpnt->host->eh_wait->count)));
1261 up(SCpnt->host->eh_wait);
1266 * This is inline because we have stack problems if we recurse too deeply.
1269 inline int internal_cmnd (Scsi_Cmnd * SCpnt)
1271 #ifdef DEBUG_DELAY
1272 unsigned long clock;
1273 #endif
1274 unsigned long flags;
1275 struct Scsi_Host * host;
1276 int rtn = 0;
1277 unsigned long timeout;
1279 #if DEBUG
1280 unsigned long *ret = 0;
1281 #ifdef __mips__
1282 __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
1283 #else
1284 ret = __builtin_return_address(0);
1285 #endif
1286 #endif
1288 host = SCpnt->host;
1290 save_flags(flags);
1291 cli();
1292 /* Assign a unique nonzero serial_number. */
1293 if (++serial_number == 0) serial_number = 1;
1294 SCpnt->serial_number = serial_number;
1297 * We will wait MIN_RESET_DELAY clock ticks after the last reset so
1298 * we can avoid the drive not being ready.
1300 timeout = host->last_reset + MIN_RESET_DELAY;
1301 if (jiffies < timeout) {
1302 int ticks_remaining = timeout - jiffies;
1304 * NOTE: This may be executed from within an interrupt
1305 * handler! This is bad, but for now, it'll do. The irq
1306 * level of the interrupt handler has been masked out by the
1307 * platform dependent interrupt handling code already, so the
1308 * sti() here will not cause another call to the SCSI host's
1309 * interrupt handler (assuming there is one irq-level per
1310 * host).
1312 sti();
1313 while (--ticks_remaining >= 0) udelay(1000000/HZ);
1314 host->last_reset = jiffies - MIN_RESET_DELAY;
1316 restore_flags(flags);
1318 if( host->hostt->use_new_eh_code )
1320 scsi_add_timer(SCpnt, SCpnt->timeout_per_command, scsi_times_out);
1322 else
1324 scsi_add_timer(SCpnt, SCpnt->timeout_per_command,
1325 scsi_old_times_out);
1329 * We will use a queued command if possible, otherwise we will emulate the
1330 * queuing and calling of completion function ourselves.
1332 SCSI_LOG_MLQUEUE(3,printk("internal_cmnd (host = %d, channel = %d, target = %d, "
1333 "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n",
1334 SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd,
1335 SCpnt->buffer, SCpnt->bufflen, SCpnt->done));
1337 SCpnt->state = SCSI_STATE_QUEUED;
1338 SCpnt->owner = SCSI_OWNER_LOWLEVEL;
1339 if (host->can_queue)
1341 SCSI_LOG_MLQUEUE(3,printk("queuecommand : routine at %p\n",
1342 host->hostt->queuecommand));
1343 /* This locking tries to prevent all sorts of races between
1344 * queuecommand and the interrupt code. In effect,
1345 * we are only allowed to be in queuecommand once at
1346 * any given time, and we can only be in the interrupt
1347 * handler and the queuecommand function at the same time
1348 * when queuecommand is called while servicing the
1349 * interrupt.
1352 if(!in_interrupt() && SCpnt->host->irq)
1353 disable_irq(SCpnt->host->irq);
1356 * Use the old error handling code if we haven't converted the driver
1357 * to use the new one yet. Note - only the new queuecommand variant
1358 * passes a meaningful return value.
1360 if( host->hostt->use_new_eh_code )
1362 rtn = host->hostt->queuecommand (SCpnt, scsi_done);
1363 if( rtn != 0 )
1365 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_HOST_BUSY);
1368 else
1370 host->hostt->queuecommand (SCpnt, scsi_old_done);
1373 if(!in_interrupt() && SCpnt->host->irq)
1374 enable_irq(SCpnt->host->irq);
1376 else
1378 int temp;
1380 SCSI_LOG_MLQUEUE(3,printk("command() : routine at %p\n", host->hostt->command));
1381 temp = host->hostt->command (SCpnt);
1382 SCpnt->result = temp;
1383 #ifdef DEBUG_DELAY
1384 clock = jiffies + 4 * HZ;
1385 while (jiffies < clock) barrier();
1386 printk("done(host = %d, result = %04x) : routine at %p\n",
1387 host->host_no, temp, host->hostt->command);
1388 #endif
1389 if( host->hostt->use_new_eh_code )
1391 scsi_done(SCpnt);
1393 else
1395 scsi_old_done(SCpnt);
1398 SCSI_LOG_MLQUEUE(3,printk("leaving internal_cmnd()\n"));
1399 return rtn;
1403 * scsi_do_cmd sends all the commands out to the low-level driver. It
1404 * handles the specifics required for each low level driver - ie queued
1405 * or non queued. It also prevents conflicts when different high level
1406 * drivers go for the same host at the same time.
1409 void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd ,
1410 void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *),
1411 int timeout, int retries)
1413 unsigned long flags;
1414 struct Scsi_Host * host = SCpnt->host;
1415 Scsi_Device * device = SCpnt->device;
1417 SCpnt->owner = SCSI_OWNER_MIDLEVEL;
1419 SCSI_LOG_MLQUEUE(4,
1421 int i;
1422 int target = SCpnt->target;
1423 printk ("scsi_do_cmd (host = %d, channel = %d target = %d, "
1424 "buffer =%p, bufflen = %d, done = %p, timeout = %d, "
1425 "retries = %d)\n"
1426 "command : " , host->host_no, SCpnt->channel, target, buffer,
1427 bufflen, done, timeout, retries);
1428 for (i = 0; i < 10; ++i)
1429 printk ("%02x ", ((unsigned char *) cmnd)[i]);
1430 printk("\n");
1433 if (!host)
1435 panic ("Invalid or not present host.\n");
1440 * We must prevent reentrancy to the lowlevel host driver. This prevents
1441 * it - we enter a loop until the host we want to talk to is not busy.
1442 * Race conditions are prevented, as interrupts are disabled in between the
1443 * time we check for the host being not busy, and the time we mark it busy
1444 * ourselves.
1447 save_flags(flags);
1448 cli();
1449 SCpnt->pid = scsi_pid++;
1451 while (SCSI_BLOCK((Scsi_Device *) NULL, host)) {
1452 restore_flags(flags);
1453 SCSI_SLEEP(&host->host_wait, SCSI_BLOCK((Scsi_Device *) NULL, host));
1454 cli();
1457 if (host->block) host_active = host;
1459 host->host_busy++;
1460 device->device_busy++;
1461 restore_flags(flags);
1464 * Our own function scsi_done (which marks the host as not busy, disables
1465 * the timeout counter, etc) will be called either by us or by the
1466 * scsi_hosts[host].queuecommand() function; scsi_done in turn calls
1467 * the completion function for the high level driver.
1470 memcpy ((void *) SCpnt->data_cmnd , (const void *) cmnd, 12);
1471 SCpnt->reset_chain = NULL;
1472 SCpnt->serial_number = 0;
1473 SCpnt->bufflen = bufflen;
1474 SCpnt->buffer = buffer;
1475 SCpnt->flags = 0;
1476 SCpnt->retries = 0;
1477 SCpnt->allowed = retries;
1478 SCpnt->done = done;
1479 SCpnt->timeout_per_command = timeout;
1481 memcpy ((void *) SCpnt->cmnd , (const void *) cmnd, 12);
1482 /* Zero the sense buffer. Some host adapters automatically request
1483 * sense on error. 0 is not a valid sense code.
1485 memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer);
1486 SCpnt->request_buffer = buffer;
1487 SCpnt->request_bufflen = bufflen;
1488 SCpnt->old_use_sg = SCpnt->use_sg;
1489 if (SCpnt->cmd_len == 0)
1490 SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0]);
1491 SCpnt->old_cmd_len = SCpnt->cmd_len;
1493 /* Start the timer ticking. */
1495 SCpnt->internal_timeout = NORMAL_TIMEOUT;
1496 SCpnt->abort_reason = 0;
1497 internal_cmnd (SCpnt);
1499 SCSI_LOG_MLQUEUE(3,printk ("Leaving scsi_do_cmd()\n"));
1502 /* This function is the mid-level interrupt routine, which decides how
1503 * to handle error conditions. Each invocation of this function must
1504 * do one and *only* one of the following:
1506 * 1) Insert command in BH queue.
1507 * 2) Activate error handler for host.
1509 * FIXME(eric) - I am concerned about stack overflow (still). An interrupt could
1510 * come while we are processing the bottom queue, which would cause another command
1511 * to be stuffed onto the bottom queue, and it would in turn be processed as that
1512 * interrupt handler is returning. Given a sufficiently steady rate of returning
1513 * commands, this could cause the stack to overflow. I am not sure what is the most
1514 * appropriate solution here - we should probably keep a depth count, and not process
1515 * any commands while we still have a bottom handler active higher in the stack.
1517 * There is currently code in the bottom half handler to monitor recursion in the bottom
1518 * handler and report if it ever happens. If this becomes a problem, it won't be hard to
1519 * engineer something to deal with it so that only the outer layer ever does any real
1520 * processing.
1522 void
1523 scsi_done (Scsi_Cmnd * SCpnt)
1525 unsigned long flags;
1526 Scsi_Cmnd * SCswap;
1529 * We don't have to worry about this one timing out any more.
1531 scsi_delete_timer(SCpnt);
1534 * First, see whether this command already timed out. If so, we ignore
1535 * the response. We treat it as if the command never finished.
1537 if( SCpnt->state == SCSI_STATE_TIMEOUT )
1539 SCSI_LOG_MLCOMPLETE(1,printk("Ignoring completion of %p due to timeout status", SCpnt));
1540 return;
1543 SCpnt->state = SCSI_STATE_BHQUEUE;
1544 SCpnt->owner = SCSI_OWNER_BH_HANDLER;
1545 SCpnt->bh_next = NULL;
1548 * Next, put this command in the BH queue. All processing of the command
1549 * past this point will take place with interrupts turned on.
1550 * We start by atomically swapping the pointer into the queue head slot.
1551 * If it was NULL before, then everything is fine, and we are done
1552 * (this is the normal case). If it was not NULL, then we block interrupts,
1553 * and link them together.
1556 SCswap = (Scsi_Cmnd *) xchg(&scsi_bh_queue_head, SCpnt);
1557 if( SCswap != NULL )
1560 * If we assume that the interrupt handler doesn't dawdle, then it is safe to
1561 * say that we should come in here extremely rarely. Under very heavy load,
1562 * the requests might not be removed from the list fast enough so that we
1563 * *do* end up stacking them, and that would be bad.
1565 save_flags(flags);
1566 cli();
1569 * See if the pointer is NULL - it might have been serviced already
1571 if( scsi_bh_queue_head == NULL )
1573 scsi_bh_queue_head = SCswap;
1575 else
1577 SCswap->bh_next = scsi_bh_queue_head;
1578 scsi_bh_queue_head = SCswap;
1581 restore_flags(flags);
1585 * Mark the bottom half handler to be run.
1587 mark_bh(SCSI_BH);
1591 * Procedure: scsi_bottom_half_handler
1593 * Purpose: Called after we have finished processing interrupts, it
1594 * performs post-interrupt handling for commands that may
1595 * have completed.
1597 * Notes: This is called with all interrupts enabled. This should reduce
1598 * interrupt latency, stack depth, and reentrancy of the low-level
1599 * drivers.
1601 void scsi_bottom_half_handler(void)
1603 Scsi_Cmnd * SCpnt;
1604 Scsi_Cmnd * SCnext;
1605 static atomic_t recursion_depth;
1608 while(1==1)
1611 * If the counter is > 0, that means that there is another interrupt handler
1612 * out there somewhere processing commands. We don't want to get these guys
1613 * nested as this can lead to stack overflow problems, and there isn't any
1614 * real sense in it anyways.
1616 if( atomic_read(&recursion_depth) > 0 )
1618 printk("SCSI bottom half recursion depth = %d \n", atomic_read(&recursion_depth));
1619 SCSI_LOG_MLCOMPLETE(1,printk("SCSI bottom half recursion depth = %d \n",
1620 atomic_read(&recursion_depth)));
1621 break;
1625 * This is an atomic operation - swap the pointer with a NULL pointer
1626 * We will process everything we find in the list here.
1628 SCpnt = xchg(&scsi_bh_queue_head, NULL);
1630 if( SCpnt == NULL )
1632 return;
1635 atomic_inc(&recursion_depth);
1637 SCnext = SCpnt->bh_next;
1639 for(; SCpnt; SCpnt = SCnext)
1641 SCnext = SCpnt->bh_next;
1643 switch( scsi_decide_disposition(SCpnt) )
1645 case SUCCESS:
1647 * The command completed cleanly - pass it off to the upper layers.
1649 SCSI_LOG_MLCOMPLETE(3,printk("Command finished %d %d 0x%x\n", SCpnt->host->host_busy,
1650 SCpnt->host->host_failed,
1651 SCpnt->result));
1653 scsi_finish_command(SCpnt);
1654 break;
1655 case NEEDS_RETRY:
1657 * We only come in here if we want to retry a command. The
1658 * test to see whether the command should be retried should be
1659 * keeping track of the number of tries, so we don't end up looping,
1660 * of course.
1662 SCSI_LOG_MLCOMPLETE(3,printk("Command needs retry %d %d 0x%x\n", SCpnt->host->host_busy,
1663 SCpnt->host->host_failed, SCpnt->result));
1665 scsi_retry_command(SCpnt);
1666 break;
1667 case ADD_TO_MLQUEUE:
1669 * This typically happens for a QUEUE_FULL message -
1670 * typically only when the queue depth is only
1671 * approximate for a given device. Adding a command
1672 * to the queue for the device will prevent further commands
1673 * from being sent to the device, so we shouldn't end up
1674 * with tons of things being sent down that shouldn't be.
1676 scsi_mlqueue_insert(SCpnt, SCSI_MLQUEUE_DEVICE_BUSY);
1677 break;
1678 default:
1680 * Here we have a fatal error of some sort. Turn it over to
1681 * the error handler.
1683 SCSI_LOG_MLCOMPLETE(3,printk("Command failed %p %x active=%d busy=%d failed=%d\n",
1684 SCpnt, SCpnt->result,
1685 atomic_read(&SCpnt->host->host_active),
1686 SCpnt->host->host_busy,
1687 SCpnt->host->host_failed));
1690 * Dump the sense information too.
1692 if ((status_byte (SCpnt->result) & CHECK_CONDITION) != 0)
1694 SCSI_LOG_MLCOMPLETE(3,print_sense("bh",SCpnt));
1698 if( SCpnt->host->eh_wait != NULL )
1700 SCpnt->host->host_failed++;
1701 SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
1702 SCpnt->state = SCSI_STATE_FAILED;
1703 SCpnt->host->in_recovery = 1;
1705 * If the host is having troubles, then look to see if this was the last
1706 * command that might have failed. If so, wake up the error handler.
1708 if( atomic_read(&SCpnt->host->host_active) == SCpnt->host->host_failed )
1710 SCSI_LOG_ERROR_RECOVERY(5, printk("Waking error handler thread (%d)\n",
1711 atomic_read(&SCpnt->host->eh_wait->count)));
1712 up(SCpnt->host->eh_wait);
1715 else
1718 * We only get here if the error recovery thread has died.
1720 scsi_finish_command(SCpnt);
1723 } /* for(; SCpnt...) */
1725 atomic_dec(&recursion_depth);
1727 } /* while(1==1) */
1732 * Function: scsi_retry_command
1734 * Purpose: Send a command back to the low level to be retried.
1736 * Notes: This command is always executed in the context of the
1737 * bottom half handler, or the error handler thread. Low
1738 * level drivers should not become re-entrant as a result of
1739 * this.
1742 scsi_retry_command(Scsi_Cmnd * SCpnt)
1744 memcpy ((void *) SCpnt->cmnd, (void*) SCpnt->data_cmnd,
1745 sizeof(SCpnt->data_cmnd));
1746 SCpnt->request_buffer = SCpnt->buffer;
1747 SCpnt->request_bufflen = SCpnt->bufflen;
1748 SCpnt->use_sg = SCpnt->old_use_sg;
1749 SCpnt->cmd_len = SCpnt->old_cmd_len;
1750 return internal_cmnd (SCpnt);
1754 * Function: scsi_finish_command
1756 * Purpose: Pass command off to upper layer for finishing of I/O
1757 * request, waking processes that are waiting on results,
1758 * etc.
1760 void
1761 scsi_finish_command(Scsi_Cmnd * SCpnt)
1763 struct Scsi_Host * host;
1764 Scsi_Device * device;
1766 host = SCpnt->host;
1767 device = SCpnt->device;
1769 host->host_busy--; /* Indicate that we are free */
1770 device->device_busy--; /* Decrement device usage counter. */
1772 if (host->block && host->host_busy == 0)
1774 host_active = NULL;
1776 /* For block devices "wake_up" is done in end_scsi_request */
1777 if (MAJOR(SCpnt->request.rq_dev) != SCSI_DISK_MAJOR &&
1778 MAJOR(SCpnt->request.rq_dev) != SCSI_CDROM_MAJOR) {
1779 struct Scsi_Host * next;
1781 for (next = host->block; next != host; next = next->block)
1782 wake_up(&next->host_wait);
1788 * Now try and drain the mid-level queue if any commands have been
1789 * inserted. Check to see whether the queue even has anything in
1790 * it first, as otherwise this is useless overhead.
1792 if( SCpnt->host->pending_commands != NULL )
1794 scsi_mlqueue_finish(SCpnt->host, SCpnt->device);
1797 wake_up(&host->host_wait);
1800 * If we have valid sense information, then some kind of recovery
1801 * must have taken place. Make a note of this.
1803 if( scsi_sense_valid(SCpnt) )
1805 SCpnt->result |= (DRIVER_SENSE << 24);
1808 SCSI_LOG_MLCOMPLETE(3,printk("Notifying upper driver of completion for device %d %x\n",
1809 SCpnt->device->id, SCpnt->result));
1811 SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
1812 SCpnt->state = SCSI_STATE_FINISHED;
1813 SCpnt->done (SCpnt);
1816 #ifdef CONFIG_MODULES
1817 static int scsi_register_host(Scsi_Host_Template *);
1818 static void scsi_unregister_host(Scsi_Host_Template *);
1819 #endif
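/* Allocate a buffer from the statically sized DMA pool. The length must be
 * a multiple of 512 bytes and no larger than one page; scsi_malloc() scans
 * each pool page's free-sector bitmap for a run of enough free sectors,
 * marks them in use (with interrupts disabled around the bitmap update),
 * and returns NULL when no such run exists. */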
1821 void *scsi_malloc(unsigned int len)
1823 unsigned int nbits, mask;
1824 unsigned long flags;
1825 int i, j;
1826 if(len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
1827 return NULL;
1829 save_flags(flags);
1830 cli();
1831 nbits = len >> 9;
1832 mask = (1 << nbits) - 1;
1834 for(i=0;i < dma_sectors / SECTORS_PER_PAGE; i++)
1835 for(j=0; j<=SECTORS_PER_PAGE - nbits; j++){
1836 if ((dma_malloc_freelist[i] & (mask << j)) == 0){
1837 dma_malloc_freelist[i] |= (mask << j);
1838 restore_flags(flags);
1839 scsi_dma_free_sectors -= nbits;
1840 #ifdef DEBUG
1841 SCSI_LOG_MLQUEUE(3,printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9)));
1842 printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9));
1843 #endif
1844 return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
1847 restore_flags(flags);
1848 return NULL; /* Nope. No more */
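/* Typical pairing (a sketch, assuming a 1024-byte bounce buffer is wanted):
 *
 *     char *buf = scsi_malloc(1024);
 *     if (buf) {
 *             ... use buf for DMA ...
 *             scsi_free(buf, 1024);
 *     }
 *
 * The length passed to scsi_free() must match the original allocation,
 * since it is used to rebuild the bitmap mask that is cleared below. */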
1851 int scsi_free(void *obj, unsigned int len)
1853 unsigned int page, sector, nbits, mask;
1854 unsigned long flags;
1856 #ifdef DEBUG
1857 unsigned long ret = 0;
1859 #ifdef __mips__
1860 __asm__ __volatile__ ("move\t%0,$31":"=r"(ret));
1861 #else
1862 ret = __builtin_return_address(0);
1863 #endif
1864 printk("scsi_free %p %d\n",obj, len);
1865 SCSI_LOG_MLQUEUE(3,printk("SFree: %p %d\n",obj, len));
1866 #endif
1868 for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
1869 unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
1870 if ((unsigned long) obj >= page_addr &&
1871 (unsigned long) obj < page_addr + PAGE_SIZE)
1873 sector = (((unsigned long) obj) - page_addr) >> 9;
1875 nbits = len >> 9;
1876 mask = (1 << nbits) - 1;
1878 if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
1879 panic ("scsi_free:Bad memory alignment");
1881 save_flags(flags);
1882 cli();
1883 if((dma_malloc_freelist[page] &
1884 (mask << sector)) != (mask<<sector)){
1885 #ifdef DEBUG
1886 printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
1887 obj, len, ret);
1888 #endif
1889 panic("scsi_free:Trying to free unused memory");
1891 scsi_dma_free_sectors += nbits;
1892 dma_malloc_freelist[page] &= ~(mask << sector);
1893 restore_flags(flags);
1894 return 0;
1897 panic("scsi_free:Bad offset");
1901 int scsi_loadable_module_flag; /* Set after we scan builtin drivers */
1903 void * scsi_init_malloc(unsigned int size, int gfp_mask)
1905 void * retval;
1908 * For buffers used by the DMA pool, we assume page aligned
1909 * structures.
1911 if ((size % PAGE_SIZE) == 0) {
1912 int order, a_size;
1913 for (order = 0, a_size = PAGE_SIZE;
1914 a_size < size; order++, a_size <<= 1)
1916 retval = (void *) __get_free_pages(gfp_mask | GFP_DMA, order);
1917 } else
1918 retval = kmalloc(size, gfp_mask);
1920 if (retval)
1921 memset(retval, 0, size);
1922 return retval;
1926 void scsi_init_free(char * ptr, unsigned int size)
1929 * We need this special code here because the DMA pool assumes
1930 * page aligned data. Besides, it is wasteful to allocate
1931 * page sized chunks with kmalloc.
1933 if ((size % PAGE_SIZE) == 0) {
1934 int order, a_size;
1936 for (order = 0, a_size = PAGE_SIZE;
1937 a_size < size; order++, a_size <<= 1)
1939 free_pages((unsigned long)ptr, order);
1940 } else
1941 kfree(ptr);
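/*
 * scsi_build_commandblocks() allocates queue_depth Scsi_Cmnd structures
 * for a device (defaulting queue_depth to the host's cmd_per_lun) and
 * chains them onto SDpnt->device_queue for use by the mid-level code.
 */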
1944 void scsi_build_commandblocks(Scsi_Device * SDpnt)
1946 struct Scsi_Host *host = SDpnt->host;
1947 int j;
1948 Scsi_Cmnd * SCpnt;
1950 if (SDpnt->queue_depth == 0)
1951 SDpnt->queue_depth = host->cmd_per_lun;
1952 SDpnt->device_queue = NULL;
1954 for(j=0;j<SDpnt->queue_depth;j++){
1955 SCpnt = (Scsi_Cmnd *)
1956 scsi_init_malloc(sizeof(Scsi_Cmnd),
1957 GFP_ATOMIC |
1958 (host->unchecked_isa_dma ? GFP_DMA : 0));
1959 memset(&SCpnt->eh_timeout, 0, sizeof(SCpnt->eh_timeout));
1960 SCpnt->host = host;
1961 SCpnt->device = SDpnt;
1962 SCpnt->target = SDpnt->id;
1963 SCpnt->lun = SDpnt->lun;
1964 SCpnt->channel = SDpnt->channel;
1965 SCpnt->request.rq_status = RQ_INACTIVE;
1966 SCpnt->host_wait = FALSE;
1967 SCpnt->device_wait = FALSE;
1968 SCpnt->use_sg = 0;
1969 SCpnt->old_use_sg = 0;
1970 SCpnt->old_cmd_len = 0;
1971 SCpnt->underflow = 0;
1972 SCpnt->transfersize = 0;
1973 SCpnt->serial_number = 0;
1974 SCpnt->serial_number_at_timeout = 0;
1975 SCpnt->host_scribble = NULL;
1976 SCpnt->next = SDpnt->device_queue;
1977 SDpnt->device_queue = SCpnt;
1978 SCpnt->state = SCSI_STATE_UNUSED;
1979 SCpnt->owner = SCSI_OWNER_NOBODY;
1981 SDpnt->has_cmdblocks = 1;
1984 #ifndef MODULE /* { */
1986 * scsi_dev_init() is our initialization routine, which in turn calls host
1987 * initialization, bus scanning, and sd/st initialization routines.
1988 * This is only used at boot time.
1990 __initfunc(int scsi_dev_init(void))
1992 Scsi_Device * SDpnt;
1993 struct Scsi_Host * shpnt;
1994 struct Scsi_Device_Template * sdtpnt;
1995 #ifdef FOO_ON_YOU
1996 return;
1997 #endif
1999 /* Yes we're here... */
2000 #if CONFIG_PROC_FS
2001 dispatch_scsi_info_ptr = dispatch_scsi_info;
2002 #endif
2004 /* Init a few things so we can "malloc" memory. */
2005 scsi_loadable_module_flag = 0;
2007 /* Register the /proc/scsi/scsi entry */
2008 #if CONFIG_PROC_FS
2009 proc_scsi_register(0, &proc_scsi_scsi);
2010 #endif
2012 /* initialize all hosts */
2013 scsi_init();
2016 * This is where the processing takes place for most everything
2017 * when commands are completed. Until we do this, we will not be able
2018 * to queue any commands.
2020 init_bh(SCSI_BH, scsi_bottom_half_handler);
2022 for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) {
2023 scan_scsis(shpnt,0,0,0,0); /* scan for scsi devices */
2024 if (shpnt->select_queue_depths != NULL)
2025 (shpnt->select_queue_depths)(shpnt, shpnt->host_queue);
2028 printk("scsi : detected ");
2029 for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2030 if (sdtpnt->dev_noticed && sdtpnt->name)
2031 printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name,
2032 (sdtpnt->dev_noticed != 1) ? "s" : "");
2033 printk("total.\n");
2035 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2036 if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
2038 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2040 for(SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2042 /* SDpnt->scsi_request_fn = NULL; */
2043 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2044 if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
2045 if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
2050 * This should build the DMA pool.
2052 resize_dma_pool();
2055 * OK, now we finish the initialization by doing spin-up, read
2056 * capacity, etc, etc
2058 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2059 if(sdtpnt->finish && sdtpnt->nr_dev)
2060 (*sdtpnt->finish)();
2062 scsi_loadable_module_flag = 1;
2064 return 0;
2066 #endif /* MODULE */ /* } */
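/*
 * print_inquiry() pretty-prints the vendor, model, revision, device type
 * and ANSI SCSI revision fields of a standard INQUIRY response.
 */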
2068 static void print_inquiry(unsigned char *data)
2070 int i;
2072 printk(" Vendor: ");
2073 for (i = 8; i < 16; i++)
2075 if (data[i] >= 0x20 && i < data[4] + 5)
2076 printk("%c", data[i]);
2077 else
2078 printk(" ");
2081 printk(" Model: ");
2082 for (i = 16; i < 32; i++)
2084 if (data[i] >= 0x20 && i < data[4] + 5)
2085 printk("%c", data[i]);
2086 else
2087 printk(" ");
2090 printk(" Rev: ");
2091 for (i = 32; i < 36; i++)
2093 if (data[i] >= 0x20 && i < data[4] + 5)
2094 printk("%c", data[i]);
2095 else
2096 printk(" ");
2099 printk("\n");
2101 i = data[0] & 0x1f;
2103 printk(" Type: %s ",
2104 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown " );
2105 printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
2106 if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
2107 printk(" CCS\n");
2108 else
2109 printk("\n");
2113 #ifdef CONFIG_PROC_FS
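/*
 * scsi_proc_info() implements both directions of /proc/scsi/scsi.  A read
 * (inout == 0) produces the "Attached devices" listing, using the usual
 * procfs begin/offset/length bookkeeping; a write parses the "dump",
 * "log", "add-single-device" and "remove-single-device" commands that are
 * documented in the comments below.
 */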
2114 int scsi_proc_info(char *buffer, char **start, off_t offset, int length,
2115 int hostno, int inout)
2117 Scsi_Cmnd *SCpnt;
2118 struct Scsi_Device_Template *SDTpnt;
2119 Scsi_Device *scd;
2120 struct Scsi_Host *HBA_ptr;
2121 char *p;
2122 int host, channel, id, lun;
2123 int size, len = 0;
2124 off_t begin = 0;
2125 off_t pos = 0;
2127 if(inout == 0) {
2129 * First, see if there are any attached devices or not.
2131 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next)
2133 if( HBA_ptr->host_queue != NULL )
2135 break;
2138 size = sprintf(buffer+len,"Attached devices: %s\n", (HBA_ptr)?"":"none");
2139 len += size;
2140 pos = begin + len;
2141 for (HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next)
2143 #if 0
2144 size += sprintf(buffer+len,"scsi%2d: %s\n", (int) HBA_ptr->host_no,
2145 HBA_ptr->hostt->procname);
2146 len += size;
2147 pos = begin + len;
2148 #endif
2149 for(scd = HBA_ptr->host_queue; scd; scd = scd->next)
2151 proc_print_scsidevice(scd, buffer, &size, len);
2152 len += size;
2153 pos = begin + len;
2155 if (pos < offset) {
2156 len = 0;
2157 begin = pos;
2159 if (pos > offset + length)
2160 goto stop_output;
2164 stop_output:
2165 *start=buffer+(offset-begin); /* Start of wanted data */
2166 len-=(offset-begin); /* Start slop */
2167 if(len>length)
2168 len = length; /* Ending slop */
2169 return (len);
2172 if(!buffer || length < 11 || strncmp("scsi", buffer, 4))
2173 return(-EINVAL);
2176 * Usage: echo "scsi dump #N" > /proc/scsi/scsi
2177 * to dump status of all scsi commands. The number is used to specify the level
2178 * of detail in the dump.
2180 if(!strncmp("dump", buffer + 5, 4))
2182 unsigned int level;
2184 p = buffer + 10;
2186 if( *p == '\0' )
2187 return (-EINVAL);
2189 level = simple_strtoul(p, NULL, 0);
2190 scsi_dump_status(level);
2193 * Usage: echo "scsi log token #N" > /proc/scsi/scsi
2194 * where token is one of [error,timeout,scan,mlqueue,mlcomplete,llqueue,
2195 * llcomplete,hlqueue,hlcomplete,ioctl], or the special tokens "all"/"none".
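*
* For example (illustrative only; pick whichever token you want to trace):
*
*	echo "scsi log mlqueue 3" > /proc/scsi/scsi
*	echo "scsi log all" > /proc/scsi/scsi
*	echo "scsi log none" > /proc/scsi/scsi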
2197 #if CONFIG_SCSI_LOGGING /* { */
2199 if(!strncmp("log", buffer + 5, 3))
2201 char * token;
2202 unsigned int level;
2204 p = buffer + 9;
2205 token = p;
2206 while(*p != ' ' && *p != '\t' && *p != '\0')
2208 p++;
2211 if( *p == '\0' )
2213 if( strncmp(token, "all", 3) == 0 )
2216 * Turn on absolutely everything.
2218 scsi_logging_level = ~0;
2220 else if( strncmp(token, "none", 4) == 0 )
2223 * Turn off absolutely everything.
2225 scsi_logging_level = 0;
2227 else
2229 return (-EINVAL);
2232 else
2234 *p++ = '\0';
2236 level = simple_strtoul(p, NULL, 0);
2239 * Now figure out what to do with it.
2241 if( strcmp(token, "error") == 0 )
2243 SCSI_SET_ERROR_RECOVERY_LOGGING(level);
2245 else if( strcmp(token, "timeout") == 0 )
2247 SCSI_SET_TIMEOUT_LOGGING(level);
2249 else if( strcmp(token, "scan") == 0 )
2251 SCSI_SET_SCAN_BUS_LOGGING(level);
2253 else if( strcmp(token, "mlqueue") == 0 )
2255 SCSI_SET_MLQUEUE_LOGGING(level);
2257 else if( strcmp(token, "mlcomplete") == 0 )
2259 SCSI_SET_MLCOMPLETE_LOGGING(level);
2261 else if( strcmp(token, "llqueue") == 0 )
2263 SCSI_SET_LLQUEUE_LOGGING(level);
2265 else if( strcmp(token, "llcomplete") == 0 )
2267 SCSI_SET_LLCOMPLETE_LOGGING(level);
2269 else if( strcmp(token, "hlqueue") == 0 )
2271 SCSI_SET_HLQUEUE_LOGGING(level);
2273 else if( strcmp(token, "hlcomplete") == 0 )
2275 SCSI_SET_HLCOMPLETE_LOGGING(level);
2277 else if( strcmp(token, "ioctl") == 0 )
2279 SCSI_SET_IOCTL_LOGGING(level);
2281 else
2283 return (-EINVAL);
2287 printk("scsi logging level set to 0x%8.8x\n", scsi_logging_level);
2289 #endif /* CONFIG_SCSI_LOGGING */ /* } */
2292 * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi
2293 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
2294 * Consider this feature BETA.
2295 * CAUTION: This is not for hotplugging your peripherals. SCSI was
2296 * not designed for hotplugging, and you could damage your
2297 * hardware!
2298 * It may, however, be safe to power on an already connected
2299 * device, but there is no guarantee that doing so will not
2300 * corrupt an ongoing data transfer.
2302 if(!strncmp("add-single-device", buffer + 5, 17)) {
2303 p = buffer + 23;
2305 host = simple_strtoul(p, &p, 0);
2306 channel = simple_strtoul(p+1, &p, 0);
2307 id = simple_strtoul(p+1, &p, 0);
2308 lun = simple_strtoul(p+1, &p, 0);
2310 printk("scsi singledevice %d %d %d %d\n", host, channel,
2311 id, lun);
2313 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next)
2315 if( HBA_ptr->host_no == host )
2317 break;
2320 if(!HBA_ptr)
2321 return(-ENXIO);
2323 for(scd = HBA_ptr->host_queue; scd; scd = scd->next)
2325 if((scd->channel == channel
2326 && scd->id == id
2327 && scd->lun == lun))
2329 break;
2333 if(scd)
2334 return(-ENOSYS); /* We do not yet support unplugging */
2336 scan_scsis (HBA_ptr, 1, channel, id, lun);
2337 return(length);
2342 * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi
2343 * with "0 1 2 3" replaced by your "Host Channel Id Lun".
2345 * Consider this feature pre-BETA.
2347 * CAUTION: This is not for hotplugging your peripherals. SCSI was
2348 * not designed for hotplugging, and you could damage your
2349 * hardware and thoroughly confuse the SCSI subsystem.
2352 else if(!strncmp("remove-single-device", buffer + 5, 20)) {
2353 p = buffer + 26;
2355 host = simple_strtoul(p, &p, 0);
2356 channel = simple_strtoul(p+1, &p, 0);
2357 id = simple_strtoul(p+1, &p, 0);
2358 lun = simple_strtoul(p+1, &p, 0);
2361 for(HBA_ptr = scsi_hostlist; HBA_ptr; HBA_ptr = HBA_ptr->next)
2363 if( HBA_ptr->host_no == host )
2365 break;
2368 if(!HBA_ptr)
2369 return(-ENODEV);
2371 for(scd = HBA_ptr->host_queue; scd; scd = scd->next)
2373 if((scd->channel == channel
2374 && scd->id == id
2375 && scd->lun == lun))
2377 break;
2381 if(scd == NULL)
2382 return(-ENODEV); /* there is no such device attached */
2384 if(scd->access_count)
2385 return(-EBUSY);
2387 SDTpnt = scsi_devicelist;
2388 while(SDTpnt != NULL) {
2389 if(SDTpnt->detach) (*SDTpnt->detach)(scd);
2390 SDTpnt = SDTpnt->next;
2393 if(scd->attached == 0) {
2395 * Nobody is using this device any more.
2396 * Free all of the command structures.
2398 for(SCpnt=scd->device_queue; SCpnt; SCpnt = scd->device_queue)
2400 scd->device_queue = SCpnt->next;
2401 scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
2403 /* Now we can remove the device structure */
2404 if( scd->next != NULL )
2405 scd->next->prev = scd->prev;
2407 if( scd->prev != NULL )
2408 scd->prev->next = scd->next;
2410 if( HBA_ptr->host_queue == scd )
2412 HBA_ptr->host_queue = scd->next;
2415 scsi_init_free((char *) scd, sizeof(Scsi_Device));
2416 } else {
2417 return(-EBUSY);
2419 return(0);
2421 return(-EINVAL);
2423 #endif
2426 * Go through the device list and recompute the most appropriate size
2427 * for the dma pool. Then grab more memory (as required).
2429 static void resize_dma_pool(void)
2431 int i;
2432 unsigned long size;
2433 struct Scsi_Host * shpnt;
2434 struct Scsi_Host * host = NULL;
2435 Scsi_Device * SDpnt;
2436 unsigned long flags;
2437 FreeSectorBitmap * new_dma_malloc_freelist = NULL;
2438 unsigned int new_dma_sectors = 0;
2439 unsigned int new_need_isa_buffer = 0;
2440 unsigned char ** new_dma_malloc_pages = NULL;
2442 if( !scsi_hostlist )
2445 * Free up the DMA pool.
2447 if( scsi_dma_free_sectors != dma_sectors )
2448 panic("SCSI DMA pool memory leak %d %d\n",scsi_dma_free_sectors,dma_sectors);
2450 for(i=0; i < dma_sectors / SECTORS_PER_PAGE; i++)
2451 scsi_init_free(dma_malloc_pages[i], PAGE_SIZE);
2452 if (dma_malloc_pages)
2453 scsi_init_free((char *) dma_malloc_pages,
2454 (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages));
2455 dma_malloc_pages = NULL;
2456 if (dma_malloc_freelist)
2457 scsi_init_free((char *) dma_malloc_freelist,
2458 (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_freelist));
2459 dma_malloc_freelist = NULL;
2460 dma_sectors = 0;
2461 scsi_dma_free_sectors = 0;
2462 return;
2464 /* Next, check to see if we need to extend the DMA buffer pool */
2466 new_dma_sectors = 2*SECTORS_PER_PAGE; /* Base value we use */
2468 if (__pa(high_memory)-1 > ISA_DMA_THRESHOLD)
2469 need_isa_bounce_buffers = 1;
2470 else
2471 need_isa_bounce_buffers = 0;
2473 if (scsi_devicelist)
2474 for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
2475 new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
2477 for (host = scsi_hostlist; host; host = host->next)
2479 for (SDpnt=host->host_queue; SDpnt; SDpnt = SDpnt->next)
2482 * sd and sr drivers allocate scatterlists.
2483 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
2484 * buffers for 2k sector size and 1k fs.
2485 * sg driver allocates buffers < 4k.
2486 * st driver does not need buffers from the dma pool.
2487 * estimate 4k buffer/command for devices of unknown type (should panic).
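* As a rough illustrative example: a TYPE_ROM device with queue_depth 2 on
* a host whose sg_tablesize is 16 contributes
* ((16 * sizeof(struct scatterlist) + 511) >> 9) * 2 sectors for its
* scatterlists plus (2048 >> 9) * 2 = 8 sectors for its 2k sector buffers.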
2489 if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
2490 SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
2491 new_dma_sectors += ((host->sg_tablesize *
2492 sizeof(struct scatterlist) + 511) >> 9) *
2493 SDpnt->queue_depth;
2494 if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
2495 new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
2497 else if (SDpnt->type == TYPE_SCANNER ||
2498 SDpnt->type == TYPE_PROCESSOR ||
2499 SDpnt->type == TYPE_MEDIUM_CHANGER) {
2500 new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
2502 else {
2503 if (SDpnt->type != TYPE_TAPE) {
2504 printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
2505 new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
2509 if(host->unchecked_isa_dma &&
2510 need_isa_bounce_buffers &&
2511 SDpnt->type != TYPE_TAPE) {
2512 new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
2513 SDpnt->queue_depth;
2514 new_need_isa_buffer++;
2519 #ifdef DEBUG_INIT
2520 printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
2521 #endif
2523 /* Round up to a multiple of 16 sectors; the mask also caps the pool at 0xfff0 sectors (~32MB): */
2524 new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
2527 * We never shrink the buffers - this leads to
2528 * race conditions that I would rather not even think
2529 * about right now.
2531 if( new_dma_sectors < dma_sectors )
2532 new_dma_sectors = dma_sectors;
2534 if (new_dma_sectors)
2536 size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
2537 new_dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC);
2538 memset(new_dma_malloc_freelist, 0, size);
2540 size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(*new_dma_malloc_pages);
2541 new_dma_malloc_pages = (unsigned char **) scsi_init_malloc(size, GFP_ATOMIC);
2542 memset(new_dma_malloc_pages, 0, size);
2546 * If we need more buffers, expand the list.
2548 if( new_dma_sectors > dma_sectors ) {
2549 for(i=dma_sectors / SECTORS_PER_PAGE; i< new_dma_sectors / SECTORS_PER_PAGE; i++)
2550 new_dma_malloc_pages[i] = (unsigned char *)
2551 scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
2554 /* When we manipulate the actual DMA free list, we need to
2555 * protect it against concurrent access
2557 save_flags(flags);
2558 cli();
2559 if (dma_malloc_freelist)
2561 size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
2562 memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
2563 scsi_init_free((char *) dma_malloc_freelist, size);
2565 dma_malloc_freelist = new_dma_malloc_freelist;
2567 if (dma_malloc_pages)
2569 size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages);
2570 memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
2571 scsi_init_free((char *) dma_malloc_pages, size);
2574 scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
2575 dma_malloc_pages = new_dma_malloc_pages;
2576 dma_sectors = new_dma_sectors;
2577 scsi_need_isa_buffer = new_need_isa_buffer;
2578 restore_flags(flags);
2580 #ifdef DEBUG_INIT
2581 printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors);
2582 printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
2583 printk("resize_dma_pool: need isa buffers = %d\n", scsi_need_isa_buffer);
2584 #endif
2587 #ifdef CONFIG_MODULES /* a big #ifdef block... */
2590 * This entry point should be called by a loadable module if it is trying
2591 * to add a low level scsi driver to the system.
2593 static int scsi_register_host(Scsi_Host_Template * tpnt)
2595 int pcount;
2596 struct Scsi_Host * shpnt;
2597 Scsi_Device * SDpnt;
2598 struct Scsi_Device_Template * sdtpnt;
2599 const char * name;
2601 if (tpnt->next || !tpnt->detect) return 1; /* Must be already loaded, or
2602 * no detect routine available
2604 pcount = next_scsi_host;
2605 if ((tpnt->present = tpnt->detect(tpnt)))
2607 if(pcount == next_scsi_host)
2609 if(tpnt->present > 1)
2611 printk("Failure to register low-level scsi driver");
2612 scsi_unregister_host(tpnt);
2613 return 1;
2616 * The low-level driver's detect routine found an adapter but did not
2617 * register a host for it, so register one on its behalf now.
2619 scsi_register(tpnt,0);
2621 tpnt->next = scsi_hosts; /* Add to the linked list */
2622 scsi_hosts = tpnt;
2624 /* Add the new driver to /proc/scsi */
2625 #if CONFIG_PROC_FS
2626 build_proc_dir_entries(tpnt);
2627 #endif
2631 * Add the kernel threads for each host adapter that will
2632 * handle error correction.
2634 for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
2636 if( shpnt->hostt == tpnt && shpnt->hostt->use_new_eh_code )
2638 struct semaphore sem = MUTEX_LOCKED;
2640 shpnt->eh_notify = &sem;
2641 kernel_thread((int (*)(void *))scsi_error_handler,
2642 (void *) shpnt, 0);
2645 * Now wait for the kernel error thread to initialize itself
2646 * as it might be needed when we scan the bus.
2648 down (&sem);
2649 shpnt->eh_notify = NULL;
2653 for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
2655 if(shpnt->hostt == tpnt)
2657 if(tpnt->info)
2659 name = tpnt->info(shpnt);
2661 else
2663 name = tpnt->name;
2665 printk ("scsi%d : %s\n", /* And print a little message */
2666 shpnt->host_no, name);
2670 printk ("scsi : %d host%s.\n", next_scsi_host,
2671 (next_scsi_host == 1) ? "" : "s");
2673 scsi_make_blocked_list();
2675 /* The next step is to call scan_scsis here. This generates the
2676 * Scsi_Device entries
2678 for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next)
2680 if(shpnt->hostt == tpnt)
2682 scan_scsis(shpnt,0,0,0,0);
2683 if (shpnt->select_queue_depths != NULL)
2685 (shpnt->select_queue_depths)(shpnt, shpnt->host_queue);
2690 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2692 if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)();
2696 * Next we create the Scsi_Cmnd structures for this host
2698 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2700 for(SDpnt = shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
2701 if(SDpnt->host->hostt == tpnt)
2703 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2704 if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt);
2705 if(SDpnt->attached) scsi_build_commandblocks(SDpnt);
2710 * Now that we have all of the devices, resize the DMA pool,
2711 * as required. */
2712 resize_dma_pool();
2715 /* This does any final handling that is required. */
2716 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2718 if(sdtpnt->finish && sdtpnt->nr_dev)
2720 (*sdtpnt->finish)();
2725 #if defined(USE_STATIC_SCSI_MEMORY)
2726 printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2727 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2728 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2729 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2730 #endif
2732 MOD_INC_USE_COUNT;
2733 return 0;
2737 * Similarly, this entry point should be called by a loadable module if it
2738 * is trying to remove a low level scsi driver from the system.
2740 * Note - there is a fatal flaw in the deregister module function.
2741 * There is no way to return a code that says 'I cannot be unloaded now'.
2742 * The system relies entirely upon usage counts that are maintained,
2743 * and the assumption is that if the usage count is 0, then the module
2744 * can be unloaded.
2746 static void scsi_unregister_host(Scsi_Host_Template * tpnt)
2748 unsigned long flags;
2749 int online_status;
2750 int pcount;
2751 Scsi_Cmnd * SCpnt;
2752 Scsi_Device * SDpnt;
2753 Scsi_Device * SDpnt1;
2754 struct Scsi_Device_Template * sdtpnt;
2755 struct Scsi_Host * sh1;
2756 struct Scsi_Host * shpnt;
2757 Scsi_Host_Template * SHT;
2758 Scsi_Host_Template * SHTp;
2761 * First verify that this host adapter is completely free with no pending
2762 * commands
2764 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2766 for(SDpnt = shpnt->host_queue; SDpnt;
2767 SDpnt = SDpnt->next)
2769 if(SDpnt->host->hostt == tpnt
2770 && SDpnt->host->hostt->module
2771 && SDpnt->host->hostt->module->usecount) return;
2773 * FIXME(eric) - We need to find a way to notify the
2774 * low level driver that we are shutting down - via the
2775 * special device entry that still needs to get added.
2777 * Is detach interface below good enough for this?
2783 * FIXME(eric) put a spinlock on this. We force all of the devices offline
2784 * to help prevent race conditions where other hosts/processors could try and
2785 * get in and queue a command.
2787 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2789 for(SDpnt = shpnt->host_queue; SDpnt;
2790 SDpnt = SDpnt->next)
2792 if(SDpnt->host->hostt == tpnt )
2793 SDpnt->online = FALSE;
2798 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2800 if (shpnt->hostt != tpnt)
2802 continue;
2805 for(SDpnt = shpnt->host_queue; SDpnt;
2806 SDpnt = SDpnt->next)
2809 * Loop over all of the commands associated with the device. If any of
2810 * them are busy, then set the state back to inactive and bail.
2812 for(SCpnt = SDpnt->device_queue; SCpnt;
2813 SCpnt = SCpnt->next)
2815 online_status = SDpnt->online;
2816 SDpnt->online = FALSE;
2817 save_flags(flags);
2818 cli();
2819 if(SCpnt->request.rq_status != RQ_INACTIVE)
2821 restore_flags(flags);
2822 printk("SCSI device not inactive - state=%d, id=%d\n",
2823 SCpnt->request.rq_status, SCpnt->target);
2824 for(SDpnt1 = shpnt->host_queue; SDpnt1;
2825 SDpnt1 = SDpnt1->next)
2827 for(SCpnt = SDpnt1->device_queue; SCpnt;
2828 SCpnt = SCpnt->next)
2829 if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING)
2830 SCpnt->request.rq_status = RQ_INACTIVE;
2832 SDpnt->online = online_status;
2833 printk("Device busy???\n");
2834 return;
2837 * No, this device is really free. Mark it as such, and
2838 * continue on.
2840 SCpnt->state = SCSI_STATE_DISCONNECTING;
2841 SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */
2842 restore_flags(flags);
2846 /* Next we detach the high level drivers from the Scsi_Device structures */
2848 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2850 if(shpnt->hostt != tpnt)
2852 continue;
2855 for(SDpnt = shpnt->host_queue; SDpnt;
2856 SDpnt = SDpnt->next)
2858 for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next)
2859 if(sdtpnt->detach) (*sdtpnt->detach)(SDpnt);
2861 /* If something is still attached, punt */
2862 if (SDpnt->attached)
2864 printk("Attached usage count = %d\n", SDpnt->attached);
2865 return;
2871 * Next, kill the kernel error recovery thread for this host.
2873 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2875 if( shpnt->hostt == tpnt
2876 && shpnt->hostt->use_new_eh_code
2877 && shpnt->ehandler != NULL )
2879 struct semaphore sem = MUTEX_LOCKED;
2881 shpnt->eh_notify = &sem;
2882 send_sig(SIGKILL, shpnt->ehandler, 1);
2883 down(&sem);
2884 shpnt->eh_notify = NULL;
2888 /* Next we free up the Scsi_Cmnd structures for this host */
2890 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
2892 if(shpnt->hostt != tpnt)
2894 continue;
2897 for(SDpnt = shpnt->host_queue; SDpnt;
2898 SDpnt = shpnt->host_queue)
2900 while (SDpnt->device_queue)
2902 SCpnt = SDpnt->device_queue->next;
2903 scsi_init_free((char *) SDpnt->device_queue, sizeof(Scsi_Cmnd));
2904 SDpnt->device_queue = SCpnt;
2906 SDpnt->has_cmdblocks = 0;
2908 /* Next free up the Scsi_Device structures for this host */
2909 shpnt->host_queue = SDpnt->next;
2910 scsi_init_free((char *) SDpnt, sizeof (Scsi_Device));
2915 /* Next we go through and remove the instances of the individual hosts
2916 * that were detected */
2918 for(shpnt = scsi_hostlist; shpnt; shpnt = sh1)
2920 sh1 = shpnt->next;
2921 if(shpnt->hostt == tpnt) {
2922 if(shpnt->loaded_as_module) {
2923 pcount = next_scsi_host;
2924 /* Remove the /proc/scsi directory entry */
2925 #if CONFIG_PROC_FS
2926 proc_scsi_unregister(tpnt->proc_dir,
2927 shpnt->host_no + PROC_SCSI_FILE);
2928 #endif
2929 if(tpnt->release)
2930 (*tpnt->release)(shpnt);
2931 else {
2932 /* This is the default case for the release function.
2933 * It should do the right thing for most correctly
2934 * written host adapters.
2936 if (shpnt->irq) free_irq(shpnt->irq, NULL);
2937 if (shpnt->dma_channel != 0xff) free_dma(shpnt->dma_channel);
2938 if (shpnt->io_port && shpnt->n_io_port)
2939 release_region(shpnt->io_port, shpnt->n_io_port);
2941 if(pcount == next_scsi_host) scsi_unregister(shpnt);
2942 tpnt->present--;
2948 * If there are absolutely no more hosts left, it is safe
2949 * to completely nuke the DMA pool. The resize operation will
2950 * do the right thing and free everything.
2952 if( !scsi_hosts )
2953 resize_dma_pool();
2955 printk ("scsi : %d host%s.\n", next_scsi_host,
2956 (next_scsi_host == 1) ? "" : "s");
2958 #if defined(USE_STATIC_SCSI_MEMORY)
2959 printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n",
2960 (scsi_memory_upper_value - scsi_memory_lower_value) / 1024,
2961 (scsi_init_memory_start - scsi_memory_lower_value) / 1024,
2962 (scsi_memory_upper_value - scsi_init_memory_start) / 1024);
2963 #endif
2965 scsi_make_blocked_list();
2967 /* There were some hosts that were loaded at boot time, so we cannot
2968 do any more than this */
2969 if (tpnt->present) return;
2971 /* OK, this is the very last step. Remove this host adapter from the
2972 linked list. */
2973 for(SHTp=NULL, SHT=scsi_hosts; SHT; SHTp=SHT, SHT=SHT->next)
2974 if(SHT == tpnt) {
2975 if(SHTp)
2976 SHTp->next = SHT->next;
2977 else
2978 scsi_hosts = SHT->next;
2979 SHT->next = NULL;
2980 break;
2983 /* Rebuild the /proc/scsi directory entries */
2984 #if CONFIG_PROC_FS
2985 proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino);
2986 #endif
2987 MOD_DEC_USE_COUNT;
2991 * This entry point should be called by a loadable module if it is trying
2992 * to add a high level scsi driver to the system.
2994 static int scsi_register_device_module(struct Scsi_Device_Template * tpnt)
2996 Scsi_Device * SDpnt;
2997 struct Scsi_Host * shpnt;
2999 if (tpnt->next) return 1;
3001 scsi_register_device(tpnt);
3003 * First scan the devices that we know about, and see if we notice them.
3006 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3008 for(SDpnt = shpnt->host_queue; SDpnt;
3009 SDpnt = SDpnt->next)
3011 if(tpnt->detect) SDpnt->attached += (*tpnt->detect)(SDpnt);
3016 * If any of the devices would match this driver, then perform the
3017 * init function.
3019 if(tpnt->init && tpnt->dev_noticed)
3020 if ((*tpnt->init)()) return 1;
3023 * Now actually connect the devices to the new driver.
3025 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3027 for(SDpnt = shpnt->host_queue; SDpnt;
3028 SDpnt = SDpnt->next)
3030 if(tpnt->attach) (*tpnt->attach)(SDpnt);
3032 * If this driver attached to the device and we don't yet have any
3033 * command blocks for this device, allocate some.
3035 if(SDpnt->attached && SDpnt->has_cmdblocks == 0)
3037 SDpnt->online = TRUE;
3038 scsi_build_commandblocks(SDpnt);
3044 * This does any final handling that is required.
3046 if(tpnt->finish && tpnt->nr_dev) (*tpnt->finish)();
3047 resize_dma_pool();
3048 MOD_INC_USE_COUNT;
3049 return 0;
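/*
 * This is the converse of scsi_register_device_module(): detach the high
 * level driver from every device, free the command blocks of any device
 * that no longer has a driver attached, and unlink the template from
 * scsi_devicelist.  It refuses to do anything while the module is in use.
 */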
3052 static int scsi_unregister_device(struct Scsi_Device_Template * tpnt)
3054 Scsi_Device * SDpnt;
3055 Scsi_Cmnd * SCpnt;
3056 struct Scsi_Host * shpnt;
3057 struct Scsi_Device_Template * spnt;
3058 struct Scsi_Device_Template * prev_spnt;
3061 * If we are busy, this is not going to fly.
3063 if(tpnt->module->usecount != 0) return 0;
3066 * Next, detach the devices from the driver.
3069 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3071 for(SDpnt = shpnt->host_queue; SDpnt;
3072 SDpnt = SDpnt->next)
3074 if(tpnt->detach) (*tpnt->detach)(SDpnt);
3075 if(SDpnt->attached == 0)
3077 SDpnt->online = FALSE;
3080 * Nobody is using this device any more. Free all of the
3081 * command structures.
3083 for(SCpnt = SDpnt->device_queue; SCpnt;
3084 SCpnt = SDpnt->device_queue)
3086 if(SCpnt == SDpnt->device_queue)
3087 SDpnt->device_queue = SCpnt->next;
3088 scsi_init_free((char *) SCpnt, sizeof(*SCpnt));
3090 SDpnt->has_cmdblocks = 0;
3095 * Extract the template from the linked list.
3097 spnt = scsi_devicelist;
3098 prev_spnt = NULL;
3099 while(spnt != tpnt)
3101 prev_spnt = spnt;
3102 spnt = spnt->next;
3104 if(prev_spnt == NULL)
3105 scsi_devicelist = tpnt->next;
3106 else
3107 prev_spnt->next = spnt->next;
3109 MOD_DEC_USE_COUNT;
3111 * Final cleanup for the driver is done in the driver sources in the
3112 * cleanup function.
3114 return 0;
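/*
 * scsi_register_module()/scsi_unregister_module() below are the entry
 * points that loadable drivers actually call.  A minimal sketch of a low
 * level driver module (driver_template and MY_DRIVER_TEMPLATE are
 * placeholder names, not defined in this file; setting the module field
 * lets the use-count checks above find the driver's struct module):
 *
 *	static Scsi_Host_Template driver_template = MY_DRIVER_TEMPLATE;
 *
 *	int init_module(void)
 *	{
 *		driver_template.module = &__this_module;
 *		if (scsi_register_module(MODULE_SCSI_HA, &driver_template))
 *			return -ENODEV;
 *		return 0;
 *	}
 *
 *	void cleanup_module(void)
 *	{
 *		scsi_unregister_module(MODULE_SCSI_HA, &driver_template);
 *	}
 *
 * A high level driver does the same with MODULE_SCSI_DEV and a
 * struct Scsi_Device_Template.
 */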
3118 int scsi_register_module(int module_type, void * ptr)
3120 switch(module_type)
3122 case MODULE_SCSI_HA:
3123 return scsi_register_host((Scsi_Host_Template *) ptr);
3125 /* Load upper level device handler of some kind */
3126 case MODULE_SCSI_DEV:
3127 #ifdef CONFIG_KERNELD
3128 if (scsi_hosts == NULL)
3129 request_module("scsi_hostadapter");
3130 #endif
3131 return scsi_register_device_module((struct Scsi_Device_Template *) ptr);
3132 /* The rest of these are not yet implemented */
3134 /* Load constants.o */
3135 case MODULE_SCSI_CONST:
3137 /* Load specialized ioctl handler for some device. Intended for
3138 * cdroms that have non-SCSI2 audio command sets. */
3139 case MODULE_SCSI_IOCTL:
3141 default:
3142 return 1;
3146 void scsi_unregister_module(int module_type, void * ptr)
3148 switch(module_type)
3150 case MODULE_SCSI_HA:
3151 scsi_unregister_host((Scsi_Host_Template *) ptr);
3152 break;
3153 case MODULE_SCSI_DEV:
3154 scsi_unregister_device((struct Scsi_Device_Template *) ptr);
3155 break;
3156 /* The rest of these are not yet implemented. */
3157 case MODULE_SCSI_CONST:
3158 case MODULE_SCSI_IOCTL:
3159 break;
3160 default:
3162 return;
3165 #endif /* CONFIG_MODULES */
3168 * Function: scsi_dump_status
3170 * Purpose: Brain dump of scsi system, used for problem solving.
3172 * Arguments: level - used to indicate level of detail.
3174 * Notes: The level isn't used at all yet, but we need to find some way
3175 * of sensibly logging varying degrees of information. A quick one-line
3176 * display of each command, plus the status would be most useful.
3178 * This does depend upon CONFIG_SCSI_LOGGING - I do want some way of turning
3179 * it all off if the user wants a lean and mean kernel. It would probably
3180 * also be useful to allow the user to specify one single host to be dumped.
3181 * A second argument to the function would be useful for that purpose.
3183 * FIXME - some formatting of the output into tables would be very handy.
3185 static void
3186 scsi_dump_status(int level)
3188 #if CONFIG_PROC_FS
3189 #if CONFIG_SCSI_LOGGING /* { */
3190 int i;
3191 struct Scsi_Host * shpnt;
3192 Scsi_Cmnd * SCpnt;
3193 Scsi_Device * SDpnt;
3194 printk("Dump of scsi host parameters:\n");
3195 i = 0;
3196 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3198 printk(" %d %d %d : %d %p\n",
3199 shpnt->host_failed,
3200 shpnt->host_busy,
3201 atomic_read(&shpnt->host_active),
3202 shpnt->host_blocked,
3203 shpnt->pending_commands);
3207 printk("\n\n");
3208 printk("Dump of scsi command parameters:\n");
3209 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3211 printk("h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result\n");
3212 for(SDpnt=shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
3214 for(SCpnt=SDpnt->device_queue; SCpnt; SCpnt = SCpnt->next)
3216 /* (0) h:c:t:l (dev sect nsect cnumsec sg) (ret all flg) (to/cmd to ito) cmd snse result %d %x */
3217 printk("(%3d) %2d:%1d:%2d:%2d (%6s %4ld %4ld %4ld %4x %1d) (%1d %1d 0x%2x) (%4d %4d %4d) 0x%2.2x 0x%2.2x 0x%8.8x\n",
3218 i++,
3220 SCpnt->host->host_no,
3221 SCpnt->channel,
3222 SCpnt->target,
3223 SCpnt->lun,
3225 kdevname(SCpnt->request.rq_dev),
3226 SCpnt->request.sector,
3227 SCpnt->request.nr_sectors,
3228 SCpnt->request.current_nr_sectors,
3229 SCpnt->request.rq_status,
3230 SCpnt->use_sg,
3232 SCpnt->retries,
3233 SCpnt->allowed,
3234 SCpnt->flags,
3236 SCpnt->timeout_per_command,
3237 SCpnt->timeout,
3238 SCpnt->internal_timeout,
3240 SCpnt->cmnd[0],
3241 SCpnt->sense_buffer[2],
3242 SCpnt->result);
3247 for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
3249 for(SDpnt=shpnt->host_queue; SDpnt; SDpnt = SDpnt->next)
3251 /* Now dump the request lists for each block device */
3252 printk("Dump of pending block device requests\n");
3253 for(i=0; i<MAX_BLKDEV; i++)
3255 if(blk_dev[i].current_request)
3257 struct request * req;
3258 printk("%d: ", i);
3259 req = blk_dev[i].current_request;
3260 while(req)
3262 printk("(%s %d %ld %ld %ld) ",
3263 kdevname(req->rq_dev),
3264 req->cmd,
3265 req->sector,
3266 req->nr_sectors,
3267 req->current_nr_sectors);
3268 req = req->next;
3270 printk("\n");
3275 printk("wait_for_request = %p\n", wait_for_request);
3276 #endif /* CONFIG_SCSI_LOGGING */ /* } */
3277 #endif /* CONFIG_PROC_FS */
3280 #ifdef MODULE
3282 int init_module(void)
3284 unsigned long size;
3287 * This makes /proc/scsi visible.
3289 #if CONFIG_PROC_FS
3290 dispatch_scsi_info_ptr = dispatch_scsi_info;
3291 #endif
3293 scsi_loadable_module_flag = 1;
3295 /* Register the /proc/scsi/scsi entry */
3296 #if CONFIG_PROC_FS
3297 proc_scsi_register(0, &proc_scsi_scsi);
3298 #endif
3301 dma_sectors = PAGE_SIZE / SECTOR_SIZE;
3302 scsi_dma_free_sectors= dma_sectors;
3304 * Set up a minimal DMA buffer list - this will be used during scan_scsis
3305 * in some cases.
3308 /* One bit per sector to indicate free/busy */
3309 size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap);
3310 dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC);
3311 memset(dma_malloc_freelist, 0, size);
3313 /* One pointer per page for the page list */
3314 dma_malloc_pages = (unsigned char **)
3315 scsi_init_malloc((dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages), GFP_ATOMIC);
3316 dma_malloc_pages[0] = (unsigned char *)
3317 scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA);
3320 * This is where the processing takes place for most everything
3321 * when commands are completed.
3323 init_bh(SCSI_BH, scsi_bottom_half_handler);
3325 return 0;
3328 void cleanup_module( void)
3330 remove_bh(SCSI_BH);
3332 #if CONFIG_PROC_FS
3333 proc_scsi_unregister(0, PROC_SCSI_SCSI);
3335 /* No, we're not here anymore. Don't show the /proc/scsi files. */
3336 dispatch_scsi_info_ptr = 0L;
3337 #endif
3340 * Free up the DMA pool.
3342 resize_dma_pool();
3345 #endif /* MODULE */
3348 * Overrides for Emacs so that we follow Linus's tabbing style.
3349 * Emacs will notice this stuff at the end of the file and automatically
3350 * adjust the settings for this buffer only. This must remain at the end
3351 * of the file.
3352 * ---------------------------------------------------------------------------
3353 * Local variables:
3354 * c-indent-level: 4
3355 * c-brace-imaginary-offset: 0
3356 * c-brace-offset: -4
3357 * c-argdecl-indent: 4
3358 * c-label-offset: -4
3359 * c-continued-statement-offset: 4
3360 * c-continued-brace-offset: 0
3361 * indent-tabs-mode: nil
3362 * tab-width: 8
3363 * End: