Import 2.3.18pre1
[davej-history.git] / drivers/block/ll_rw_blk.c
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>

#include <linux/module.h>
/*
 * MAC Floppy IWM hooks
 */

#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];
/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);
/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is not yet fully safe: the kernel lock covers
 * that part for now).
 *
 * There is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
/*
 * used to wait on when there are no free requests
 */
DECLARE_WAIT_QUEUE_HEAD(wait_for_request);

/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV] = {0, };
/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV] = { NULL, NULL, };
/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 *
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV] = { NULL, NULL, };
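
/*
 * Illustrative sketch only: a block driver would typically publish its
 * geometry through the per-major tables above from its init routine.
 * FOO_MAJOR, FOO_MINORS and the foo_* arrays below are hypothetical
 * names, not part of this kernel.
 *
 *	static int foo_sizes[FOO_MINORS];	 device sizes, in 1K blocks
 *	static int foo_blksizes[FOO_MINORS];	 soft block size, in bytes
 *
 *	int __init foo_init(void)
 *	{
 *		blk_size[FOO_MAJOR] = foo_sizes;
 *		blksize_size[FOO_MAJOR] = foo_blksizes;
 *		hardsect_size[FOO_MAJOR] = NULL;  512-byte sectors assumed
 *		read_ahead[FOO_MAJOR] = 8;	  sectors of read-ahead
 *		...
 *		return 0;
 *	}
 */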
/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV] = { NULL, NULL, };

/*
 * Max number of segments per request
 */
int * max_segments[MAX_BLKDEV] = { NULL, NULL, };
static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}

static inline int get_max_segments(kdev_t dev)
{
	if (!max_segments[MAJOR(dev)])
		return MAX_SEGMENTS;
	return max_segments[MAJOR(dev)][MINOR(dev)];
}
/*
 * Is called with the request spinlock acquired.
 * NOTE: the device-specific queue() functions
 * have to be atomic!
 */
static inline struct request **get_queue(kdev_t dev)
{
	int major = MAJOR(dev);
	struct blk_dev_struct *bdev = blk_dev + major;

	if (bdev->queue)
		return bdev->queue(dev);
	return &blk_dev[major].current_request;
}
/*
 * remove the plug and let it rip..
 */
void unplug_device(void * data)
{
	struct blk_dev_struct * dev = (struct blk_dev_struct *) data;
	int queue_new_request=0;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock,flags);
	if (dev->current_request == &dev->plug) {
		struct request * next = dev->plug.next;
		dev->current_request = next;
		if (next || dev->queue) {
			dev->plug.next = NULL;
			queue_new_request = 1;
		}
	}
	if (queue_new_request)
		(dev->request_fn)();

	spin_unlock_irqrestore(&io_request_lock,flags);
}
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
 */
static inline void plug_device(struct blk_dev_struct * dev)
{
	if (dev->current_request)
		return;
	dev->current_request = &dev->plug;
	queue_task(&dev->plug_tq, &tq_disk);
}
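
/*
 * Taken together, plug_device() and unplug_device() implement request
 * "plugging": when the first buffer arrives on an empty queue,
 * plug_device() parks the &dev->plug sentinel at the head and schedules
 * dev->plug_tq on tq_disk, so further requests can pile up and be
 * sorted/merged before the driver sees them.  A later
 * run_task_queue(&tq_disk) - done e.g. by __get_request_wait() below, or
 * by whoever ends up waiting for the I/O - runs unplug_device(), which
 * removes the sentinel and finally kicks dev->request_fn().
 */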
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in (on SMP the request queue
 * spinlock has to be acquired), and will still be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&wait_for_request, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irqsave(&io_request_lock,flags);
		req = get_request(n, dev);
		spin_unlock_irqrestore(&io_request_lock,flags);
		if (req)
			break;
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock,flags);
	req = get_request(n, dev);
	spin_unlock_irqrestore(&io_request_lock,flags);
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
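
/*
 * ll_rw_block() below checks is_read_only() and refuses WRITE requests
 * to devices marked read-only; the bits themselves are typically set
 * from a driver's BLKROSET ioctl handler via set_device_ro().
 */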
static inline void drive_stat_acct(int cmd, unsigned long nr_sectors,
				   short disk_index)
{
	kstat.dk_drive[disk_index]++;
	if (cmd == READ) {
		kstat.dk_drive_rio[disk_index]++;
		kstat.dk_drive_rblk[disk_index] += nr_sectors;
	} else if (cmd == WRITE) {
		kstat.dk_drive_wio[disk_index]++;
		kstat.dk_drive_wblk[disk_index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
/*
 * add-request adds a request to the linked list.
 * It disables interrupts (acquires the request spinlock) so that it can muck
 * with the request-lists in peace. Thus it should be called with no spinlocks
 * held.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */
void add_request(struct blk_dev_struct * dev, struct request * req)
{
	int major = MAJOR(req->rq_dev);
	int minor = MINOR(req->rq_dev);
	struct request * tmp, **current_request;
	short disk_index;
	unsigned long flags;
	int queue_new_request = 0;

	switch (major) {
		case DAC960_MAJOR+0:
			disk_index = (minor & 0x00f8) >> 3;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case SCSI_DISK0_MAJOR:
			disk_index = (minor & 0x00f0) >> 4;
			if (disk_index < 4)
				drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE0_MAJOR:	/* same as HD_MAJOR */
		case XT_DISK_MAJOR:
			disk_index = (minor & 0x0040) >> 6;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
			break;
		case IDE1_MAJOR:
			disk_index = ((minor & 0x0040) >> 6) + 2;
			drive_stat_acct(req->cmd, req->nr_sectors, disk_index);
		default:
			break;
	}

	req->next = NULL;

	/*
	 * We use the goto to reduce locking complexity
	 */
	spin_lock_irqsave(&io_request_lock,flags);
	current_request = get_queue(req->rq_dev);

	if (!(tmp = *current_request)) {
		*current_request = req;
		if (dev->current_request != &dev->plug)
			queue_new_request = 1;
		goto out;
	}
	for ( ; tmp->next ; tmp = tmp->next) {
		const int after_current = IN_ORDER(tmp,req);
		const int before_next = IN_ORDER(req,tmp->next);

		if (!IN_ORDER(tmp,tmp->next)) {
			if (after_current || before_next)
				break;
		} else {
			if (after_current && before_next)
				break;
		}
	}
	req->next = tmp->next;
	tmp->next = req;

	/* for SCSI devices, call request_fn unconditionally */
	if (scsi_blk_major(major))
		queue_new_request = 1;
	if (major >= COMPAQ_SMART2_MAJOR+0 &&
	    major <= COMPAQ_SMART2_MAJOR+7)
		queue_new_request = 1;
	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
		queue_new_request = 1;
out:
	if (queue_new_request)
		(dev->request_fn)();

	spin_unlock_irqrestore(&io_request_lock,flags);
}
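
/*
 * The insertion loop above relies on IN_ORDER() (a macro from blk.h),
 * which imposes the classic elevator ordering, roughly by device and
 * then by ascending sector.  A new request is linked in at the first
 * point that keeps the list sorted, or at a wrap-around point where the
 * existing ordering already breaks.
 */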
/*
 * Has to be called with the request spinlock acquired
 */
static inline void attempt_merge (struct request *req, int max_sectors)
{
	struct request *next = req->next;

	if (!next)
		return;
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
		return;
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors += next->nr_sectors;
	next->rq_status = RQ_INACTIVE;
	req->next = next->next;
	wake_up (&wait_for_request);
}
void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
	int rw_ahead, max_req, max_sectors;
	unsigned long flags;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	/* We'd better have a real physical mapping! */
	if (!buffer_mapped(bh))
		BUG();

	/* It had better not be a new buffer by the time we see it */
	if (buffer_new(bh))
		BUG();

	/* Only one thread can actually submit the I/O. */
	if (test_and_set_bit(BH_Lock, &bh->b_state))
		return;

	if (blk_size[major]) {
		unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;

		if (maxsector < count || maxsector - count < sector) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			goto end_io;
		}
	}
	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) /* Hmmph! Already have it */
				goto end_io;
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads take precedence */
			break;
		case WRITERAW:
			rw = WRITE;
			goto do_write;	/* Skip the buffer refile */
		case WRITE:
			if (!test_and_clear_bit(BH_Dirty, &bh->b_state))
				goto end_io;	/* Hmmph! Nothing to write */
			refile_buffer(bh);
		do_write:
			/*
			 * We don't allow the write-requests to fill up the
			 * queue completely:  we want some room for reads,
			 * as they take precedence. The last third of the
			 * requests are only for reads.
			 */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			printk(KERN_ERR "make_request: bad block dev cmd,"
			       " must be R/W/RA/WA\n");
			goto end_io;
	}

	/* look for a free request. */
	/* Loop uses two requests, 1 for loop and 1 for the real device.
	 * Cut max_req in half to avoid running out and deadlocking. */
	if ((major == LOOP_MAJOR) || (major == NBD_MAJOR))
		max_req >>= 1;
	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
	spin_lock_irqsave(&io_request_lock,flags);
	req = *get_queue(bh->b_rdev);
	if (!req) {
		/* MD and loop can't handle plugging without deadlocking */
		if (major != MD_MAJOR && major != LOOP_MAJOR &&
		    major != DDV_MAJOR && major != NBD_MAJOR)
			plug_device(blk_dev + major); /* is atomic */
	} else switch (major) {
	     case IDE0_MAJOR:	/* same as HD_MAJOR */
	     case IDE1_MAJOR:
	     case FLOPPY_MAJOR:
	     case IDE2_MAJOR:
	     case IDE3_MAJOR:
	     case IDE4_MAJOR:
	     case IDE5_MAJOR:
	     case IDE6_MAJOR:
	     case IDE7_MAJOR:
	     case IDE8_MAJOR:
	     case IDE9_MAJOR:
	     case ACSI_MAJOR:
	     case MFM_ACORN_MAJOR:
		/*
		 * The scsi disk and cdrom drivers completely remove the request
		 * from the queue when they start processing an entry.  For this
		 * reason it is safe to continue to add links to the top entry for
		 * those devices.
		 *
		 * All other drivers need to jump over the first entry, as that
		 * entry may be busy being processed and we thus can't change it.
		 */
		if (req == blk_dev[major].current_request)
			req = req->next;
		if (!req)
			break;
		/* fall through */

	     case SCSI_DISK0_MAJOR:
	     case SCSI_DISK1_MAJOR:
	     case SCSI_DISK2_MAJOR:
	     case SCSI_DISK3_MAJOR:
	     case SCSI_DISK4_MAJOR:
	     case SCSI_DISK5_MAJOR:
	     case SCSI_DISK6_MAJOR:
	     case SCSI_DISK7_MAJOR:
	     case SCSI_CDROM_MAJOR:
	     case DAC960_MAJOR+0:
	     case DAC960_MAJOR+1:
	     case DAC960_MAJOR+2:
	     case DAC960_MAJOR+3:
	     case DAC960_MAJOR+4:
	     case DAC960_MAJOR+5:
	     case DAC960_MAJOR+6:
	     case DAC960_MAJOR+7:
	     case I2O_MAJOR:
	     case COMPAQ_SMART2_MAJOR+0:
	     case COMPAQ_SMART2_MAJOR+1:
	     case COMPAQ_SMART2_MAJOR+2:
	     case COMPAQ_SMART2_MAJOR+3:
	     case COMPAQ_SMART2_MAJOR+4:
	     case COMPAQ_SMART2_MAJOR+5:
	     case COMPAQ_SMART2_MAJOR+6:
	     case COMPAQ_SMART2_MAJOR+7:

		do {
			if (req->sem)
				continue;
			if (req->cmd != rw)
				continue;
			if (req->nr_sectors + count > max_sectors)
				continue;
			if (req->rq_dev != bh->b_rdev)
				continue;
			/* Can we add it to the end of this request? */
			if (req->sector + req->nr_sectors == sector) {
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				/* Can we now merge this req with the next? */
				attempt_merge(req, max_sectors);
			/* or to the beginning? */
			} else if (req->sector - count == sector) {
				bh->b_reqnext = req->bh;
				req->bh = bh;
				req->buffer = bh->b_data;
				req->current_nr_sectors = count;
				req->sector = sector;
				req->nr_sectors += count;
			} else
				continue;

			spin_unlock_irqrestore(&io_request_lock,flags);
			return;

		} while ((req = req->next) != NULL);
	}
	/* find an unused request. */
	req = get_request(max_req, bh->b_rdev);

	spin_unlock_irqrestore(&io_request_lock,flags);

	/* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
	if (!req) {
		if (rw_ahead)
			goto end_io;
		req = __get_request_wait(max_req, bh->b_rdev);
	}

	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->next = NULL;
	add_request(major+blk_dev,req);
	return;

end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	unsigned int major;
	int correct_size;
	struct blk_dev_struct * dev;
	int i;

	dev = NULL;
	if ((major = MAJOR(bh[0]->b_dev)) < MAX_BLKDEV)
		dev = blk_dev + major;
	if (!dev || !dev->request_fn) {
		printk(KERN_ERR
		       "ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bh[0]->b_dev), bh[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bh[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		if (bh[i]->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bh[0]->b_dev),
			       correct_size, bh[i]->b_size);
			goto sorry;
		}

		/* Md remaps blocks now */
		bh[i]->b_rdev = bh[i]->b_dev;
		bh[i]->b_rsector=bh[i]->b_blocknr*(bh[i]->b_size >> 9);
#ifdef CONFIG_BLK_DEV_MD
		if (major==MD_MAJOR &&
		    md_map (MINOR(bh[i]->b_dev), &bh[i]->b_rdev,
			    &bh[i]->b_rsector, bh[i]->b_size >> 9)) {
			printk (KERN_ERR
				"Bad md_map in ll_rw_block\n");
			goto sorry;
		}
#endif
	}

	if ((rw & WRITE) && is_read_only(bh[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bh[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		set_bit(BH_Req, &bh[i]->b_state);
#ifdef CONFIG_BLK_DEV_MD
		if (MAJOR(bh[i]->b_dev) == MD_MAJOR) {
			md_make_request(MINOR (bh[i]->b_dev), rw, bh[i]);
			continue;
		}
#endif
		make_request(MAJOR(bh[i]->b_rdev), rw, bh[i]);
	}
	return;

sorry:
	for (i = 0; i < nr; i++) {
		clear_bit(BH_Dirty, &bh[i]->b_state);
		clear_bit(BH_Uptodate, &bh[i]->b_state);
		bh[i]->b_end_io(bh[i], 0);
	}
	return;
}
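
/*
 * Illustrative sketch only: this is roughly how bread()-style callers in
 * fs/buffer.c drive ll_rw_block() for a single synchronous read.  The
 * helper name read_block_sync() is hypothetical.
 *
 *	static void read_block_sync(struct buffer_head *bh)
 *	{
 *		if (buffer_uptodate(bh))
 *			return;			 already valid in the cache
 *		ll_rw_block(READ, 1, &bh);	 queue the request
 *		wait_on_buffer(bh);		 sleep until b_end_io runs
 *		if (!buffer_uptodate(bh))
 *			printk(KERN_ERR "block read failed\n");
 *	}
 */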
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init( void );
#endif
/*
 * First step of what used to be end_request
 *
 * 0 means continue with end_that_request_last,
 * 1 means we are done
 */

int
end_that_request_first( struct request *req, int uptodate, char *name )
{
	struct buffer_head * bh;
	int nsect;

	req->errors = 0;
	if (!uptodate) {
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
			kdevname(req->rq_dev), name, req->sector);
		if ((bh = req->bh) != NULL) {
			nsect = bh->b_size >> 9;
			req->nr_sectors--;
			req->nr_sectors &= ~(nsect - 1);
			req->sector += nsect;
			req->sector &= ~(nsect - 1);
		}
	}

	if ((bh = req->bh) != NULL) {
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}
void
end_that_request_last( struct request *req )
{
	if (req->sem != NULL)
		up(req->sem);
	req->rq_status = RQ_INACTIVE;
	wake_up(&wait_for_request);
}
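
/*
 * Drivers normally reach the two helpers above through the static
 * end_request() wrapper that <linux/blk.h> builds for them; the sketch
 * below shows that usual pattern under that assumption (CURRENT and
 * DEVICE_NAME are the blk.h conventions for the head of the driver's
 * queue and its name string, foo_end_request() is a hypothetical name).
 *
 *	static void foo_end_request(int uptodate)
 *	{
 *		struct request *req = CURRENT;
 *
 *		if (end_that_request_first(req, uptodate, DEVICE_NAME))
 *			return;		 more buffers left in this request
 *		CURRENT = req->next;	 take it off the queue
 *		end_that_request_last(req);
 *	}
 */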
int __init blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->request_fn      = NULL;
		dev->queue           = NULL;
		dev->current_request = NULL;
		dev->plug.rq_status  = RQ_INACTIVE;
		dev->plug.cmd        = -1;
		dev->plug.next       = NULL;
		dev->plug_tq.sync    = 0;
		dev->plug_tq.routine = &unplug_device;
		dev->plug_tq.data    = dev;
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
		req->next = NULL;
	}
	memset(ro_bits,0,sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));
#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif /* CONFIG_ISP16_CDI */
#ifdef CONFIG_BLK_DEV_IDE
	ide_init();		/* this MUST precede hd_init */
#endif
#ifdef CONFIG_BLK_DEV_HD
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
#if !defined (__mc68000__) && !defined(CONFIG_PPC) && !defined(__sparc__)\
    && !defined(CONFIG_APUS) && !defined(__sh__)
	outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif /* CONFIG_ATARI_ACSI */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_DDV
	ddv_init();
#endif
#ifdef CONFIG_BLK_DEV_NBD
	nbd_init();
#endif
	return 0;
}
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);