Import 2.3.99pre10-3
[davej-history.git] / drivers / block / ll_rw_blk.c
/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/raid/md.h>

#include <linux/module.h>
/*
 * MAC Floppy IWM hooks
 */

#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif

/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);
/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is casually still unsafe. The kernel lock
 * protects this part currently.).
 *
 * There is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
/*
 * used to wait on when there are no free requests
 */
DECLARE_WAIT_QUEUE_HEAD(wait_for_request);

/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block-devices, in units of 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];
/*
 * blksize_size contains the current block size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];
/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];

/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV];

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV];
static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}
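
/*
 * Illustrative sketch (not part of the original file): how a block driver
 * might publish the per-major tables above for a single-disk major with
 * one minor.  EXAMPLE_MAJOR and the example_* arrays are hypothetical
 * names used only for this sketch.
 */
#if 0
#define EXAMPLE_MAJOR	240			/* experimental major, hypothetical */

static int example_sizes_kb[1] = { 1024 * 1024 };	/* device size, in KB	*/
static int example_blksizes[1] = { 1024 };		/* soft block size	*/
static int example_hardsect[1] = { 512 };		/* hardware sector size	*/
static int example_max_sect[1] = { 128 };		/* max sectors/request	*/

static void example_register_sizes(void)
{
	blk_size[EXAMPLE_MAJOR]      = example_sizes_kb;
	blksize_size[EXAMPLE_MAJOR]  = example_blksizes;
	hardsect_size[EXAMPLE_MAJOR] = example_hardsect;
	max_sectors[EXAMPLE_MAJOR]   = example_max_sect;
	read_ahead[EXAMPLE_MAJOR]    = 8;		/* read-ahead, in sectors */
}
#endif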
/*
 * NOTE: the device-specific queue() functions
 * have to be atomic!
 */
request_queue_t * blk_get_queue (kdev_t dev)
{
	int major = MAJOR(dev);
	struct blk_dev_struct *bdev = blk_dev + major;
	unsigned long flags;
	request_queue_t *ret;

	spin_lock_irqsave(&io_request_lock,flags);
	if (bdev->queue)
		ret = bdev->queue(dev);
	else
		ret = &blk_dev[major].request_queue;
	spin_unlock_irqrestore(&io_request_lock,flags);

	return ret;
}
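
/*
 * Illustrative sketch (not part of the original file): a driver that keeps
 * one request queue per unit can make blk_get_queue() return the right one
 * by installing a queue() callback for its major.  The callback runs under
 * io_request_lock, so it must be atomic.  EXAMPLE_MAJOR, MAX_EXAMPLE_UNITS
 * and example_queues[] are hypothetical.
 */
#if 0
#define MAX_EXAMPLE_UNITS	16

static request_queue_t example_queues[MAX_EXAMPLE_UNITS];

static request_queue_t *example_find_queue(kdev_t dev)
{
	/* atomic: called with io_request_lock held */
	return &example_queues[MINOR(dev)];
}

static void example_install_queue_fn(void)
{
	blk_dev[EXAMPLE_MAJOR].queue = example_find_queue;
}
#endif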
void blk_cleanup_queue(request_queue_t * q)
{
	memset(q, 0, sizeof(*q));
}

void blk_queue_headactive(request_queue_t * q, int active)
{
	q->head_active = active;
}

void blk_queue_pluggable (request_queue_t * q, plug_device_fn *plug)
{
	q->plug_device_fn = plug;
}

void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	q->make_request_fn = mfn;
}

static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	if (req->nr_segments < max_segments) {
		req->nr_segments++;
		q->elevator.nr_segments++;
		return 1;
	}
	return 0;
}

static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	if (bh->b_data + bh->b_size == req->bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int total_segments = req->nr_segments + next->nr_segments;
	int same_segment;

	same_segment = 0;
	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
		total_segments--;
		same_segment = 1;
	}

	if (total_segments > max_segments)
		return 0;

	q->elevator.nr_segments -= same_segment;
	req->nr_segments = total_segments;
	return 1;
}
218 * "plug" the device if there are no outstanding requests: this will
219 * force the transfer to start only after we have put all the requests
220 * on the list.
222 * This is called with interrupts off and no requests on the queue.
223 * (and with the request spinlock aquired)
225 static void generic_plug_device (request_queue_t *q, kdev_t dev)
227 #ifdef CONFIG_BLK_DEV_MD
228 if (MAJOR(dev) == MD_MAJOR) {
229 spin_unlock_irq(&io_request_lock);
230 BUG();
232 #endif
233 if (!list_empty(&q->queue_head))
234 return;
236 q->plugged = 1;
237 queue_task(&q->plug_tq, &tq_disk);
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
	INIT_LIST_HEAD(&q->queue_head);
	elevator_init(&q->elevator);
	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->make_request_fn	= NULL;
	q->plug_tq.sync		= 0;
	q->plug_tq.routine	= &generic_unplug_device;
	q->plug_tq.data		= q;
	q->plugged		= 0;
	/*
	 * These booleans describe the queue properties.  We set the
	 * default (and most common) values here.  Other drivers can
	 * use the appropriate functions to alter the queue properties
	 * as appropriate.
	 */
	q->plug_device_fn	= generic_plug_device;
	q->head_active		= 1;
}
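
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * driver sets up its queue at init time and then overrides the defaults.
 * do_example_request() and EXAMPLE_MAJOR are hypothetical.
 */
#if 0
static void do_example_request(request_queue_t *q)
{
	/* process requests from q->queue_head here */
}

static int __init example_blkdev_init(void)
{
	request_queue_t *q = &blk_dev[EXAMPLE_MAJOR].request_queue;

	blk_init_queue(q, do_example_request);
	/* this driver dequeues requests before working on them,
	   so the head of the queue is not "active": */
	blk_queue_headactive(q, 0);
	return 0;
}
#endif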
/*
 * remove the plug and let it rip..
 */
void generic_unplug_device(void * data)
{
	request_queue_t * q = (request_queue_t *) data;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock,flags);
	if (q->plugged) {
		q->plugged = 0;
		if (!list_empty(&q->queue_head))
			(q->request_fn)(q);
	}
	spin_unlock_irqrestore(&io_request_lock,flags);
}
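
/*
 * Illustrative sketch (not part of the original file): anyone who queues
 * I/O and then has to wait for it should kick tq_disk, so that plugged
 * queues get unplugged and request_fn runs.  This is what
 * __get_request_wait() below and the buffer-cache wait paths do;
 * example_wait_for_buffer() is hypothetical.
 */
#if 0
static void example_wait_for_buffer(struct buffer_head *bh)
{
	ll_rw_block(READ, 1, &bh);
	run_task_queue(&tq_disk);	/* unplug: runs generic_unplug_device() */
	wait_on_buffer(bh);
}
#endif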
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in (on SMP the request queue
 * spinlock has to be acquired), and will still be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	req->special = NULL;
	return req;
}
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue_exclusive(&wait_for_request, &wait);
	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE);
		spin_lock_irqsave(&io_request_lock,flags);
		req = get_request(n, dev);
		spin_unlock_irqrestore(&io_request_lock,flags);
		if (req)
			break;
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}

static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock,flags);
	req = get_request(n, dev);
	spin_unlock_irqrestore(&io_request_lock,flags);
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
inline void drive_stat_acct (kdev_t dev, int rw,
				unsigned long nr_sectors, int new_io)
{
	unsigned int major = MAJOR(dev);
	unsigned int index;

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
		return;

	kstat.dk_drive[major][index] += new_io;
	if (rw == READ) {
		kstat.dk_drive_rio[major][index] += new_io;
		kstat.dk_drive_rblk[major][index] += nr_sectors;
	} else if (rw == WRITE) {
		kstat.dk_drive_wio[major][index] += new_io;
		kstat.dk_drive_wblk[major][index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
/*
 * add-request adds a request to the linked list.
 * The caller must already hold io_request_lock with interrupts disabled,
 * since we muck with the request list in place.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */
static inline void add_request(request_queue_t * q, struct request * req,
			       struct list_head * head, int latency)
{
	int major;

	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

	if (list_empty(head)) {
		req->elevator_sequence = elevator_sequence(&q->elevator, latency);
		list_add(&req->queue, &q->queue_head);
		return;
	}
	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, latency);

	/*
	 * FIXME(eric) I don't understand why there is a need for this
	 * special case code.  It clearly doesn't fit any more with
	 * the new queueing architecture, and it got added in 2.3.10.
	 * I am leaving this in here until I hear back from the COMPAQ
	 * people.
	 */
	major = MAJOR(req->rq_dev);
	if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
	{
		(q->request_fn)(q);
	}

	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
	{
		(q->request_fn)(q);
	}
}
/*
 * Has to be called with the request spinlock acquired
 */
static void attempt_merge(request_queue_t * q,
			  struct request *req,
			  int max_sectors,
			  int max_segments)
{
	struct request *next;

	next = blkdev_next_request(req);
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors || next->sem)
		return;
	/*
	 * If we are not allowed to merge these requests, then
	 * return.  If we are allowed to merge, then the count
	 * will have been updated to the appropriate number,
	 * and we shouldn't do it here too.
	 */
	if(!(q->merge_requests_fn)(q, req, next, max_segments))
		return;

	elevator_merge_requests(&q->elevator, req, next);
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
	next->rq_status = RQ_INACTIVE;
	list_del(&next->queue);
	wake_up (&wait_for_request);
}
static inline void attempt_back_merge(request_queue_t * q,
				      struct request *req,
				      int max_sectors,
				      int max_segments)
{
	if (&req->queue == q->queue_head.prev)
		return;
	attempt_merge(q, req, max_sectors, max_segments);
}

static inline void attempt_front_merge(request_queue_t * q,
				       struct list_head * head,
				       struct request *req,
				       int max_sectors,
				       int max_segments)
{
	struct list_head * prev;

	prev = req->queue.prev;
	if (head == prev)
		return;
	attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
}
static inline void __make_request(request_queue_t * q, int rw,
				  struct buffer_head * bh)
{
	int major = MAJOR(bh->b_rdev);
	unsigned int sector, count;
	int max_segments = MAX_SEGMENTS;
	struct request * req;
	int rw_ahead, max_req, max_sectors;
	unsigned long flags;

	int orig_latency, latency, starving, sequence;
	struct list_head * entry, * head = &q->queue_head;
	elevator_t * elevator;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	if (blk_size[major]) {
		unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;

		if (maxsector < count || maxsector - count < sector) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
			if (!blk_size[major][MINOR(bh->b_rdev)])
				goto end_io;
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			goto end_io;
		}
	}
	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) /* Hmmph! Already have it */
				goto end_io;
			kstat.pgpgin++;
			max_req = NR_REQUEST;	/* reads take precedence */
			break;
		case WRITERAW:
			rw = WRITE;
			goto do_write;	/* Skip the buffer refile */
		case WRITE:
			if (!test_and_clear_bit(BH_Dirty, &bh->b_state))
				goto end_io;	/* Hmmph! Nothing to write */
			refile_buffer(bh);
		do_write:
			/*
			 * We don't allow the write-requests to fill up the
			 * queue completely:  we want some room for reads,
			 * as they take precedence. The last third of the
			 * requests are only for reads.
			 */
			kstat.pgpgout++;
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			BUG();
			goto end_io;
	}

	/* We'd better have a real physical mapping!
	   Check this bit only if the buffer was dirty and just locked
	   down by us so at this point flushpage will block and
	   won't clear the mapped bit under us. */
	if (!buffer_mapped(bh))
		BUG();

	/*
	 * Temporary solution - in 2.5 this will be done by the lowlevel
	 * driver. Create a bounce buffer if the buffer data points into
	 * high memory - keep the original buffer otherwise.
	 */
#if CONFIG_HIGHMEM
	bh = create_bounce(rw, bh);
#endif
/* look for a free request. */
	/*
	 * Loop uses two requests, 1 for loop and 1 for the real device.
	 * Cut max_req in half to avoid running out and deadlocking.
	 */
	if ((major == LOOP_MAJOR) || (major == NBD_MAJOR))
		max_req >>= 1;

	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

	elevator = &q->elevator;
	orig_latency = elevator_request_latency(elevator, rw);
	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
	spin_lock_irqsave(&io_request_lock,flags);
	elevator_debug(q, bh->b_rdev);

	if (list_empty(head)) {
		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
		goto get_rq;
	}

	/* avoid write-bombs to not hurt interactivity of reads */
	if (rw != READ && elevator->read_pendings)
		max_segments = elevator->max_bomb_segments;

	sequence = elevator->sequence;
	latency = orig_latency - elevator->nr_segments;
	starving = 0;
	entry = head;

	/*
	 * The scsi disk and cdrom drivers completely remove the request
	 * from the queue when they start processing an entry.  For this
	 * reason it is safe to continue to add links to the top entry
	 * for those devices.
	 *
	 * All other drivers need to jump over the first entry, as that
	 * entry may be busy being processed and we thus can't change
	 * it.
	 */
	if (q->head_active && !q->plugged)
		head = head->next;
	while ((entry = entry->prev) != head && !starving) {
		req = blkdev_entry_to_request(entry);
		if (!req->q)
			break;
		latency += req->nr_segments;
		if (elevator_sequence_before(req->elevator_sequence, sequence))
			starving = 1;
		if (latency < 0)
			continue;

		if (req->sem)
			continue;
		if (req->cmd != rw)
			continue;
		if (req->nr_sectors + count > max_sectors)
			continue;
		if (req->rq_dev != bh->b_rdev)
			continue;
		/* Can we add it to the end of this request? */
		if (req->sector + req->nr_sectors == sector) {
			if (latency - req->nr_segments < 0)
				break;
			/*
			 * The merge_fn is a more advanced way
			 * of accomplishing the same task.  Instead
			 * of applying a fixed limit of some sort
			 * we instead define a function which can
			 * determine whether or not it is safe to
			 * merge the request or not.
			 *
			 * See if this queue has rules that
			 * may suggest that we shouldn't merge
			 * this
			 */
			if(!(q->back_merge_fn)(q, req, bh, max_segments))
				break;
			req->bhtail->b_reqnext = bh;
			req->bhtail = bh;
			req->nr_sectors = req->hard_nr_sectors += count;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);

			elevator_merge_after(elevator, req, latency);

			/* Can we now merge this req with the next? */
			attempt_back_merge(q, req, max_sectors, max_segments);
		/* or to the beginning? */
		} else if (req->sector - count == sector) {
			if (starving)
				break;
			/*
			 * The merge_fn is a more advanced way
			 * of accomplishing the same task.  Instead
			 * of applying a fixed limit of some sort
			 * we instead define a function which can
			 * determine whether or not it is safe to
			 * merge the request or not.
			 *
			 * See if this queue has rules that
			 * may suggest that we shouldn't merge
			 * this
			 */
			if(!(q->front_merge_fn)(q, req, bh, max_segments))
				break;
			bh->b_reqnext = req->bh;
			req->bh = bh;
			req->buffer = bh->b_data;
			req->current_nr_sectors = count;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += count;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);

			elevator_merge_before(elevator, req, latency);

			attempt_front_merge(q, head, req, max_sectors, max_segments);
		} else
			continue;

		q->elevator.sequence++;
		spin_unlock_irqrestore(&io_request_lock,flags);
		return;
	}
/* find an unused request. */
get_rq:
	req = get_request(max_req, bh->b_rdev);

	/*
	 * if no request available: if rw_ahead, forget it,
	 * otherwise try again blocking..
	 */
	if (!req) {
		spin_unlock_irqrestore(&io_request_lock,flags);
		if (rw_ahead)
			goto end_io;
		req = __get_request_wait(max_req, bh->b_rdev);
		spin_lock_irqsave(&io_request_lock,flags);

		/* revalidate elevator */
		head = &q->queue_head;
		if (q->head_active && !q->plugged)
			head = head->next;
	}

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->nr_segments = 1; /* Always 1 for a new request. */
	req->nr_hw_segments = 1; /* Always 1 for a new request. */
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->q = q;
	add_request(q, req, head, orig_latency);
	elevator_account_request(elevator, req);

	spin_unlock_irqrestore(&io_request_lock, flags);
	return;

end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
int generic_make_request (request_queue_t *q, int rw, struct buffer_head * bh)
{
	unsigned long flags;
	int ret;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 */

	while (q->make_request_fn) {
		ret = q->make_request_fn(q, rw, bh);
		if (ret > 0) {
			q = blk_get_queue(bh->b_rdev);
			continue;
		}
		return ret;
	}
	/*
	 * Does the block device want us to queue
	 * the IO request? (normal case)
	 */
	__make_request(q, rw, bh);
	spin_lock_irqsave(&io_request_lock,flags);
	if (q && !q->plugged)
		(q->request_fn)(q);
	spin_unlock_irqrestore(&io_request_lock,flags);

	return 0;
}
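
/*
 * Illustrative sketch (not part of the original file): a stacking driver
 * (RAID, LVM, ...) installs a make_request_fn with blk_queue_make_request();
 * it remaps bh->b_rdev/bh->b_rsector and returns a value > 0 so that
 * generic_make_request() re-resolves the queue for the new target device.
 * example_remap_target() is hypothetical.
 */
#if 0
extern void example_remap_target(kdev_t dev, unsigned long sector,
				 kdev_t *real_dev, unsigned long *real_sector);

static int example_stacking_make_request(request_queue_t *q, int rw,
					 struct buffer_head *bh)
{
	kdev_t real_dev;
	unsigned long real_sector;

	example_remap_target(bh->b_rdev, bh->b_rsector,
			     &real_dev, &real_sector);
	bh->b_rdev = real_dev;
	bh->b_rsector = real_sector;
	return 1;	/* > 0: ask generic_make_request() to resubmit */
}
#endif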
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong to
   the same device */

static void __ll_rw_block(int rw, int nr, struct buffer_head * bhs[],
			  int haslock)
{
	struct buffer_head *bh;
	request_queue_t *q;
	unsigned int major;
	int correct_size;
	int i;

	major = MAJOR(bhs[0]->b_dev);
	q = blk_get_queue(bhs[0]->b_dev);
	if (!q) {
		printk(KERN_ERR
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bhs[0]->b_dev), bhs[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bhs[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		bh = bhs[i];
		if (bh->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bhs[0]->b_dev),
			       correct_size, bh->b_size);
			goto sorry;
		}
	}

	if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bhs[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		bh = bhs[i];

		/* Only one thread can actually submit the I/O. */
		if (haslock) {
			if (!buffer_locked(bh))
				BUG();
		} else {
			if (test_and_set_bit(BH_Lock, &bh->b_state))
				continue;
		}
		set_bit(BH_Req, &bh->b_state);

		/*
		 * First step, 'identity mapping' - RAID or LVM might
		 * further remap this.
		 */
		bh->b_rdev = bh->b_dev;
		bh->b_rsector = bh->b_blocknr * (bh->b_size>>9);

		generic_make_request(q, rw, bh);
	}
	return;

sorry:
	for (i = 0; i < nr; i++)
		buffer_IO_error(bhs[i]);
	return;
}
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 0);
}

void ll_rw_block_locked(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 1);
}
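
/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for ll_rw_block() - read one metadata block synchronously, the
 * way bread() does it.  example_read_block() is hypothetical.
 */
#if 0
static struct buffer_head *example_read_block(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = getblk(dev, block, size);

	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);	/* queue the read */
	wait_on_buffer(bh);		/* kicks tq_disk and sleeps */
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}
#endif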
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif

/*
 * First step of what used to be end_request
 *
 * 0 means continue with end_that_request_last,
 * 1 means we are done
 */

int end_that_request_first (struct request *req, int uptodate, char *name)
{
	struct buffer_head * bh;
	int nsect;

	req->errors = 0;
	if (!uptodate)
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
			kdevname(req->rq_dev), name, req->sector);

	if ((bh = req->bh) != NULL) {
		nsect = bh->b_size >> 9;
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->hard_sector += nsect;
			req->hard_nr_sectors -= nsect;
			req->sector = req->hard_sector;
			req->nr_sectors = req->hard_nr_sectors;

			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}
void end_that_request_last(struct request *req)
{
	if (req->q)
		BUG();
	if (req->sem != NULL)
		up(req->sem);
	req->rq_status = RQ_INACTIVE;
	wake_up(&wait_for_request);
}
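
/*
 * Illustrative sketch (not part of the original file): how a driver's
 * completion path typically uses the two helpers above - finish the
 * buffer_heads one by one, then retire the request, much like the
 * end_request() helper in <linux/blk.h>.  example_finish_request() is
 * hypothetical and assumes the caller holds io_request_lock.
 */
#if 0
static void example_finish_request(request_queue_t *q, int uptodate)
{
	struct request *req = blkdev_entry_to_request(q->queue_head.next);

	if (end_that_request_first(req, uptodate, "example"))
		return;		/* more buffer_heads left in this request */
	list_del(&req->queue);	/* take it off the queue ... */
	req->q = NULL;		/* ... end_that_request_last() insists on this */
	end_that_request_last(req);
}
#endif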
int __init blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->queue = NULL;
		blk_init_queue(&dev->request_queue, NULL);
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
	}
	memset(ro_bits,0,sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));
#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif /* CONFIG_ISP16_CDI */
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_IDE)
	ide_init();		/* this MUST precede hd_init */
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
#if defined(__i386__)	/* Do we even need this? */
	outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif /* CONFIG_ATARI_ACSI */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_DDV
	ddv_init();
#endif
#ifdef CONFIG_BLK_DEV_NBD
	nbd_init();
#endif
#ifdef CONFIG_MDISK
	mdisk_init();
#endif
#ifdef CONFIG_DASD
	dasd_init();
#endif
#ifdef CONFIG_SUN_JSFLASH
	jsfd_init();
#endif
#ifdef CONFIG_BLK_DEV_LVM
	lvm_init();
#endif
	return 0;
}
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_pluggable);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);