/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/raid/md.h>

#include <linux/module.h>
/*
 * MAC Floppy IWM hooks
 */

#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif

extern int lvm_init(void);

/*
 * For the allocated request tables
 */
static kmem_cache_t *request_cachep;

/*
 * The "disk" task queue is used to start the actual requests
 * after a plug
 */
DECLARE_TASK_QUEUE(tq_disk);
/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is still not completely safe. The kernel
 * lock protects this part currently.).
 *
 * There is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
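
/*
 * Illustrative sketch (for exposition only, not used by this file):
 * process-context code that needs to touch a request queue takes the
 * lock with interrupts disabled, roughly like
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&io_request_lock, flags);
 *	... inspect or modify the request lists ...
 *	spin_unlock_irqrestore(&io_request_lock, flags);
 *
 * A driver's request_fn() is already entered with this lock held and
 * must not take it again.
 */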
/* This specifies how many sectors to read ahead on the disk. */

int read_ahead[MAX_BLKDEV];

/* blk_dev_struct is:
 *	*request_fn
 *	*current_request
 */
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block-devices in units of 1024 byte
 * sectors:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];

/*
 * blksize_size contains the size of all block-devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];

/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR])
 *		then 512 bytes is assumed.
 * else
 *		sector_size is hardsect_size[MAJOR][MINOR]
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];
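
/*
 * Illustrative sketch (hypothetical driver code, for exposition only):
 * a driver normally fills these per-major arrays at init time.  With
 * made-up names MY_MAJOR, MY_MINORS and my_capacity_kb() this could
 * look roughly like:
 *
 *	static int my_sizes[MY_MINORS];		(device sizes, in KiB)
 *	static int my_blksizes[MY_MINORS];	(soft block size, bytes)
 *
 *	for (i = 0; i < MY_MINORS; i++) {
 *		my_sizes[i] = my_capacity_kb(i);
 *		my_blksizes[i] = 1024;
 *	}
 *	blk_size[MY_MAJOR] = my_sizes;
 *	blksize_size[MY_MAJOR] = my_blksizes;
 *
 * Leaving hardsect_size[MY_MAJOR] NULL means 512-byte hardware sectors
 * are assumed, as described above.
 */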
/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV];

/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV];

static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}
static inline request_queue_t *__blk_get_queue(kdev_t dev)
{
	struct blk_dev_struct *bdev = blk_dev + MAJOR(dev);

	if (bdev->queue)
		return bdev->queue(dev);
	else
		return &blk_dev[MAJOR(dev)].request_queue;
}

/*
 * NOTE: the device-specific queue() functions
 * have to be atomic!
 */
request_queue_t *blk_get_queue(kdev_t dev)
{
	request_queue_t *ret;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock,flags);
	ret = __blk_get_queue(dev);
	spin_unlock_irqrestore(&io_request_lock,flags);

	return ret;
}
static int __block_cleanup_queue(struct list_head *head)
{
	struct list_head *entry;
	struct request *rq;
	int i = 0;

	if (list_empty(head))
		return 0;

	entry = head->next;
	do {
		rq = list_entry(entry, struct request, table);
		entry = entry->next;
		list_del(&rq->table);
		kmem_cache_free(request_cachep, rq);
		i++;
	} while (!list_empty(head));

	return i;
}

/*
 * Hopefully the low level driver has finished any outstanding requests
 * first...
 */
void blk_cleanup_queue(request_queue_t * q)
{
	int count = QUEUE_NR_REQUESTS;

	count -= __block_cleanup_queue(&q->request_freelist[READ]);
	count -= __block_cleanup_queue(&q->request_freelist[WRITE]);

	if (count)
		printk("blk_cleanup_queue: leaked requests (%d)\n", count);

	memset(q, 0, sizeof(*q));
}
void blk_queue_headactive(request_queue_t * q, int active)
{
	q->head_active = active;
}

void blk_queue_pluggable (request_queue_t * q, plug_device_fn *plug)
{
	q->plug_device_fn = plug;
}

void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	q->make_request_fn = mfn;
}

static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	if (req->nr_segments < max_segments) {
		req->nr_segments++;
		q->elevator.nr_segments++;
		return 1;
	}
	return 0;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	if (bh->b_data + bh->b_size == req->bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}

static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int total_segments = req->nr_segments + next->nr_segments;
	int same_segment;

	same_segment = 0;
	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
		total_segments--;
		same_segment = 1;
	}

	if (total_segments > max_segments)
		return 0;

	q->elevator.nr_segments -= same_segment;
	req->nr_segments = total_segments;
	return 1;
}
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
 */
static void generic_plug_device(request_queue_t *q, kdev_t dev)
{
#ifdef CONFIG_BLK_DEV_MD
	if (MAJOR(dev) == MD_MAJOR) {
		spin_unlock_irq(&io_request_lock);
		BUG();
	}
#endif
	/*
	 * no need to replug device
	 */
	if (!list_empty(&q->queue_head) || q->plugged)
		return;

	q->plugged = 1;
	queue_task(&q->plug_tq, &tq_disk);
}
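
/*
 * Illustrative sketch (for exposition only, not used by this file):
 * the plug is pulled when the tq_disk task queue is run.  Code that
 * has queued buffers and now needs the I/O to actually start typically
 * does something like
 *
 *	run_task_queue(&tq_disk);
 *	wait_on_buffer(bh);
 *
 * which ends up invoking generic_unplug_device() below for every
 * plugged queue.
 */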
static void blk_init_free_list(request_queue_t *q)
{
	struct request *rq;
	int i;

	/*
	 * Divide requests in half between read and write. This used to
	 * be a 2/3 advantage for reads, but now reads can steal from
	 * the write free list.
	 */
	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
		rq->rq_status = RQ_INACTIVE;
		list_add(&rq->table, &q->request_freelist[i & 1]);
	}

	init_waitqueue_head(&q->wait_for_request);
	spin_lock_init(&q->request_lock);
}
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
	INIT_LIST_HEAD(&q->queue_head);
	INIT_LIST_HEAD(&q->request_freelist[READ]);
	INIT_LIST_HEAD(&q->request_freelist[WRITE]);
	elevator_init(&q->elevator, ELEVATOR_LINUS);
	blk_init_free_list(q);
	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->make_request_fn	= NULL;
	q->plug_tq.sync		= 0;
	q->plug_tq.routine	= &generic_unplug_device;
	q->plug_tq.data		= q;
	q->plugged		= 0;

	/*
	 * These booleans describe the queue properties.  We set the
	 * default (and most common) values here.  Other drivers can
	 * use the appropriate functions to alter the queue properties
	 * as appropriate.
	 */
	q->plug_device_fn	= generic_plug_device;
	q->head_active		= 1;
}
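
/*
 * Illustrative sketch (hypothetical driver code, for exposition only):
 * a conventional driver init path, with a made-up MAJOR_NR and request
 * handler do_my_request(), would look roughly like
 *
 *	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), do_my_request);
 *	blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
 *
 * Drivers that really do keep the head of the queue active leave
 * head_active at its default of 1; stacking drivers install their own
 * make_request_fn via blk_queue_make_request() instead.
 */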
/*
 * remove the plug and let it rip..
 */
static inline void __generic_unplug_device(request_queue_t *q)
{
	if (q->plugged) {
		q->plugged = 0;
		if (!list_empty(&q->queue_head))
			q->request_fn(q);
	}
}

void generic_unplug_device(void *data)
{
	request_queue_t *q = (request_queue_t *) data;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	__generic_unplug_device(q);
	spin_unlock_irqrestore(&io_request_lock, flags);
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, table);
/*
 * Get a free request. io_request_lock must be held and interrupts
 * disabled on the way in.
 */
static inline struct request *get_request(request_queue_t *q, int rw)
{
	struct list_head *list = &q->request_freelist[rw];
	struct request *rq;

	/*
	 * Reads get preferential treatment and are allowed to steal
	 * from the write free list if necessary.
	 */
	if (!list_empty(list)) {
		rq = blkdev_free_rq(list);
		goto got_rq;
	}

	/*
	 * if the WRITE list is non-empty, we know that rw is READ
	 * and that the READ list is empty. allow reads to 'steal'
	 * from the WRITE list.
	 */
	if (!list_empty(&q->request_freelist[WRITE])) {
		list = &q->request_freelist[WRITE];
		rq = blkdev_free_rq(list);
		goto got_rq;
	}

	return NULL;

got_rq:
	list_del(&rq->table);
	rq->free_list = list;
	rq->rq_status = RQ_ACTIVE;
	rq->special = NULL;
	rq->q = q;
	return rq;
}
/*
 * No available requests for this queue, unplug the device.
 */
static struct request *__get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&q->wait_for_request, &wait);
	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
		spin_lock_irq(&io_request_lock);
		rq = get_request(q, rw);
		spin_unlock_irq(&io_request_lock);
		if (rq)
			break;
		generic_unplug_device(q);
		schedule();
	}
	remove_wait_queue(&q->wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return rq;
}

static inline struct request *get_request_wait(request_queue_t *q, int rw)
{
	register struct request *rq;

	spin_lock_irq(&io_request_lock);
	rq = get_request(q, rw);
	spin_unlock_irq(&io_request_lock);
	if (rq)
		return rq;
	return __get_request_wait(q, rw);
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];

int is_read_only(kdev_t dev)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}

void set_device_ro(kdev_t dev,int flag)
{
	int minor,major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
inline void drive_stat_acct (kdev_t dev, int rw,
				unsigned long nr_sectors, int new_io)
{
	unsigned int major = MAJOR(dev);
	unsigned int index;

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
		return;

	kstat.dk_drive[major][index] += new_io;
	if (rw == READ) {
		kstat.dk_drive_rio[major][index] += new_io;
		kstat.dk_drive_rblk[major][index] += nr_sectors;
	} else if (rw == WRITE) {
		kstat.dk_drive_wio[major][index] += new_io;
		kstat.dk_drive_wblk[major][index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
/*
 * add-request adds a request to the linked list.
 * It disables interrupts (acquires the request spinlock) so that it can muck
 * with the request-lists in peace. Thus it should be called with no spinlocks
 * held.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */

static inline void add_request(request_queue_t * q, struct request * req,
			       struct list_head *head, int lat)
{
	int major;

	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

	elevator_account_request(req);
	/*
	 * let selected elevator insert the request
	 */
	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);

	/*
	 * FIXME(eric) I don't understand why there is a need for this
	 * special case code.  It clearly doesn't fit any more with
	 * the new queueing architecture, and it got added in 2.3.10.
	 * I am leaving this in here until I hear back from the COMPAQ
	 * people.
	 */
	major = MAJOR(req->rq_dev);
	if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
		(q->request_fn)(q);
	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
		(q->request_fn)(q);
}
/*
 * Must be called with io_request_lock held and interrupts disabled
 */
void inline blkdev_release_request(struct request *req)
{
	req->rq_status = RQ_INACTIVE;

	/*
	 * Request may not have originated from ll_rw_blk
	 */
	if (req->free_list) {
		list_add(&req->table, req->free_list);
		req->free_list = NULL;
		wake_up(&req->q->wait_for_request);
	}
}
/*
 * Has to be called with the request spinlock acquired
 */
static void attempt_merge(request_queue_t * q,
			  struct request *req,
			  int max_sectors,
			  int max_segments)
{
	struct request *next;

	next = blkdev_next_request(req);
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors || next->sem)
		return;
	/*
	 * If we are not allowed to merge these requests, then
	 * return.  If we are allowed to merge, then the count
	 * will have been updated to the appropriate number,
	 * and we shouldn't do it here too.
	 */
	if(!(q->merge_requests_fn)(q, req, next, max_segments))
		return;

	elevator_merge_requests(req, next);
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
	list_del(&next->queue);
	blkdev_release_request(next);
}
static inline void attempt_back_merge(request_queue_t * q,
				      struct request *req,
				      int max_sectors,
				      int max_segments)
{
	if (&req->queue == q->queue_head.prev)
		return;
	attempt_merge(q, req, max_sectors, max_segments);
}

static inline void attempt_front_merge(request_queue_t * q,
				       struct list_head * head,
				       struct request *req,
				       int max_sectors,
				       int max_segments)
{
	struct list_head * prev;

	prev = req->queue.prev;
	if (head == prev)
		return;
	attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
}
static inline void __make_request(request_queue_t * q, int rw,
				  struct buffer_head * bh)
{
	int major = MAJOR(bh->b_rdev);
	unsigned int sector, count;
	int max_segments = MAX_SEGMENTS;
	struct request * req = NULL;
	int rw_ahead, max_sectors, el_ret;
	struct list_head *head;
	int latency;
	elevator_t *elevator = &q->elevator;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	if (blk_size[major]) {
		unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;

		if (maxsector < count || maxsector - count < sector) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
			if (!blk_size[major][MINOR(bh->b_rdev)])
				goto end_io;
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count)>>1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			goto end_io;
		}
	}

	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) /* Hmmph! Already have it */
				goto end_io;
			kstat.pgpgin++;
			break;
		case WRITERAW:
			rw = WRITE;
			goto do_write;	/* Skip the buffer refile */
		case WRITE:
			if (!test_and_clear_bit(BH_Dirty, &bh->b_state))
				goto end_io;	/* Hmmph! Nothing to write */
			refile_buffer(bh);
		do_write:
			kstat.pgpgout++;
			break;
		default:
			BUG();
			goto end_io;
	}

	/* We'd better have a real physical mapping!
	   Check this bit only if the buffer was dirty and just locked
	   down by us so at this point flushpage will block and
	   won't clear the mapped bit under us. */
	if (!buffer_mapped(bh))
		BUG();

	/*
	 * Temporary solution - in 2.5 this will be done by the lowlevel
	 * driver. Create a bounce buffer if the buffer data points into
	 * high memory - keep the original buffer otherwise.
	 */
#if CONFIG_HIGHMEM
	bh = create_bounce(rw, bh);
#endif

	/* look for a free request. */
	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

	latency = elevator_request_latency(elevator, rw);

	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
	spin_lock_irq(&io_request_lock);
	elevator_default_debug(q, bh->b_rdev);

	/*
	 * skip first entry, for devices with active queue head
	 */
	head = &q->queue_head;
	if (q->head_active && !q->plugged)
		head = head->next;

	if (list_empty(head)) {
		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
		goto get_rq;
	}

	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw, &max_sectors, &max_segments);
	switch (el_ret) {

		case ELEVATOR_BACK_MERGE:
			if (!q->back_merge_fn(q, req, bh, max_segments))
				break;
			req->bhtail->b_reqnext = bh;
			req->bhtail = bh;
			req->nr_sectors = req->hard_nr_sectors += count;
			req->e = elevator;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_back_merge(q, req, max_sectors, max_segments);
			goto out;

		case ELEVATOR_FRONT_MERGE:
			if (!q->front_merge_fn(q, req, bh, max_segments))
				break;
			bh->b_reqnext = req->bh;
			req->bh = bh;
			req->buffer = bh->b_data;
			req->current_nr_sectors = count;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += count;
			req->e = elevator;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);
			attempt_front_merge(q, head, req, max_sectors, max_segments);
			goto out;
		/*
		 * elevator says don't/can't merge. get new request
		 */
		case ELEVATOR_NO_MERGE:
			break;

		default:
			printk("elevator returned crap (%d)\n", el_ret);
			BUG();
	}

	/*
	 * Grab a free request from the freelist. Reads first try their
	 * own queue - if that is empty, we steal from the write list.
	 * Writes must block if the write list is empty, and read aheads
	 * are not crucial.
	 */
get_rq:
	if ((req = get_request(q, rw)) == NULL) {
		spin_unlock_irq(&io_request_lock);
		if (rw_ahead)
			goto end_io;

		req = __get_request_wait(q, rw);
		spin_lock_irq(&io_request_lock);

		head = &q->queue_head;
		if (q->head_active && !q->plugged)
			head = head->next;
	}

	/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->nr_segments = 1; /* Always 1 for a new request. */
	req->nr_hw_segments = 1; /* Always 1 for a new request. */
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	req->rq_dev = bh->b_rdev;
	req->e = elevator;
	add_request(q, req, head, latency);
out:
	spin_unlock_irq(&io_request_lock);
	return;
end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
int generic_make_request (request_queue_t *q, int rw, struct buffer_head * bh)
{
	int ret;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 */
	while (q->make_request_fn) {
		ret = q->make_request_fn(q, rw, bh);
		if (ret > 0) {
			q = blk_get_queue(bh->b_rdev);
			continue;
		}
		return ret;
	}
	/*
	 * Does the block device want us to queue
	 * the IO request? (normal case)
	 */
	__make_request(q, rw, bh);
	spin_lock_irq(&io_request_lock);
	if (q && !q->plugged)
		(q->request_fn)(q);
	spin_unlock_irq(&io_request_lock);

	return 0;
}
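
/*
 * Illustrative sketch (hypothetical driver code, for exposition only):
 * a stacking driver (RAID/LVM style) resolves the mapping by rewriting
 * bh->b_rdev and bh->b_rsector in its make_request_fn and returning a
 * positive value, which makes the loop above look up the new queue and
 * retry; returning 0 means the driver queued or completed the buffer
 * itself.  With made-up helpers my_target_dev() and my_start_sector():
 *
 *	static int my_make_request(request_queue_t *q, int rw,
 *				   struct buffer_head *bh)
 *	{
 *		bh->b_rdev = my_target_dev(bh);
 *		bh->b_rsector += my_start_sector(bh);
 *		return 1;
 *	}
 */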
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device */

static void __ll_rw_block(int rw, int nr, struct buffer_head * bhs[],
			  int haslock)
{
	struct buffer_head *bh;
	request_queue_t *q;
	unsigned int major;
	int correct_size;
	int i;

	major = MAJOR(bhs[0]->b_dev);
	q = blk_get_queue(bhs[0]->b_dev);
	if (!q) {
		printk(KERN_ERR
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bhs[0]->b_dev), bhs[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bhs[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		bh = bhs[i];
		if (bh->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bhs[0]->b_dev),
			       correct_size, bh->b_size);
			goto sorry;
		}
	}

	if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bhs[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		bh = bhs[i];

		/* Only one thread can actually submit the I/O. */
		if (haslock) {
			if (!buffer_locked(bh))
				BUG();
		} else {
			if (test_and_set_bit(BH_Lock, &bh->b_state))
				continue;
		}
		set_bit(BH_Req, &bh->b_state);

		/*
		 * First step, 'identity mapping' - RAID or LVM might
		 * further remap this.
		 */
		bh->b_rdev = bh->b_dev;
		bh->b_rsector = bh->b_blocknr * (bh->b_size>>9);

		generic_make_request(q, rw, bh);
	}
	return;

sorry:
	for (i = 0; i < nr; i++)
		buffer_IO_error(bhs[i]);
}

void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 0);
}

void ll_rw_block_locked(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 1);
}
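
/*
 * Illustrative sketch (for exposition only, not used by this file):
 * a typical filesystem read of a single metadata block goes through
 * these helpers roughly as follows (essentially what bread() in
 * fs/buffer.c does):
 *
 *	struct buffer_head *bh = getblk(dev, block, size);
 *
 *	if (!buffer_uptodate(bh)) {
 *		ll_rw_block(READ, 1, &bh);
 *		wait_on_buffer(bh);
 *	}
 *	if (!buffer_uptodate(bh))
 *		... the read failed ...
 */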
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif
/*
 * First step of what used to be end_request
 *
 * 0 means all buffers are done - continue with end_that_request_last,
 * 1 means the request still has buffers left and the caller is done
 *   for now
 */

int end_that_request_first (struct request *req, int uptodate, char *name)
{
	struct buffer_head * bh;
	int nsect;

	req->errors = 0;
	if (!uptodate)
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
			kdevname(req->rq_dev), name, req->sector);

	if ((bh = req->bh) != NULL) {
		nsect = bh->b_size >> 9;
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->hard_sector += nsect;
			req->hard_nr_sectors -= nsect;
			req->sector = req->hard_sector;
			req->nr_sectors = req->hard_nr_sectors;

			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}
void end_that_request_last(struct request *req)
{
	if (req->e) {
		printk("end_that_request_last called with non-dequeued req\n");
		BUG();
	}
	if (req->sem != NULL)
		up(req->sem);

	blkdev_release_request(req);
}
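
/*
 * Illustrative sketch (for exposition only, not used by this file):
 * drivers pair the two functions above; the end_request() helper in
 * <linux/blk.h> expands to roughly
 *
 *	if (!end_that_request_first(req, uptodate, DEVICE_NAME)) {
 *		blkdev_dequeue_request(req);
 *		end_that_request_last(req);
 *	}
 *
 * i.e. end_that_request_last() is only called once every buffer of the
 * request has completed.
 */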
int __init blk_dev_init(void)
{
	struct blk_dev_struct *dev;

	request_cachep = kmem_cache_create("blkdev_requests",
					   sizeof(struct request),
					   0, SLAB_HWCACHE_ALIGN, NULL, NULL);

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;)
		dev->queue = NULL;

	memset(ro_bits,0,sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));
#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif /* CONFIG_ISP16_CDI */
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_IDE)
	ide_init();		/* this MUST precede hd_init */
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
#if defined(__i386__)	/* Do we even need this? */
	outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif /* CONFIG_ATARI_ACSI */
#ifdef CONFIG_MCD
	mcd_init();
#endif /* CONFIG_MCD */
#ifdef CONFIG_MCDX
	mcdx_init();
#endif /* CONFIG_MCDX */
#ifdef CONFIG_SBPCD
	sbpcd_init();
#endif /* CONFIG_SBPCD */
#ifdef CONFIG_AZTCD
	aztcd_init();
#endif /* CONFIG_AZTCD */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_GSCD
	gscd_init();
#endif /* CONFIG_GSCD */
#ifdef CONFIG_CM206
	cm206_init();
#endif
#ifdef CONFIG_OPTCD
	optcd_init();
#endif /* CONFIG_OPTCD */
#ifdef CONFIG_SJCD
	sjcd_init();
#endif /* CONFIG_SJCD */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_DDV
	ddv_init();
#endif
#ifdef CONFIG_BLK_DEV_NBD
	nbd_init();
#endif
#ifdef CONFIG_MDISK
	mdisk_init();
#endif
#ifdef CONFIG_DASD
	dasd_init();
#endif
#ifdef CONFIG_SUN_JSFLASH
	jsfd_init();
#endif
#ifdef CONFIG_BLK_DEV_LVM
	lvm_init();
#endif
	return 0;
}
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_pluggable);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(blkdev_release_request);