/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * This handles all read/write requests to block devices
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/locks.h>
#include <linux/init.h>
#include <linux/smp_lock.h>

#include <asm/system.h>

#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/raid/md.h>

#include <linux/module.h>
/*
 * MAC Floppy IWM hooks
 */

#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif
/*
 * The request-struct contains all necessary data
 * to load a nr of sectors into memory
 */
static struct request all_requests[NR_REQUEST];
/*
 * The "disk" task queue is used to start the actual requests
 */
DECLARE_TASK_QUEUE(tq_disk);
/*
 * Protect the request list against multiple users..
 *
 * With this spinlock the Linux block IO subsystem is 100% SMP threaded
 * from the IRQ event side, and almost 100% SMP threaded from the syscall
 * side (we still have to protect against block device array operations,
 * and the do_request() side is still not SMP-safe.  The kernel lock
 * protects this part currently.).
 *
 * There is a fair chance that things will work just OK if these functions
 * are called with no global kernel lock held ...
 */
spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;
/*
 * used to wait on when there are no free requests
 */
DECLARE_WAIT_QUEUE_HEAD(wait_for_request);
/* This specifies how many sectors to read ahead on the disk. */
int read_ahead[MAX_BLKDEV];
struct blk_dev_struct blk_dev[MAX_BLKDEV]; /* initialized by blk_dev_init() */
/*
 * blk_size contains the size of all block devices in units of 1024 bytes:
 *
 * blk_size[MAJOR][MINOR]
 *
 * if (!blk_size[MAJOR]) then no minor size checking is done.
 */
int * blk_size[MAX_BLKDEV];
/*
 * blksize_size contains the current block size of all block devices:
 *
 * blksize_size[MAJOR][MINOR]
 *
 * if (!blksize_size[MAJOR]) then 1024 bytes is assumed.
 */
int * blksize_size[MAX_BLKDEV];
/*
 * hardsect_size contains the size of the hardware sector of a device.
 *
 * hardsect_size[MAJOR][MINOR]
 *
 * if (!hardsect_size[MAJOR]) then 512 bytes is assumed;
 * otherwise the sector size is hardsect_size[MAJOR][MINOR].
 *
 * This is currently set by some scsi devices and read by the msdos fs driver.
 * Other uses may appear later.
 */
int * hardsect_size[MAX_BLKDEV];
/*
 * The following tunes the read-ahead algorithm in mm/filemap.c
 */
int * max_readahead[MAX_BLKDEV];
/*
 * Max number of sectors per request
 */
int * max_sectors[MAX_BLKDEV];
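
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver might publish its per-minor tables through the arrays above.
 * FOO_MAJOR, FOO_MINORS and the foo_* arrays are assumptions made up for
 * this example only.
 */
#if 0
static int foo_sizes[FOO_MINORS];	/* device size, in 1024-byte units */
static int foo_blksizes[FOO_MINORS];	/* soft block size, e.g. 1024 */
static int foo_hardsects[FOO_MINORS];	/* hardware sector size, e.g. 512 */
static int foo_maxsect[FOO_MINORS];	/* biggest request, in 512-byte sectors */

static void foo_register_sizes(void)
{
	int i;

	for (i = 0; i < FOO_MINORS; i++) {
		foo_sizes[i] = 1024;		/* 1 MB per minor */
		foo_blksizes[i] = 1024;
		foo_hardsects[i] = 512;
		foo_maxsect[i] = 128;
	}
	blk_size[FOO_MAJOR] = foo_sizes;
	blksize_size[FOO_MAJOR] = foo_blksizes;
	hardsect_size[FOO_MAJOR] = foo_hardsects;
	max_sectors[FOO_MAJOR] = foo_maxsect;
	read_ahead[FOO_MAJOR] = 8;		/* read-ahead, in sectors */
}
#endif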
static inline int get_max_sectors(kdev_t dev)
{
	if (!max_sectors[MAJOR(dev)])
		return MAX_SECTORS;
	return max_sectors[MAJOR(dev)][MINOR(dev)];
}
/*
 * NOTE: the device-specific queue() functions
 * have to be atomic!
 */
request_queue_t * blk_get_queue (kdev_t dev)
{
	int major = MAJOR(dev);
	struct blk_dev_struct *bdev = blk_dev + major;
	unsigned long flags;
	request_queue_t *ret;

	spin_lock_irqsave(&io_request_lock, flags);
	if (bdev->queue)
		ret = bdev->queue(dev);
	else
		ret = &blk_dev[major].request_queue;
	spin_unlock_irqrestore(&io_request_lock, flags);

	return ret;
}
void blk_cleanup_queue(request_queue_t * q)
{
	memset(q, 0, sizeof(*q));
}
void blk_queue_headactive(request_queue_t * q, int active)
{
	q->head_active = active;
}
void blk_queue_pluggable (request_queue_t * q, plug_device_fn *plug)
{
	q->plug_device_fn = plug;
}
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	q->make_request_fn = mfn;
}
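
/*
 * Illustrative sketch (not part of the original file): a stacking driver
 * such as RAID or LVM could install its own make_request_fn here.
 * Returning a positive value asks generic_make_request() below to look up
 * the (possibly remapped) device again; returning 0 means the function
 * consumed the buffer head itself.  FOO_MAJOR, REMAP_MAJOR and
 * remap_offset are assumptions made up for this example.
 */
#if 0
static int foo_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
{
	/* trivial linear remapping onto another device */
	bh->b_rdev = MKDEV(REMAP_MAJOR, 0);
	bh->b_rsector += remap_offset;
	return 1;			/* resubmit against the new device */
}

static void foo_stacking_init(void)
{
	blk_queue_make_request(blk_get_queue(MKDEV(FOO_MAJOR, 0)),
			       foo_make_request);
}
#endif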
static inline int ll_new_segment(request_queue_t *q, struct request *req, int max_segments)
{
	if (req->nr_segments < max_segments) {
		req->nr_segments++;
		q->elevator.nr_segments++;
		return 1;
	}
	return 0;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct buffer_head *bh, int max_segments)
{
	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct buffer_head *bh, int max_segments)
{
	if (bh->b_data + bh->b_size == req->bh->b_data)
		return 1;
	return ll_new_segment(q, req, max_segments);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next, int max_segments)
{
	int total_segments = req->nr_segments + next->nr_segments;
	int same_segment;

	same_segment = 0;
	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data) {
		total_segments--;
		same_segment = 1;
	}

	if (total_segments > max_segments)
		return 0;

	q->elevator.nr_segments -= same_segment;
	req->nr_segments = total_segments;
	return 1;
}
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue.
 * (and with the request spinlock acquired)
 */
static void generic_plug_device (request_queue_t *q, kdev_t dev)
{
#ifdef CONFIG_BLK_DEV_MD
	if (MAJOR(dev) == MD_MAJOR) {
		spin_unlock_irq(&io_request_lock);
		BUG();
	}
#endif
	if (!list_empty(&q->queue_head))
		return;

	q->plugged = 1;
	queue_task(&q->plug_tq, &tq_disk);
}
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
	INIT_LIST_HEAD(&q->queue_head);
	elevator_init(&q->elevator);

	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->make_request_fn	= NULL;
	q->plug_tq.sync		= 0;
	q->plug_tq.routine	= &generic_unplug_device;
	q->plug_tq.data		= q;
	q->plugged		= 0;
	/*
	 * These booleans describe the queue properties.  We set the
	 * default (and most common) values here.  Other drivers can
	 * use the appropriate functions to alter the queue properties.
	 */
	q->plug_device_fn	= generic_plug_device;
	q->head_active		= 1;
}
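
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * simple driver hooks itself up to this code.  It initialises its queue
 * with a request function and then, when that function is called (with
 * io_request_lock held), walks the request list.  FOO_MAJOR and
 * foo_transfer() are assumptions made up for this example; foo_transfer()
 * is supposed to move the current chunk (req->buffer, by
 * req->current_nr_sectors sectors) to or from the hardware and return
 * 1 on success, 0 on error.
 */
#if 0
static int foo_transfer(struct request *req);

static void foo_request(request_queue_t *q)
{
	struct request *req;
	int uptodate;

	while (!list_empty(&q->queue_head)) {
		req = blkdev_entry_to_request(q->queue_head.next);
		/* complete one buffer head per iteration until none remain */
		do {
			uptodate = foo_transfer(req);
		} while (end_that_request_first(req, uptodate, "foo"));
		/* then take the request off the queue and release it */
		list_del(&req->queue);
		end_that_request_last(req);
	}
}

static void foo_driver_init(void)
{
	/* head_active stays at its default of 1: the request at the head
	   of the queue is the one foo_request() is working on */
	blk_init_queue(&blk_dev[FOO_MAJOR].request_queue, foo_request);
}
#endif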
/*
 * remove the plug and let it rip..
 */
void generic_unplug_device(void * data)
{
	request_queue_t * q = (request_queue_t *) data;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	if (q->plugged) {
		q->plugged = 0;
		if (!list_empty(&q->queue_head))
			(q->request_fn)(q);
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
}
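
/*
 * Illustrative sketch (not part of the original file): code that has just
 * queued I/O and wants it started immediately can either unplug one
 * specific queue by hand or run the whole disk task queue.
 */
#if 0
static void foo_start_io(kdev_t dev)
{
	request_queue_t *q = blk_get_queue(dev);

	if (q)
		generic_unplug_device(q);	/* unplug just this queue */
}

static void foo_start_all_io(void)
{
	run_task_queue(&tq_disk);		/* unplug every plugged queue */
}
#endif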
/*
 * look for a free request in the first N entries.
 * NOTE: interrupts must be disabled on the way in (on SMP the request queue
 * spinlock has to be acquired), and will still be disabled on the way out.
 */
static inline struct request * get_request(int n, kdev_t dev)
{
	static struct request *prev_found = NULL, *prev_limit = NULL;
	register struct request *req, *limit;

	if (n <= 0)
		panic("get_request(%d): impossible!\n", n);

	limit = all_requests + n;
	if (limit != prev_limit) {
		prev_limit = limit;
		prev_found = all_requests;
	}
	req = prev_found;
	for (;;) {
		req = ((req > all_requests) ? req : limit) - 1;
		if (req->rq_status == RQ_INACTIVE)
			break;
		if (req == prev_found)
			return NULL;
	}
	prev_found = req;
	req->rq_status = RQ_ACTIVE;
	req->rq_dev = dev;
	return req;
}
/*
 * wait until a free request in the first N entries is available.
 */
static struct request * __get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue_exclusive(&wait_for_request, &wait);
	for (;;) {
		__set_current_state(TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE);
		spin_lock_irqsave(&io_request_lock, flags);
		req = get_request(n, dev);
		spin_unlock_irqrestore(&io_request_lock, flags);
		if (req)
			break;
		run_task_queue(&tq_disk);
		schedule();
	}
	remove_wait_queue(&wait_for_request, &wait);
	current->state = TASK_RUNNING;
	return req;
}
static inline struct request * get_request_wait(int n, kdev_t dev)
{
	register struct request *req;
	unsigned long flags;

	spin_lock_irqsave(&io_request_lock, flags);
	req = get_request(n, dev);
	spin_unlock_irqrestore(&io_request_lock, flags);
	if (req)
		return req;
	return __get_request_wait(n, dev);
}
/* RO fail safe mechanism */

static long ro_bits[MAX_BLKDEV][8];
int is_read_only(kdev_t dev)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return 0;
	return ro_bits[major][minor >> 5] & (1 << (minor & 31));
}
void set_device_ro(kdev_t dev, int flag)
{
	int minor, major;

	major = MAJOR(dev);
	minor = MINOR(dev);
	if (major < 0 || major >= MAX_BLKDEV) return;
	if (flag) ro_bits[major][minor >> 5] |= 1 << (minor & 31);
	else ro_bits[major][minor >> 5] &= ~(1 << (minor & 31));
}
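
/*
 * Illustrative sketch (not part of the original file): a driver noticing a
 * write-protect tab might use the RO fail-safe above; __ll_rw_block() below
 * will then refuse WRITE requests for that device.  FOO_MAJOR is an
 * assumption made up for this example.
 */
#if 0
static void foo_set_write_protect(int minor, int protect)
{
	kdev_t dev = MKDEV(FOO_MAJOR, minor);

	set_device_ro(dev, protect);
	if (is_read_only(dev))
		printk(KERN_INFO "foo: %s is write protected\n",
		       kdevname(dev));
}
#endif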
inline void drive_stat_acct (kdev_t dev, int rw,
			     unsigned long nr_sectors, int new_io)
{
	unsigned int major = MAJOR(dev);
	unsigned int index;

	index = disk_index(dev);
	if ((index >= DK_MAX_DISK) || (major >= DK_MAX_MAJOR))
		return;

	kstat.dk_drive[major][index] += new_io;
	if (rw == READ) {
		kstat.dk_drive_rio[major][index] += new_io;
		kstat.dk_drive_rblk[major][index] += nr_sectors;
	} else if (rw == WRITE) {
		kstat.dk_drive_wio[major][index] += new_io;
		kstat.dk_drive_wblk[major][index] += nr_sectors;
	} else
		printk(KERN_ERR "drive_stat_acct: cmd not R/W?\n");
}
/*
 * add-request adds a request to the linked list.
 * It disables interrupts (acquires the request spinlock) so that it can muck
 * with the request-lists in peace. Thus it should be called with no spinlocks
 * held.
 *
 * By this point, req->cmd is always either READ/WRITE, never READA,
 * which is important for drive_stat_acct() above.
 */
static inline void add_request(request_queue_t * q, struct request * req,
			       struct list_head * head, int latency)
{
	int major;

	drive_stat_acct(req->rq_dev, req->cmd, req->nr_sectors, 1);

	if (list_empty(head)) {
		req->elevator_sequence = elevator_sequence(&q->elevator, latency);
		list_add(&req->queue, &q->queue_head);
		return;
	}
	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, latency);

	/*
	 * FIXME(eric) I don't understand why there is a need for this
	 * special case code.  It clearly doesn't fit any more with
	 * the new queueing architecture, and it got added in 2.3.10.
	 * I am leaving this in here until I hear back from the COMPAQ
	 * people.
	 */
	major = MAJOR(req->rq_dev);
	if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7)
		(q->request_fn)(q);
	if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7)
		(q->request_fn)(q);
}
/*
 * Has to be called with the request spinlock acquired
 */
static void attempt_merge(request_queue_t * q,
			  struct request *req,
			  int max_sectors,
			  int max_segments)
{
	struct request *next;

	next = blkdev_next_request(req);
	if (req->sector + req->nr_sectors != next->sector)
		return;
	if (req->cmd != next->cmd
	    || req->rq_dev != next->rq_dev
	    || req->nr_sectors + next->nr_sectors > max_sectors
	    || next->sem)
		return;
	/*
	 * If we are not allowed to merge these requests, then
	 * return.  If we are allowed to merge, then the count
	 * will have been updated to the appropriate number,
	 * and we shouldn't do it here too.
	 */
	if (!(q->merge_requests_fn)(q, req, next, max_segments))
		return;

	elevator_merge_requests(&q->elevator, req, next);
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
	next->rq_status = RQ_INACTIVE;
	list_del(&next->queue);
	wake_up (&wait_for_request);
}
static inline void attempt_back_merge(request_queue_t * q,
				      struct request *req,
				      int max_sectors,
				      int max_segments)
{
	if (&req->queue == q->queue_head.prev)
		return;
	attempt_merge(q, req, max_sectors, max_segments);
}
static inline void attempt_front_merge(request_queue_t * q,
				       struct list_head * head,
				       struct request *req,
				       int max_sectors,
				       int max_segments)
{
	struct list_head * prev;

	prev = req->queue.prev;
	if (head == prev)
		return;
	attempt_merge(q, blkdev_entry_to_request(prev), max_sectors, max_segments);
}
static inline void __make_request(request_queue_t * q, int rw,
				  struct buffer_head * bh)
{
	int major = MAJOR(bh->b_rdev);
	unsigned int sector, count;
	int max_segments = MAX_SEGMENTS;
	struct request * req;
	int rw_ahead, max_req, max_sectors;
	unsigned long flags;

	int orig_latency, latency, starving, sequence;
	struct list_head * entry, * head = &q->queue_head;
	elevator_t * elevator;

	count = bh->b_size >> 9;
	sector = bh->b_rsector;

	if (blk_size[major]) {
		unsigned long maxsector = (blk_size[major][MINOR(bh->b_rdev)] << 1) + 1;

		if (maxsector < count || maxsector - count < sector) {
			bh->b_state &= (1 << BH_Lock) | (1 << BH_Mapped);
			if (!blk_size[major][MINOR(bh->b_rdev)])
				goto end_io;
			/* This may well happen - the kernel calls bread()
			   without checking the size of the device, e.g.,
			   when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%d, want=%d, limit=%d\n",
			       kdevname(bh->b_rdev), rw,
			       (sector + count) >> 1,
			       blk_size[major][MINOR(bh->b_rdev)]);
			goto end_io;
		}
	}
	rw_ahead = 0;	/* normal case; gets changed below for READA */
	switch (rw) {
		case READA:
			rw_ahead = 1;
			rw = READ;	/* drop into READ */
		case READ:
			if (buffer_uptodate(bh)) /* Hmmph! Already have it */
				goto end_io;
			max_req = NR_REQUEST;	/* reads take precedence */
			break;
		case WRITERAW:
			rw = WRITE;
			goto do_write;	/* Skip the buffer refile */
		case WRITE:
			if (!test_and_clear_bit(BH_Dirty, &bh->b_state))
				goto end_io;	/* Hmmph! Nothing to write */
			refile_buffer(bh);
		do_write:
			/*
			 * We don't allow the write-requests to fill up the
			 * queue completely:  we want some room for reads,
			 * as they take precedence. The last third of the
			 * requests are only for reads.
			 */
			max_req = (NR_REQUEST * 2) / 3;
			break;
		default:
			BUG();
			goto end_io;
	}

	/* We'd better have a real physical mapping!
	   Check this bit only if the buffer was dirty and just locked
	   down by us so at this point flushpage will block and
	   won't clear the mapped bit under us. */
	if (!buffer_mapped(bh))
		BUG();

	/*
	 * Temporary solution - in 2.5 this will be done by the lowlevel
	 * driver. Create a bounce buffer if the buffer data points into
	 * high memory - keep the original buffer otherwise.
	 */
#if CONFIG_HIGHMEM
	bh = create_bounce(rw, bh);
#endif
/* look for a free request. */
	/*
	 * Loop uses two requests, 1 for loop and 1 for the real device.
	 * Cut max_req in half to avoid running out and deadlocking.
	 */
	if ((major == LOOP_MAJOR) || (major == NBD_MAJOR))
		max_req >>= 1;

	/*
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);

	elevator = &q->elevator;
	orig_latency = elevator_request_latency(elevator, rw);

	/*
	 * Now we acquire the request spinlock, we have to be mega careful
	 * not to schedule or do something nonatomic
	 */
	spin_lock_irqsave(&io_request_lock, flags);
	elevator_debug(q, bh->b_rdev);
	if (list_empty(head)) {
		q->plug_device_fn(q, bh->b_rdev); /* is atomic */
		goto get_rq;
	}

	/* avoid write-bombs to not hurt interactiveness of reads */
	if (rw != READ && elevator->read_pendings)
		max_segments = elevator->max_bomb_segments;

	sequence = elevator->sequence;
	latency = orig_latency - elevator->nr_segments;
	starving = 0;
	entry = head;

	/*
	 * The scsi disk and cdrom drivers completely remove the request
	 * from the queue when they start processing an entry.  For this
	 * reason it is safe to continue to add links to the top entry
	 * for those devices.
	 *
	 * All other drivers need to jump over the first entry, as that
	 * entry may be busy being processed and we thus can't change
	 * it.
	 */
	if (q->head_active && !q->plugged)
		head = head->next;

	while ((entry = entry->prev) != head && !starving) {
		req = blkdev_entry_to_request(entry);
		if (req->sem)
			continue;
		latency += req->nr_segments;
		if (elevator_sequence_before(req->elevator_sequence, sequence))
			starving = 1;
		if (req->cmd != rw)
			continue;
		if (req->nr_sectors + count > max_sectors)
			continue;
		if (req->rq_dev != bh->b_rdev)
			continue;
		/* Can we add it to the end of this request? */
		if (req->sector + req->nr_sectors == sector) {
			if (latency - req->nr_segments < 0)
				break;
			/*
			 * The merge_fn is a more advanced way
			 * of accomplishing the same task.  Instead
			 * of applying a fixed limit of some sort
			 * we instead define a function which can
			 * determine whether or not it is safe to
			 * merge the request or not.
			 *
			 * See if this queue has rules that
			 * may suggest that we shouldn't merge
			 * this request.
			 */
			if (!(q->back_merge_fn)(q, req, bh, max_segments))
				continue;
			req->bhtail->b_reqnext = bh;
			req->bhtail = bh;
			req->nr_sectors = req->hard_nr_sectors += count;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);

			elevator_merge_after(elevator, req, latency);

			/* Can we now merge this req with the next? */
			attempt_back_merge(q, req, max_sectors, max_segments);
		/* or to the beginning? */
		} else if (req->sector - count == sector) {
			if (starving)
				break;
			/*
			 * The merge_fn is a more advanced way
			 * of accomplishing the same task.  Instead
			 * of applying a fixed limit of some sort
			 * we instead define a function which can
			 * determine whether or not it is safe to
			 * merge the request or not.
			 *
			 * See if this queue has rules that
			 * may suggest that we shouldn't merge
			 * this request.
			 */
			if (!(q->front_merge_fn)(q, req, bh, max_segments))
				continue;
			bh->b_reqnext = req->bh;
			req->bh = bh;
			req->buffer = bh->b_data;
			req->current_nr_sectors = count;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += count;
			drive_stat_acct(req->rq_dev, req->cmd, count, 0);

			elevator_merge_before(elevator, req, latency);

			attempt_front_merge(q, head, req, max_sectors, max_segments);
		} else
			continue;

		q->elevator.sequence++;
		spin_unlock_irqrestore(&io_request_lock, flags);
		return;
	}
/* find an unused request. */
get_rq:
	req = get_request(max_req, bh->b_rdev);

	/*
	 * if no request available: if rw_ahead, forget it,
	 * otherwise try again blocking..
	 */
	if (!req) {
		spin_unlock_irqrestore(&io_request_lock, flags);
		if (rw_ahead)
			goto end_io;
		req = __get_request_wait(max_req, bh->b_rdev);
		spin_lock_irqsave(&io_request_lock, flags);

		/* revalidate elevator */
		head = &q->queue_head;
		if (q->head_active && !q->plugged)
			head = head->next;
	}

/* fill up the request-info, and add it to the queue */
	req->cmd = rw;
	req->errors = 0;
	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = count;
	req->current_nr_sectors = count;
	req->nr_segments = 1; /* Always 1 for a new request. */
	req->nr_hw_segments = 1; /* Always 1 for a new request. */
	req->buffer = bh->b_data;
	req->sem = NULL;
	req->bh = bh;
	req->bhtail = bh;
	add_request(q, req, head, orig_latency);
	elevator_account_request(elevator, req);

	spin_unlock_irqrestore(&io_request_lock, flags);
	return;

end_io:
	bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
int generic_make_request (request_queue_t *q, int rw, struct buffer_head * bh)
{
	unsigned long flags;
	int ret;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 */
	while (q->make_request_fn) {
		ret = q->make_request_fn(q, rw, bh);
		if (ret > 0) {
			/* the buffer head was remapped - find its new queue */
			q = blk_get_queue(bh->b_rdev);
			continue;
		}
		return ret;
	}
	/*
	 * Does the block device want us to queue
	 * the IO request? (normal case)
	 */
	__make_request(q, rw, bh);
	spin_lock_irqsave(&io_request_lock, flags);
	if (q && !q->plugged)
		(q->request_fn)(q);
	spin_unlock_irqrestore(&io_request_lock, flags);

	return 0;
}
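
/*
 * Illustrative sketch (not part of the original file): remapping code such
 * as RAID or LVM, which already holds a locked and mapped buffer head with
 * b_rdev/b_rsector filled in, can feed it straight into
 * generic_make_request() instead of going through ll_rw_block().
 */
#if 0
static void foo_submit_bh(int rw, struct buffer_head *bh)
{
	request_queue_t *q = blk_get_queue(bh->b_rdev);

	if (!q) {
		buffer_IO_error(bh);
		return;
	}
	generic_make_request(q, rw, bh);
}
#endif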
/* This function can be used to request a number of buffers from a block
   device. Currently the only restriction is that all buffers must belong
   to the same device. */

static void __ll_rw_block(int rw, int nr, struct buffer_head * bhs[],
			  int haslock)
{
	struct buffer_head *bh;
	request_queue_t *q;
	unsigned int major;
	int correct_size;
	int i;

	major = MAJOR(bhs[0]->b_dev);
	q = blk_get_queue(bhs[0]->b_dev);
	if (!q) {
		printk(KERN_ERR
	"ll_rw_block: Trying to read nonexistent block-device %s (%ld)\n",
		       kdevname(bhs[0]->b_dev), bhs[0]->b_blocknr);
		goto sorry;
	}

	/* Determine correct block size for this device. */
	correct_size = BLOCK_SIZE;
	if (blksize_size[major]) {
		i = blksize_size[major][MINOR(bhs[0]->b_dev)];
		if (i)
			correct_size = i;
	}

	/* Verify requested block sizes. */
	for (i = 0; i < nr; i++) {
		bh = bhs[i];
		if (bh->b_size != correct_size) {
			printk(KERN_NOTICE "ll_rw_block: device %s: "
			       "only %d-char blocks implemented (%u)\n",
			       kdevname(bhs[0]->b_dev),
			       correct_size, bh->b_size);
			goto sorry;
		}
	}

	if ((rw & WRITE) && is_read_only(bhs[0]->b_dev)) {
		printk(KERN_NOTICE "Can't write to read-only device %s\n",
		       kdevname(bhs[0]->b_dev));
		goto sorry;
	}

	for (i = 0; i < nr; i++) {
		bh = bhs[i];

		/* Only one thread can actually submit the I/O. */
		if (haslock) {
			if (!buffer_locked(bh))
				BUG();
		} else {
			if (test_and_set_bit(BH_Lock, &bh->b_state))
				continue;
		}
		set_bit(BH_Req, &bh->b_state);

		/*
		 * First step, 'identity mapping' - RAID or LVM might
		 * further remap this.
		 */
		bh->b_rdev = bh->b_dev;
		bh->b_rsector = bh->b_blocknr * (bh->b_size >> 9);

		generic_make_request(q, rw, bh);
	}
	return;

sorry:
	for (i = 0; i < nr; i++)
		buffer_IO_error(bhs[i]);
}
void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 0);
}
void ll_rw_block_locked(int rw, int nr, struct buffer_head * bh[])
{
	__ll_rw_block(rw, nr, bh, 1);
}
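
/*
 * Illustrative sketch (not part of the original file): the typical
 * filesystem-side use of ll_rw_block() - read one block synchronously.
 */
#if 0
static struct buffer_head *foo_bread(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = getblk(dev, block, size);

	if (buffer_uptodate(bh))
		return bh;			/* already in the cache */

	ll_rw_block(READ, 1, &bh);		/* queue the read */
	wait_on_buffer(bh);			/* kicks tq_disk and sleeps */
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);				/* I/O error */
	return NULL;
}
#endif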
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif
/*
 * First step of what used to be end_request
 *
 * 0 means continue with end_that_request_last,
 * 1 means we are done
 */

int end_that_request_first (struct request *req, int uptodate, char *name)
{
	struct buffer_head * bh;
	int nsect;

	if (!uptodate)
		printk("end_request: I/O error, dev %s (%s), sector %lu\n",
			kdevname(req->rq_dev), name, req->sector);

	if ((bh = req->bh) != NULL) {
		nsect = bh->b_size >> 9;
		req->bh = bh->b_reqnext;
		bh->b_reqnext = NULL;
		bh->b_end_io(bh, uptodate);
		if ((bh = req->bh) != NULL) {
			req->hard_sector += nsect;
			req->hard_nr_sectors -= nsect;
			req->sector = req->hard_sector;
			req->nr_sectors = req->hard_nr_sectors;

			req->current_nr_sectors = bh->b_size >> 9;
			if (req->nr_sectors < req->current_nr_sectors) {
				req->nr_sectors = req->current_nr_sectors;
				printk("end_request: buffer-list destroyed\n");
			}
			req->buffer = bh->b_data;
			return 1;
		}
	}
	return 0;
}
void end_that_request_last(struct request *req)
{
	if (req->sem != NULL)
		up(req->sem);
	req->rq_status = RQ_INACTIVE;
	wake_up(&wait_for_request);
}
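
/*
 * Illustrative sketch (not part of the original file): completing the
 * request a driver is working on from its interrupt handler.  Drivers
 * normally do this under io_request_lock, since it touches the shared
 * request list.  foo_current_request() and "foo" are assumptions made up
 * for this example.
 */
#if 0
static struct request *foo_current_request(void);

static void foo_end_current_request(int uptodate)
{
	unsigned long flags;
	struct request *req;

	spin_lock_irqsave(&io_request_lock, flags);
	req = foo_current_request();
	if (!end_that_request_first(req, uptodate, "foo")) {
		list_del(&req->queue);		/* take it off the queue */
		end_that_request_last(req);
	}
	spin_unlock_irqrestore(&io_request_lock, flags);
}
#endif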
int __init blk_dev_init(void)
{
	struct request * req;
	struct blk_dev_struct *dev;

	for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) {
		dev->queue = NULL;
		blk_init_queue(&dev->request_queue, NULL);
	}

	req = all_requests + NR_REQUEST;
	while (--req >= all_requests) {
		req->rq_status = RQ_INACTIVE;
	}
	memset(ro_bits, 0, sizeof(ro_bits));
	memset(max_readahead, 0, sizeof(max_readahead));
	memset(max_sectors, 0, sizeof(max_sectors));
#ifdef CONFIG_AMIGA_Z2RAM
	z2_init();
#endif
#ifdef CONFIG_STRAM_SWAP
	stram_device_init();
#endif
#ifdef CONFIG_BLK_DEV_RAM
	rd_init();
#endif
#ifdef CONFIG_BLK_DEV_LOOP
	loop_init();
#endif
#ifdef CONFIG_ISP16_CDI
	isp16_init();
#endif /* CONFIG_ISP16_CDI */
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_IDE)
	ide_init();		/* this MUST precede hd_init */
#endif
#if defined(CONFIG_IDE) && defined(CONFIG_BLK_DEV_HD)
	hd_init();
#endif
#ifdef CONFIG_BLK_DEV_PS2
	ps2esdi_init();
#endif
#ifdef CONFIG_BLK_DEV_XD
	xd_init();
#endif
#ifdef CONFIG_BLK_DEV_MFM
	mfm_init();
#endif
#ifdef CONFIG_PARIDE
	{ extern void paride_init(void); paride_init(); };
#endif
#ifdef CONFIG_MAC_FLOPPY
	swim3_init();
#endif
#ifdef CONFIG_BLK_DEV_SWIM_IOP
	swimiop_init();
#endif
#ifdef CONFIG_AMIGA_FLOPPY
	amiga_floppy_init();
#endif
#ifdef CONFIG_ATARI_FLOPPY
	atari_floppy_init();
#endif
#ifdef CONFIG_BLK_DEV_FD
	floppy_init();
#else
#if defined(__i386__)	/* Do we even need this? */
	outb_p(0xc, 0x3f2);
#endif
#endif
#ifdef CONFIG_CDU31A
	cdu31a_init();
#endif /* CONFIG_CDU31A */
#ifdef CONFIG_ATARI_ACSI
	acsi_init();
#endif /* CONFIG_ATARI_ACSI */
#ifdef CONFIG_CDU535
	sony535_init();
#endif /* CONFIG_CDU535 */
#ifdef CONFIG_BLK_DEV_MD
	md_init();
#endif /* CONFIG_BLK_DEV_MD */
#ifdef CONFIG_APBLOCK
	ap_init();
#endif
#ifdef CONFIG_BLK_DEV_NBD
	nbd_init();
#endif
#ifdef CONFIG_SUN_JSFLASH
	jsfd_init();
#endif
#ifdef CONFIG_BLK_DEV_LVM
	lvm_init();
#endif
	return 0;
}
EXPORT_SYMBOL(io_request_lock);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_queue_headactive);
EXPORT_SYMBOL(blk_queue_pluggable);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(generic_make_request);