/*
 *  linux/drivers/block/ll_rw_blk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
static void blk_unplug_work(void *data);
static void blk_unplug_timeout(unsigned long data);

/*
 * For the allocated request tables
 */
static kmem_cache_t *request_cachep;

static LIST_HEAD(blk_plug_list);
static spinlock_t blk_plug_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static wait_queue_head_t congestion_wqh[2];

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

unsigned long blk_max_low_pfn, blk_max_pfn;

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32
/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	int ret;

	ret = q->nr_requests - (q->nr_requests / 8) + 1;

	if (ret > q->nr_requests)
		ret = q->nr_requests;

	return ret;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	int ret;

	ret = q->nr_requests - (q->nr_requests / 8) - 1;

	if (ret < 1)
		ret = 1;

	return ret;
}
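
/*
 * Illustrative arithmetic (not part of the original source): assuming the
 * default of nr_requests = 128 (BLKDEV_MAX_RQ), the hysteresis above gives
 *
 *	on  threshold = 128 - 128/8 + 1 = 113 used requests
 *	off threshold = 128 - 128/8 - 1 = 111 used requests
 *
 * so a queue flagged congested at 113 in-flight requests is not marked
 * uncongested again until it drains below 111.
 */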
/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static void clear_queue_congested(request_queue_t *q, int rw)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	clear_bit(bit, &q->backing_dev_info.state);
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static void set_queue_congested(request_queue_t *q, int rw)
{
	enum bdi_state bit;

	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
	set_bit(bit, &q->backing_dev_info.state);
}
/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info
 *
 * Will return NULL if the request queue cannot be located.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	request_queue_t *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;

	return ret;
}
void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
{
	q->activity_fn = fn;
	q->activity_data = data;
}

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:		queue
 * @pfn:	prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 */
void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
 * @mbfn:	merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Per default no merge_bvec_fn is defined for
 * a queue, and only the fixed limits are honored.
 */
void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * The normal way for &struct bios to be passed to a device
 * driver is for them to be collected into requests on a request
 * queue, and then to allow the device driver to select requests
 * off that queue when it is ready.  This works well for many block
 * devices. However some block devices (typically virtual devices
 * such as md or lvm) do not benefit from the processing on the
 * request queue, and are served best by having the requests passed
 * directly to them.  This can be achieved by providing a function
 * to blk_queue_make_request().
 *
 * The driver that does this *must* be able to deal appropriately
 * with buffers in "highmemory". This can be accomplished by either calling
 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 * blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
{
	q->nr_requests = BLKDEV_MAX_RQ;
	q->max_phys_segments = MAX_PHYS_SEGMENTS;
	q->max_hw_segments = MAX_HW_SEGMENTS;
	q->make_request_fn = mfn;
	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.state = 0;
	q->backing_dev_info.memory_backed = 0;
	blk_queue_max_sectors(q, MAX_SECTORS);
	blk_queue_hardsect_size(q, 512);
	blk_queue_dma_alignment(q, 511);

	q->unplug_thresh = 4;		/* hmm */
	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
	if (q->unplug_delay == 0)
		q->unplug_delay = 1;

	INIT_WORK(&q->unplug_work, blk_unplug_work, q);

	q->unplug_timer.function = blk_unplug_timeout;
	q->unplug_timer.data = (unsigned long)q;

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

	INIT_LIST_HEAD(&q->plug_list);

	blk_queue_activity_fn(q, NULL, NULL);
}
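
/*
 * Illustrative sketch (not part of the original file): a virtual driver, in
 * the spirit of md or lvm, that bypasses the request queue entirely by
 * registering its own make_request function.  struct example_dev and its
 * fields are hypothetical.
 */
#if 0	/* example only */
static int example_make_request(request_queue_t *q, struct bio *bio)
{
	struct example_dev *dev = q->queuedata;

	/*
	 * remap onto the backing device; returning non-zero asks
	 * generic_make_request() to resolve the new mapping for us
	 */
	bio->bi_bdev = dev->backing_bdev;
	bio->bi_sector += dev->start_sect;
	return 1;
}

static void example_init_queue(struct example_dev *dev)
{
	request_queue_t *q = blk_alloc_queue(GFP_KERNEL);

	blk_queue_make_request(q, example_make_request);
	q->queuedata = dev;
	dev->queue = q;
}
#endif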
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Different hardware can have different requirements as to what pages
 * it can do I/O directly to. A low level driver can call
 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
 * buffers for doing I/O to pages residing above @dma_addr. By default
 * the block layer sets this to the highest numbered "low" memory page.
 **/
void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
{
	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
	unsigned long mb = dma_addr >> 20;
	static request_queue_t *last_q;

	/*
	 * set appropriate bounce gfp mask -- unfortunately we don't have a
	 * full 4GB zone, so we have to resort to low memory for any bounces.
	 * ISA has its own < 16MB zone.
	 */
	if (bounce_pfn < blk_max_low_pfn) {
		BUG_ON(dma_addr < BLK_BOUNCE_ISA);
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
	} else
		q->bounce_gfp = GFP_NOIO;

	/*
	 * keep this for debugging for now...
	 */
	if (dma_addr != BLK_BOUNCE_HIGH && q != last_q) {
		printk("blk: queue %p, ", q);
		if (dma_addr == BLK_BOUNCE_ANY)
			printk("no I/O memory limit\n");
		else
			printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
	}

	q->bounce_pfn = bounce_pfn;
	last_q = q;
}
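
/*
 * Illustrative sketch (not part of the original file): an old-style ISA
 * controller that can only DMA below 16MB would bounce anything higher,
 * while a typical 32-bit PCI adapter would pass its full DMA mask instead.
 * example_queue is hypothetical.
 */
#if 0	/* example only */
	blk_queue_bounce_limit(example_queue, BLK_BOUNCE_ISA);	/* <= 16MB */
	blk_queue_bounce_limit(example_queue, 0xffffffff);	/* <= 4GB  */
#endif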
/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Enables a low level driver to set an upper limit on the size of
 * received requests.
 **/
void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
{
	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
	}

	q->max_sectors = max_sectors;
}

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Enables a low level driver to set an upper limit on the number of
 * physical data segments in a request.  This would be the largest sized
 * scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_phys_segments = max_segments;
}

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.  This would be the largest number of
 * address/length pairs the host adapter can actually give at once
 * to the device.
 **/
void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
	}

	q->max_hw_segments = max_segments;
}

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment.
 **/
void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
	}

	q->max_segment_size = max_size;
}

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * This should typically be set to the lowest possible sector size
 * that the hardware can operate on (possible without reverting to
 * even internal read-modify-write operations).  Usually the default
 * of 512 covers most hardware.
 **/
void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
{
	q->hardsect_size = size;
}
/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
{
	/* zero is "infinity" */
	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);

	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
}
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
	}

	q->seg_boundary_mask = mask;
}

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * set required memory and length alignment for direct dma transactions.
 * this is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(request_queue_t *q, int mask)
{
	q->dma_alignment = mask;
}
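
/*
 * Illustrative sketch (not part of the original file): the kind of limits a
 * low level driver would typically set once it has allocated its queue in
 * its probe/attach path.  example_queue and the values are hypothetical.
 */
#if 0	/* example only */
	blk_queue_max_sectors(example_queue, 128);		/* 64KB per request */
	blk_queue_max_phys_segments(example_queue, 32);
	blk_queue_max_hw_segments(example_queue, 32);
	blk_queue_max_segment_size(example_queue, 65536);
	blk_queue_segment_boundary(example_queue, 0xffff);	/* no 64KB crossings */
	blk_queue_hardsect_size(example_queue, 512);
	blk_queue_dma_alignment(example_queue, 511);
#endif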
/**
 * blk_queue_find_tag - find a request by its tag and queue
 *
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Should be used when a device returns a tag and you want to match
 * it with a request.
 *
 * no locks need be held.
 **/
struct request *blk_queue_find_tag(request_queue_t *q, int tag)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;

	return bqt->tag_index[tag];
}
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * blk_cleanup_queue() will take care of calling this function, if tagging
 * has been used. So there's usually no need to call this directly, unless
 * tagging is just being disabled but the queue remains in function.
 **/
void blk_queue_free_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(!list_empty(&bqt->busy_list));

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}
static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
	int bits, i;

	if (depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
				__FUNCTION__, depth);
	}

	tags->tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tags->tag_index)
		goto fail;

	bits = (depth / BLK_TAGS_PER_LONG) + 1;
	tags->tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
	if (!tags->tag_map)
		goto fail;

	memset(tags->tag_index, 0, depth * sizeof(struct request *));
	memset(tags->tag_map, 0, bits * sizeof(unsigned long));
	tags->max_depth = depth;
	tags->real_max_depth = bits * BITS_PER_LONG;

	/*
	 * set the upper bits if the depth isn't a multiple of the word size
	 */
	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
		__set_bit(i, tags->tag_map);

	INIT_LIST_HEAD(&tags->busy_list);

	atomic_set(&tags->refcnt, 1);

	return 0;
fail:
	kfree(tags->tag_index);
	return -ENOMEM;
}
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 **/
int blk_queue_init_tags(request_queue_t *q, int depth,
			struct blk_queue_tag *tags)
{
	if (!tags) {
		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
		if (!tags)
			goto fail;

		if (init_tag_map(q, tags, depth))
			goto fail;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(request_queue_t *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int bits, max_depth;

	if (!bqt)
		return -ENXIO;

	/*
	 * don't bother sizing down
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	bits = max_depth / BLK_TAGS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Typically called when end_that_request_first() returns 0, meaning
 * all transfers have been done for a request. It's important to call
 * this function before end_that_request_last(), as that will put the
 * request back on the free list thus corrupting the internal tag list.
 *
 * queue lock must be held.
 **/
void blk_queue_end_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	if (unlikely(tag >= bqt->real_max_depth))
		return;

	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
		printk("attempt to clear non-busy tag (%d)\n", tag);
		return;
	}

	list_del_init(&rq->queuelist);
	rq->flags &= ~REQ_QUEUED;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk("tag %d is missing\n", tag);

	bqt->tag_index[tag] = NULL;
}
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * This can either be used as a stand-alone helper, or possibly be
 * assigned as the queue &prep_rq_fn (in which case &struct request
 * automagically gets a tag assigned). Note that this function
 * assumes that any type of request can be queued! If this is not
 * true for your device, you must check the request type before
 * calling this function.  The request will also be removed from
 * the request queue, so it's the driver's responsibility to re-add
 * it if it should need to be restarted for some reason.
 *
 * queue lock must be held.
 **/
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned long *map = bqt->tag_map;
	int tag = 0;

	if (unlikely((rq->flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "request %p for device [%s] already tagged %d",
		       rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	for (map = bqt->tag_map; *map == -1UL; map++) {
		tag += BLK_TAGS_PER_LONG;

		if (tag >= bqt->max_depth)
			return 1;
	}

	tag += ffz(*map);
	__set_bit(tag, bqt->tag_map);

	rq->flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &bqt->busy_list);
	return 0;
}
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Hardware conditions may dictate a need to stop all pending requests.
 * In this case, we will safely clear the block side of the tag queue and
 * re-add all requests to the request queue in the right order.
 *
 * queue lock must be held.
 **/
void blk_queue_invalidate_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct list_head *tmp, *n;
	struct request *rq;

	list_for_each_safe(tmp, n, &bqt->busy_list) {
		rq = list_entry_rq(tmp);

		if (rq->tag == -1) {
			printk("bad tag found on list\n");
			list_del_init(&rq->queuelist);
			rq->flags &= ~REQ_QUEUED;
		} else
			blk_queue_end_tag(q, rq);

		rq->flags &= ~REQ_STARTED;
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	}
}
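
/*
 * Illustrative sketch (not part of the original file): how a tagged-queueing
 * driver would typically use the helpers above.  example_hw_queue() and
 * example_complete() are hypothetical.
 */
#if 0	/* example only */
static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;			/* all tags in use, retry later */

		example_hw_queue(rq, rq->tag);	/* issue using rq->tag */
	}
}

/* completion path, called with the queue lock held */
static void example_complete(request_queue_t *q, struct request *rq)
{
	if (!end_that_request_first(rq, 1, rq->hard_nr_sectors)) {
		blk_queue_end_tag(q, rq);	/* before end_that_request_last() */
		end_that_request_last(rq);
	}
}
#endif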
static char *rq_flags[] = {
	/* ... most flag names elided in this excerpt ... */
	"REQ_DRIVE_TASKFILE",
	/* ... */
};

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk("%s: dev %s: flags = ", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?");
	bit = 0;
	do {
		if (rq->flags & (1 << bit))
			printk("%s ", rq_flags[bit]);
		bit++;
	} while (bit < __REQ_NR_BITS);

	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
						 rq->nr_sectors,
						 rq->current_nr_sectors);
	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);

	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;

	if (unlikely(!bio->bi_io_vec))
		return;

	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
	seg_size = nr_phys_segs = nr_hw_segs = 0;
	bio_for_each_segment(bv, bio, i) {
		if (bvprv && cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;

			seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (!bvprv || !BIOVEC_VIRT_MERGEABLE(bvprv, bv))
			nr_hw_segs++;

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
	}

	bio->bi_phys_segments = nr_phys_segs;
	bio->bi_hw_segments = nr_hw_segs;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
			    struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory, check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
			  struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
		return 0;

	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory, check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
	struct bio_vec *bvec, *bvprv;
	struct bio *bio;
	int nsegs, i, cluster;

	nsegs = 0;
	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	rq_for_each_bio(bio, rq) {
		/*
		 * for each segment in bio
		 */
		bio_for_each_segment(bvec, bio, i) {
			int nbytes = bvec->bv_len;

			if (bvprv && cluster) {
				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
					goto new_segment;

				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
					goto new_segment;

				sg[nsegs - 1].length += nbytes;
			} else {
new_segment:
				memset(&sg[nsegs],0,sizeof(struct scatterlist));
				sg[nsegs].page = bvec->bv_page;
				sg[nsegs].length = nbytes;
				sg[nsegs].offset = bvec->bv_offset;

				nsegs++;
			}
			bvprv = bvec;
		} /* segments in bio */
	} /* bios in rq */

	return nsegs;
}
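
/*
 * Illustrative sketch (not part of the original file): a driver building a
 * scatter-gather table for a request before programming its DMA engine.
 * struct example_dev, its sg_table and example_program_dma() are hypothetical.
 */
#if 0	/* example only */
static void example_queue_rq(request_queue_t *q, struct request *rq)
{
	struct example_dev *dev = q->queuedata;
	int nents;

	/* sg_table must have room for rq->nr_phys_segments entries */
	nents = blk_rq_map_sg(q, rq, dev->sg_table);
	example_program_dma(dev, dev->sg_table, nents, rq_data_dir(rq));
}
#endif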
/*
 * the standard queue merge functions, can be overridden with device
 * specific ones if so desired
 */
static inline int ll_new_mergeable(request_queue_t *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static inline int ll_new_hw_segment(request_queue_t *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct bio *bio)
{
	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
		req->flags |= REQ_NOMERGE;
		q->last_merge = NULL;
		return 0;
	}

	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)))
		return ll_new_mergeable(q, req, bio);

	return ll_new_hw_segment(q, req, bio);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct bio *bio)
{
	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
		req->flags |= REQ_NOMERGE;
		q->last_merge = NULL;
		return 0;
	}

	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)))
		return ll_new_mergeable(q, req, bio);

	return ll_new_hw_segment(q, req, bio);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next)
{
	int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio))
		total_hw_segments--;

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}
/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (!blk_queue_plugged(q)
	    && !test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) {
		spin_lock(&blk_plug_lock);
		list_add_tail(&q->plug_list, &blk_plug_list);
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		spin_unlock(&blk_plug_lock);
	}
}

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());
	if (blk_queue_plugged(q)) {
		spin_lock(&blk_plug_lock);
		list_del_init(&q->plug_list);
		del_timer(&q->unplug_timer);
		spin_unlock(&blk_plug_lock);
		return 1;
	}

	return 0;
}
/*
 * remove the plug and let it rip..
 */
static inline void __generic_unplug_device(request_queue_t *q)
{
	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
		return;

	if (!blk_remove_plug(q))
		return;

	del_timer(&q->unplug_timer);

	/*
	 * was plugged, fire request_fn if queue has stuff to do
	 */
	if (elv_next_request(q))
		q->request_fn(q);
}

/**
 * generic_unplug_device - fire a request queue
 * @data:   The &request_queue_t in question
 *
 * Linux uses plugging to build bigger request queues before letting
 * the device have at them. If a queue is plugged, the I/O scheduler
 * is still adding and merging requests on the queue. Once the queue
 * gets unplugged (either by manually calling this function, or by
 * calling blk_run_queues()), the request_fn defined for the
 * queue is invoked and transfers started.
 **/
void generic_unplug_device(void *data)
{
	request_queue_t *q = data;

	spin_lock_irq(q->queue_lock);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}

static void blk_unplug_work(void *data)
{
	request_queue_t *q = data;

	q->unplug_fn(q);
}

static void blk_unplug_timeout(unsigned long data)
{
	request_queue_t *q = (request_queue_t *)data;

	kblockd_schedule_work(&q->unplug_work);
}
/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &request_queue_t in question
 *
 * blk_start_queue() will clear the stop flag on the queue, and call
 * the request_fn for the queue if it was in a stopped state when
 * entered. Also see blk_stop_queue(). Must not be called from driver
 * request function due to recursion issues. Queue lock must be held.
 **/
void blk_start_queue(request_queue_t *q)
{
	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
	schedule_work(&q->unplug_work);
}

/**
 * blk_stop_queue - stop a queue
 * @q:    The &request_queue_t in question
 *
 * The Linux block layer assumes that a block driver will consume all
 * entries on the request queue when the request_fn strategy is called.
 * Often this will not happen, because of hardware limitations (queue
 * depth settings). If a device driver gets a 'queue full' response,
 * or if it simply chooses not to queue more I/O at one point, it can
 * call this function to prevent the request_fn from being called until
 * the driver has signalled it's ready to go again. This happens by calling
 * blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(request_queue_t *q)
{
	blk_remove_plug(q);
	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
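
/*
 * Illustrative sketch (not part of the original file): a driver that stops
 * its queue when the hardware reports "queue full" and restarts it from its
 * completion interrupt.  example_hw_full(), example_issue() and the handler
 * are hypothetical.
 */
#if 0	/* example only */
static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (example_hw_full()) {
			blk_stop_queue(q);	/* queue lock already held */
			break;
		}
		blkdev_dequeue_request(rq);
		example_issue(rq);
	}
}

static irqreturn_t example_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	request_queue_t *q = dev_id;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* ... complete finished requests here ... */
	blk_start_queue(q);			/* re-enable the request_fn */
	spin_unlock_irqrestore(q->queue_lock, flags);
	return IRQ_HANDLED;
}
#endif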
/**
 * blk_run_queue - run a single device queue
 * @q:	The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_remove_plug(q);
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 * blk_run_queues - fire all plugged queues
 *
 * Start I/O on all plugged queues known to the block layer. Queues that
 * are currently stopped are ignored. This is equivalent to the older
 * tq_disk task queue run.
 **/
#define blk_plug_entry(entry) list_entry((entry), request_queue_t, plug_list)
void blk_run_queues(void)
{
	LIST_HEAD(local_plug_list);

	spin_lock_irq(&blk_plug_lock);

	/*
	 * this will happen fairly often
	 */
	if (list_empty(&blk_plug_list))
		goto out;

	list_splice_init(&blk_plug_list, &local_plug_list);

	while (!list_empty(&local_plug_list)) {
		request_queue_t *q = blk_plug_entry(local_plug_list.next);

		spin_unlock_irq(&blk_plug_lock);
		q->unplug_fn(q);
		spin_lock_irq(&blk_plug_lock);
	}
out:
	spin_unlock_irq(&blk_plug_lock);
}
/**
 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 * @q:    the request queue to be released
 *
 * blk_cleanup_queue is the pair to blk_init_queue() or
 * blk_queue_make_request().  It should be called when a request queue is
 * being released; typically when a block device is being de-registered.
 * Currently, its primary task is to free all the &struct request
 * structures that were allocated to the queue and the queue itself.
 *
 * Hopefully the low level driver will have finished any
 * outstanding requests first...
 **/
void blk_cleanup_queue(request_queue_t * q)
{
	struct request_list *rl = &q->rq;

	if (!atomic_dec_and_test(&q->refcnt))
		return;

	del_timer_sync(&q->unplug_timer);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (blk_queue_tagged(q))
		blk_queue_free_tags(q);

	kfree(q);
}
static int blk_init_free_list(request_queue_t *q)
{
	struct request_list *rl = &q->rq;

	rl->count[READ] = rl->count[WRITE] = 0;
	init_waitqueue_head(&rl->wait[READ]);
	init_waitqueue_head(&rl->wait[WRITE]);

	rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}
static int __make_request(request_queue_t *, struct bio *);

static elevator_t *chosen_elevator =
#if defined(CONFIG_IOSCHED_AS)
	&iosched_as;
#elif defined(CONFIG_IOSCHED_DEADLINE)
	&iosched_deadline;
#elif defined(CONFIG_IOSCHED_NOOP)
	&elevator_noop;
#else
	NULL;
#error "You must have at least 1 I/O scheduler selected"
#endif

#if defined(CONFIG_IOSCHED_AS) || defined(CONFIG_IOSCHED_DEADLINE) || defined (CONFIG_IOSCHED_NOOP)
static int __init elevator_setup(char *str)
{
#ifdef CONFIG_IOSCHED_DEADLINE
	if (!strcmp(str, "deadline"))
		chosen_elevator = &iosched_deadline;
#endif
#ifdef CONFIG_IOSCHED_AS
	if (!strcmp(str, "as"))
		chosen_elevator = &iosched_as;
#endif
#ifdef CONFIG_IOSCHED_NOOP
	if (!strcmp(str, "noop"))
		chosen_elevator = &elevator_noop;
#endif
	return 1;
}

__setup("elevator=", elevator_setup);
#endif /* CONFIG_IOSCHED_AS || CONFIG_IOSCHED_DEADLINE || CONFIG_IOSCHED_NOOP */
request_queue_t *blk_alloc_queue(int gfp_mask)
{
	request_queue_t *q = kmalloc(sizeof(*q), gfp_mask);

	if (!q)
		return NULL;

	memset(q, 0, sizeof(*q));
	init_timer(&q->unplug_timer);
	atomic_set(&q->refcnt, 1);

	return q;
}
/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * If a block device wishes to use the standard request handling procedures,
 * which sorts requests and coalesces adjacent requests, then it must
 * call blk_init_queue().  The function @rfn will be called when there
 * are requests on the queue that need to be processed.  If the device
 * supports plugging, then @rfn may not be called immediately when requests
 * are available on the queue, but may be called at some time later instead.
 * Plugged queues are generally unplugged when a buffer belonging to one
 * of the requests on the queue is needed, or due to memory pressure.
 *
 * @rfn is not required, or even expected, to remove all requests off the
 * queue, but only as many as it can handle at a time.  If it does leave
 * requests on the queue, it is responsible for arranging that the requests
 * get dealt with eventually.
 *
 * The queue spin lock must be held while manipulating the requests on the
 * request queue.
 *
 * Function returns a pointer to the initialized request queue, or NULL if
 * it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	request_queue_t *q;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return NULL;

	if (blk_init_free_list(q))
		goto out_init;

	printk("Using %s io scheduler\n", chosen_elevator->elevator_name);

	if (elevator_init(q, chosen_elevator))
		goto out_elv;

	q->request_fn		= rfn;
	q->back_merge_fn	= ll_back_merge_fn;
	q->front_merge_fn	= ll_front_merge_fn;
	q->merge_requests_fn	= ll_merge_requests_fn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	return q;
out_elv:
	blk_cleanup_queue(q);
out_init:
	kfree(q);
	return NULL;
}
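
/*
 * Illustrative sketch (not part of the original file): the usual init path
 * for a driver that wants the full request queue machinery.  example_lock,
 * example_request_fn, example_disk and the limits are hypothetical; this
 * would sit in a driver's probe/init function.
 */
#if 0	/* example only */
	static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
	request_queue_t *q;

	q = blk_init_queue(example_request_fn, &example_lock);
	if (!q)
		return -ENOMEM;

	blk_queue_max_sectors(q, 128);
	blk_queue_hardsect_size(q, 512);
	example_disk->queue = q;	/* struct gendisk */
	add_disk(example_disk);
#endif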
int blk_get_queue(request_queue_t *q)
{
	if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		atomic_inc(&q->refcnt);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(request_queue_t *q, struct request *rq)
{
	elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static inline struct request *blk_alloc_request(request_queue_t *q,int gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	if (!elv_set_request(q, rq, gfp_mask))
		return rq;

	mempool_free(rq, q->rq.rq_pool);
	return NULL;
}
/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least 1 request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == BLK_BATCH_REQ ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
void ioc_set_batching(struct io_context *ioc)
{
	if (!ioc || ioc_batching(ioc))
		return;

	ioc->nr_batch_requests = BLK_BATCH_REQ;
	ioc->last_waited = jiffies;
}
/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.   Called under q->queue_lock.
 */
static void freed_request(request_queue_t *q, int rw)
{
	struct request_list *rl = &q->rq;

	rl->count[rw]--;
	if (rl->count[rw] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, rw);
	if (rl->count[rw]+1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[rw]))
			wake_up(&rl->wait[rw]);
		if (!waitqueue_active(&rl->wait[rw]))
			blk_clear_queue_full(q, rw);
	}
}
#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
/*
 * Get a free request, queue_lock must not be held
 */
static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = get_io_context(gfp_mask);

	spin_lock_irq(q->queue_lock);
	if (rl->count[rw]+1 >= q->nr_requests) {
		/*
		 * The queue will fill after this allocation, so set it as
		 * full, and mark this process as "batching". This process
		 * will be allowed to complete a batch of requests, others
		 * will be blocked.
		 */
		if (!blk_queue_full(q, rw)) {
			ioc_set_batching(ioc);
			blk_set_queue_full(q, rw);
		}
	}

	if (blk_queue_full(q, rw)
			&& !ioc_batching(ioc) && !elv_may_queue(q, rw)) {
		/*
		 * The queue is full and the allocating process is not a
		 * "batcher", and not exempted by the IO scheduler
		 */
		spin_unlock_irq(q->queue_lock);
		goto out;
	}

	rl->count[rw]++;
	if (rl->count[rw] >= queue_congestion_on_threshold(q))
		set_queue_congested(q, rw);
	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, gfp_mask);
	if (!rq) {
		/*
		 * Allocation failed presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw);
		spin_unlock_irq(q->queue_lock);
		goto out;
	}

	if (ioc_batching(ioc))
		ioc->nr_batch_requests--;

	INIT_LIST_HEAD(&rq->queuelist);

	/*
	 * first three bits are identical in rq->flags and bio->bi_rw,
	 * see bio.h and blkdev.h
	 */
	rq->flags = rw;

	rq->rq_status = RQ_ACTIVE;
	rq->bio = rq->biotail = NULL;

out:
	put_io_context(ioc);
	return rq;
}
/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 */
static struct request *get_request_wait(request_queue_t *q, int rw)
{
	DEFINE_WAIT(wait);
	struct request *rq;

	generic_unplug_device(q);
	do {
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		rq = get_request(q, rw, GFP_NOIO);

		if (!rq) {
			struct io_context *ioc;

			io_schedule();

			/*
			 * After sleeping, we become a "batching" process and
			 * will be able to allocate at least one request, and
			 * up to a big batch of them for a small period time.
			 * See ioc_batching, ioc_set_batching
			 */
			ioc = get_io_context(GFP_NOIO);
			ioc_set_batching(ioc);
			put_io_context(ioc);
		}
		finish_wait(&rl->wait[rw], &wait);
	} while (!rq);

	return rq;
}
struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	if (gfp_mask & __GFP_WAIT)
		rq = get_request_wait(q, rw);
	else
		rq = get_request(q, rw, gfp_mask);

	return rq;
}
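
/*
 * Illustrative sketch (not part of the original file): allocating a request
 * to carry a driver-private command, in the style of the SCSI/IDE ioctl
 * paths.  example_cdb and the surrounding setup are hypothetical.
 */
#if 0	/* example only */
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);

	rq->flags |= REQ_BLOCK_PC;
	memcpy(rq->cmd, example_cdb, sizeof(example_cdb));
	rq->timeout = 60 * HZ;
	/* ... set up the data buffer, queue it and wait for completion ... */
	blk_put_request(rq);
#endif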
/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Drivers often keep queueing requests until the hardware cannot accept
 * more, when that condition happens we need to put the request back
 * on the queue. Must be called with queue lock held.
 */
void blk_requeue_request(request_queue_t *q, struct request *rq)
{
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
/**
 * blk_insert_request - insert a special request in to a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 * @reinsert:	true if request is a reinsertion of previously processed one
 *
 * Many block devices need to execute commands asynchronously, so they don't
 * block the whole kernel from preemption during request execution.  This is
 * accomplished normally by inserting artificial requests tagged as
 * REQ_SPECIAL in to the corresponding request queue, and letting them be
 * scheduled for actual execution by the request queue.
 *
 * We have the option of inserting the head or the tail of the queue.
 * Typically we use the tail for new ioctls and so forth.  We use the head
 * of the queue for things like a QUEUE_FULL message from a device, or a
 * host that is unable to accept a particular command.
 */
void blk_insert_request(request_queue_t *q, struct request *rq,
			int at_head, void *data, int reinsert)
{
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (reinsert)
		blk_requeue_request(q, rq);
	else {
		int where = ELEVATOR_INSERT_BACK;

		if (at_head)
			where = ELEVATOR_INSERT_FRONT;

		if (blk_rq_tagged(rq))
			blk_queue_end_tag(q, rq);

		drive_stat_acct(rq, rq->nr_sectors, 1);
		__elv_add_request(q, rq, where, 0);
	}

	spin_unlock_irqrestore(q->queue_lock, flags);
}
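
/*
 * Illustrative sketch (not part of the original file): queueing a
 * driver-private command at the tail of the queue, roughly what an ioctl
 * path would do.  example_cmd is hypothetical.
 */
#if 0	/* example only */
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);

	blk_insert_request(q, rq, 0, example_cmd, 0);	/* at tail, not a reinsert */
#endif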
void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
{
	int rw = rq_data_dir(rq);

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	if (rw == READ) {
		disk_stat_add(rq->rq_disk, read_sectors, nr_sectors);
		if (!new_io)
			disk_stat_inc(rq->rq_disk, read_merges);
	} else if (rw == WRITE) {
		disk_stat_add(rq->rq_disk, write_sectors, nr_sectors);
		if (!new_io)
			disk_stat_inc(rq->rq_disk, write_merges);
	}
	if (new_io) {
		disk_round_stats(rq->rq_disk);
		rq->rq_disk->in_flight++;
	}
}
/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(request_queue_t * q, struct request * req)
{
	drive_stat_acct(req, req->nr_sectors, 1);

	if (q->activity_fn)
		q->activity_fn(q->activity_data, rq_data_dir(req));

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}
/*
 * disk_round_stats()	- Round off the performance stats on a struct
 * disk_stats.
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in this state for.
 *
 * Normally, that accounting is done on IO completion, but that can result
 * in more than a second's worth of IO being accounted for within any one
 * second, leading to >100% utilisation.  To deal with that, we call this
 * function to do a round-off before returning the results when reading
 * /proc/diskstats.  This accounts immediately for all queue usage up to
 * the current jiffies and restarts the counters again.
 */
void disk_round_stats(struct gendisk *disk)
{
	unsigned long now = jiffies;

	disk_stat_add(disk, time_in_queue,
			disk->in_flight * (now - disk->stamp));
	disk->stamp = now;

	if (disk->in_flight)
		disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
	disk->stamp_idle = now;
}
/*
 * queue lock must be held
 */
void __blk_put_request(request_queue_t *q, struct request *req)
{
	struct request_list *rl = req->rl;

	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(req->q, req);

	req->rq_status = RQ_INACTIVE;
	req->rl = NULL;

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (rl) {
		int rw = rq_data_dir(req);

		BUG_ON(!list_empty(&req->queuelist));

		blk_free_request(q, req);
		freed_request(q, rw);
	}
}

void blk_put_request(struct request *req)
{
	request_queue_t *q = req->q;

	/*
	 * if req->q isn't set, this request didn't originate from the
	 * block layer, so it's safe to just disregard it
	 */
	if (q) {
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_put_request(q, req);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
/**
 * blk_congestion_wait - wait for a queue to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
 * If no queues are congested then just wait for the next request to be
 * returned.
 */
void blk_congestion_wait(int rw, long timeout)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[rw];

	blk_run_queues();
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
}
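
/*
 * Illustrative sketch (not part of the original file): the way VM-writeback
 * style code throttles itself on congested queues.  example_pages_left() and
 * example_write_some_pages() are hypothetical.
 */
#if 0	/* example only */
	while (example_pages_left()) {
		if (example_write_some_pages() == 0)
			blk_congestion_wait(WRITE, HZ/10);	/* back off briefly */
	}
#endif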
/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(request_queue_t *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->waiting || next->special)
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!q->merge_requests_fn(q, req, next))
		return 0;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		disk_round_stats(req->rq_disk);
		req->rq_disk->in_flight--;
	}

	__blk_put_request(q, next);
	return 1;
}
static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}
/**
 * blk_attempt_remerge  - attempt to remerge active head with next request
 * @q:    The &request_queue_t belonging to the device
 * @rq:   The head request (usually)
 *
 * For head-active devices, the queue can easily be unplugged so quickly
 * that proper merging is not done on the front request. This may hurt
 * performance greatly for some devices. The block layer cannot safely
 * do merging on that first request for these queues, but the driver can
 * call this function and make it happen any way. Only the driver knows
 * when it is safe to do so.
 **/
void blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	attempt_back_merge(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Non-locking blk_attempt_remerge variant.
 */
void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
	attempt_back_merge(q, rq);
}
static int __make_request(request_queue_t *q, struct bio *bio)
{
	struct request *req, *freereq = NULL;
	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, ra;
	sector_t sector;

	sector = bio->bi_sector;
	nr_sectors = bio_sectors(bio);
	cur_nr_sectors = bio_cur_sectors(bio);

	rw = bio_data_dir(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	spin_lock_prefetch(q->queue_lock);

	barrier = test_bit(BIO_RW_BARRIER, &bio->bi_rw);

	ra = bio->bi_rw & (1 << BIO_RW_AHEAD);

again:
	spin_lock_irq(q->queue_lock);

	if (elv_queue_empty(q)) {
		blk_plug_device(q);
		goto get_rq;
	}

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
		case ELEVATOR_BACK_MERGE:
			BUG_ON(!rq_mergeable(req));

			if (!q->back_merge_fn(q, req, bio))
				break;

			req->biotail->bi_next = bio;
			req->biotail = bio;
			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
			drive_stat_acct(req, nr_sectors, 0);
			if (!attempt_back_merge(q, req))
				elv_merged_request(q, req);
			goto out;

		case ELEVATOR_FRONT_MERGE:
			BUG_ON(!rq_mergeable(req));

			if (!q->front_merge_fn(q, req, bio))
				break;

			bio->bi_next = req->bio;
			req->cbio = req->bio = bio;
			req->nr_cbio_segments = bio_segments(bio);
			req->nr_cbio_sectors = bio_sectors(bio);

			/*
			 * may not be valid. if the low level driver said
			 * it didn't need a bounce buffer then it better
			 * not touch req->buffer either...
			 */
			req->buffer = bio_data(bio);
			req->current_nr_sectors = cur_nr_sectors;
			req->hard_cur_sectors = cur_nr_sectors;
			req->sector = req->hard_sector = sector;
			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
			drive_stat_acct(req, nr_sectors, 0);
			if (!attempt_front_merge(q, req))
				elv_merged_request(q, req);
			goto out;

		/*
		 * elevator says don't/can't merge. get new request
		 */
		case ELEVATOR_NO_MERGE:
			break;

		default:
			printk("elevator returned crap (%d)\n", el_ret);
			BUG();
	}

	/*
	 * Grab a free request from the freelist - if that is empty, check
	 * if we are doing read ahead and abort instead of blocking for
	 * a free slot.
	 */
get_rq:
	if (freereq) {
		req = freereq;
		freereq = NULL;
	} else {
		spin_unlock_irq(q->queue_lock);
		if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
			/*
			 * READA bit set
			 */
			if (ra)
				goto end_io;

			freereq = get_request_wait(q, rw);
		}
		goto again;
	}

	/*
	 * first three bits are identical in rq->flags and bio->bi_rw,
	 * see bio.h and blkdev.h
	 */
	req->flags = (bio->bi_rw & 7) | REQ_CMD;

	/*
	 * REQ_BARRIER implies no merging, but lets make it explicit
	 */
	if (barrier)
		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	/*
	 * don't stack up retries for read ahead
	 */
	if (ra)
		req->flags |= REQ_FAILFAST;

	req->hard_sector = req->sector = sector;
	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
	req->nr_phys_segments = bio_phys_segments(q, bio);
	req->nr_hw_segments = bio_hw_segments(q, bio);
	req->nr_cbio_segments = bio_segments(bio);
	req->nr_cbio_sectors = bio_sectors(bio);
	req->buffer = bio_data(bio);	/* see ->buffer comment above */
	req->waiting = NULL;
	req->cbio = req->bio = req->biotail = bio;
	req->rq_disk = bio->bi_bdev->bd_disk;
	req->start_time = jiffies;

	add_request(q, req);
out:
	if (freereq)
		__blk_put_request(q, freereq);

	if (blk_queue_plugged(q)) {
		int nr_queued = q->rq.count[READ] + q->rq.count[WRITE];

		if (nr_queued == q->unplug_thresh)
			__generic_unplug_device(q);
	}
	spin_unlock_irq(q->queue_lock);
	return 0;

end_io:
	bio_endio(bio, nr_sectors << 9, -EWOULDBLOCK);
	return 0;
}
/*
 * If bio->bi_bdev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		switch (bio->bi_rw) {
		case READ:
			p->read_sectors += bio_sectors(bio);
			break;
		case WRITE:
			p->write_sectors += bio_sectors(bio);
			break;
		}
		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;
	}
}
/**
 * generic_make_request: hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block
 * devices. It is passed a &struct bio, which describes the I/O that needs
 * to be done.
 *
 * generic_make_request() does not return any status.  The
 * success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) else where.
 *
 * The caller of generic_make_request must make sure that bi_io_vec
 * are set to describe the memory buffer, and that bi_dev and bi_sector are
 * set to describe the device address, and the
 * bi_end_io and optionally bi_private are set to describe how
 * completion notification should be signaled.
 *
 * generic_make_request and the drivers it calls may use bi_next if this
 * bio happens to be merged with someone else, and may change bi_dev and
 * bi_sector for remaps as it sees fit.  So the values of these fields
 * should NOT be depended on after the call to generic_make_request.
 */
void generic_make_request(struct bio *bio)
{
	request_queue_t *q;
	sector_t maxsector;
	int ret, nr_sectors = bio_sectors(bio);

	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors ||
		    maxsector - nr_sectors < sector) {
			char b[BDEVNAME_SIZE];
			/* This may well happen - the kernel calls
			 * bread() without checking the size of the
			 * device, e.g., when mounting a device. */
			printk(KERN_INFO
			       "attempt to access beyond end of device\n");
			printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			       bdevname(bio->bi_bdev, b),
			       bio->bi_rw,
			       (unsigned long long) sector + nr_sectors,
			       (long long) maxsector);

			set_bit(BIO_EOF, &bio->bi_flags);
			goto end_io;
		}
	}

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%Lu)\n",
			       bdevname(bio->bi_bdev, b),
			       (long long) bio->bi_sector);
end_io:
			bio_endio(bio, bio->bi_size, -EIO);
			break;
		}

		if (unlikely(bio_sectors(bio) > q->max_sectors)) {
			printk("bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_sectors);
			goto end_io;
		}

		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		ret = q->make_request_fn(q, bio);
	} while (ret);
}
/**
 * submit_bio: submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces, @bio must be presetup and ready for I/O.
 */
int submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	BIO_BUG_ON(!bio->bi_size);
	BIO_BUG_ON(!bio->bi_io_vec);
	bio->bi_rw = rw;
	if (rw & WRITE)
		mod_page_state(pgpgout, count);
	else
		mod_page_state(pgpgin, count);
	generic_make_request(bio);
	return 1;
}
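
/*
 * Illustrative sketch (not part of the original file): building a one page
 * bio by hand and submitting it, the way a simple filesystem or driver
 * helper would.  example_end_io(), bdev, sector and page are hypothetical.
 */
#if 0	/* example only */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(READ, bio);
#endif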
/**
 * blk_rq_next_segment
 * @rq:		the request being processed
 *
 * Points to the next segment in the request if the current segment
 * is complete. Leaves things unchanged if this segment is not over
 * or if no more segments are left in this request.
 *
 * Meant to be used for bio traversal during I/O submission
 * Does not affect any I/O completions or update completion state
 * in the request, and does not modify any bio fields.
 *
 * Decrementing rq->nr_sectors, rq->current_nr_sectors and
 * rq->nr_cbio_sectors as data is transferred is the caller's
 * responsibility and should be done before calling this routine.
 **/
void blk_rq_next_segment(struct request *rq)
{
	if (rq->current_nr_sectors > 0)
		return;

	if (rq->nr_cbio_sectors > 0) {
		--rq->nr_cbio_segments;
		rq->current_nr_sectors = blk_rq_vec(rq)->bv_len >> 9;
	} else {
		if ((rq->cbio = rq->cbio->bi_next)) {
			rq->nr_cbio_segments = bio_segments(rq->cbio);
			rq->nr_cbio_sectors = bio_sectors(rq->cbio);
			rq->current_nr_sectors = bio_cur_sectors(rq->cbio);
		}
	}

	/* remember the size of this segment before we start I/O */
	rq->hard_cur_sectors = rq->current_nr_sectors;
}
/**
 * process_that_request_first	-	process partial request submission
 * @req:	the request being processed
 * @nr_sectors:	number of sectors I/O has been submitted on
 *
 * May be used for processing bio's while submitting I/O without
 * signalling completion. Fails if more data is requested than is
 * available in the request in which case it doesn't advance any
 * pointers.
 *
 * Assumes a request is correctly set up. No sanity checks.
 *
 * Return:
 *	0 - no more data left to submit (not processed)
 *	1 - data available to submit for this request (processed)
 **/
int process_that_request_first(struct request *req, unsigned int nr_sectors)
{
	unsigned int nsect;

	if (req->nr_sectors < nr_sectors)
		return 0;

	req->nr_sectors -= nr_sectors;
	req->sector += nr_sectors;
	while (nr_sectors) {
		nsect = min_t(unsigned, req->current_nr_sectors, nr_sectors);
		req->current_nr_sectors -= nsect;
		nr_sectors -= nsect;
		if (req->cbio) {
			req->nr_cbio_sectors -= nsect;
			blk_rq_next_segment(req);
		}
	}
	return 1;
}
void blk_recalc_rq_segments(struct request *rq)
{
	struct bio *bio;
	int nr_phys_segs, nr_hw_segs;

	if (!rq->bio)
		return;

	nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_bio(bio, rq) {
		/* Force bio hw/phys segs to be recalculated. */
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

		nr_phys_segs += bio_phys_segments(rq->q, bio);
		nr_hw_segs += bio_hw_segments(rq->q, bio);
	}

	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_hw_segments = nr_hw_segs;
}
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required,
		 * i.e. for drivers not aware of rq->cbio.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->nr_cbio_segments = bio_segments(rq->bio);
			rq->nr_cbio_sectors = bio_sectors(rq->bio);
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk("blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}

static int __end_that_request_first(struct request *req, int uptodate,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, error = 0, next_idx = 0;
	struct bio *bio;

	/*
	 * for a REQ_BLOCK_PC request, we want to carry any eventual
	 * sense key with us all the way through
	 */
	if (!blk_pc_request(req))
		req->errors = 0;

	if (!uptodate) {
		error = -EIO;
		if (!(req->flags & REQ_QUIET))
			printk("end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio)) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			bio_endio(bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk("%s: bio idx %d >= vcnt %d\n",
				       __FUNCTION__,
				       bio->bi_idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_iovec_idx(bio, idx)->bv_offset += nr_bytes;
				bio_iovec_idx(bio, idx)->bv_len -= nr_bytes;
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		if ((bio = req->bio)) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio)
		return 0;

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		bio_endio(bio, bio_nbytes, error);
		req->bio->bi_idx += next_idx;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

/**
 * end_that_request_first - end I/O on a request
 * @req: the request being processed
 * @uptodate: 0 for I/O error
 * @nr_sectors: number of sectors to end I/O on
 *
 *	Ends I/O on a number of sectors attached to @req, and sets it up
 *	for the next range of segments (if any) in the cluster.
 *
 *	0 - we are done with this request, call end_that_request_last()
 *	1 - still buffers pending for this request
 **/
int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
{
	return __end_that_request_first(req, uptodate, nr_sectors << 9);
}

/**
 * end_that_request_chunk - end I/O on a request
 * @req: the request being processed
 * @uptodate: 0 for I/O error
 * @nr_bytes: number of bytes to complete
 *
 *	Ends I/O on a number of bytes attached to @req, and sets it up
 *	for the next range of segments (if any). Like end_that_request_first(),
 *	but deals with bytes instead of sectors.
 *
 *	0 - we are done with this request, call end_that_request_last()
 *	1 - still buffers pending for this request
 **/
int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
{
	return __end_that_request_first(req, uptodate, nr_bytes);
}

/*
 * queue lock must be held
 */
void end_that_request_last(struct request *req)
{
	struct gendisk *disk = req->rq_disk;
	struct completion *waiting = req->waiting;

	if (disk && blk_fs_request(req)) {
		unsigned long duration = jiffies - req->start_time;

		switch (rq_data_dir(req)) {
		case WRITE:
			disk_stat_inc(disk, writes);
			disk_stat_add(disk, write_ticks, duration);
			break;
		case READ:
			disk_stat_inc(disk, reads);
			disk_stat_add(disk, read_ticks, duration);
			break;
		}
		disk_round_stats(disk);
		disk->in_flight--;
	}
	__blk_put_request(req->q, req);
	/* Do this LAST! The structure may be freed immediately afterwards */
	if (waiting)
		complete(waiting);
}

void end_request(struct request *req, int uptodate)
{
	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
		add_disk_randomness(req->rq_disk);
		blkdev_dequeue_request(req);
		end_that_request_last(req);
	}
}

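/*
 * Illustrative sketch only (not part of this file, left un-compiled): the
 * classic completion pattern in a simple driver's request function.
 * example_request_fn() and example_transfer() are hypothetical; only
 * elv_next_request() and end_request() are assumed from the block layer.
 */
#if 0
static void example_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		int ok = example_transfer(req);	/* hypothetical data mover */

		/*
		 * end_request() completes req->hard_cur_sectors sectors and,
		 * once nothing is left, dequeues and frees the request.
		 */
		end_request(req, ok);
	}
}
#endif
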
void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
{
	/* first three bits are identical in rq->flags and bio->bi_rw */
	rq->flags |= (bio->bi_rw & 7);

	rq->nr_phys_segments = bio_phys_segments(q, bio);
	rq->nr_hw_segments = bio_hw_segments(q, bio);
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->nr_cbio_segments = bio_segments(bio);
	rq->nr_cbio_sectors = bio_sectors(bio);
	rq->buffer = bio_data(bio);

	rq->cbio = rq->bio = rq->biotail = bio;
}

void blk_rq_prep_restart(struct request *rq)
{
	struct bio *bio;

	bio = rq->cbio = rq->bio;
	if (bio) {
		rq->nr_cbio_segments = bio_segments(bio);
		rq->nr_cbio_sectors = bio_sectors(bio);
		rq->hard_cur_sectors = bio_cur_sectors(bio);
		rq->buffer = bio_data(bio);
	}
	rq->sector = rq->hard_sector;
	rq->nr_sectors = rq->hard_nr_sectors;
	rq->current_nr_sectors = rq->hard_cur_sectors;
}

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}

void kblockd_flush(void)
{
	flush_workqueue(kblockd_workqueue);
}

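/*
 * Illustrative sketch only (not part of this file, left un-compiled):
 * deferring work to the kblockd thread using the three-argument INIT_WORK
 * of this kernel generation.  The helper names and the file-scope work
 * item are hypothetical; q->unplug_fn and kblockd_schedule_work() are the
 * only pieces assumed from the surrounding code.
 */
#if 0
static void example_unplug_work(void *data)
{
	request_queue_t *q = data;

	q->unplug_fn(q);
}

static struct work_struct example_work;

static void example_defer_unplug(request_queue_t *q)
{
	INIT_WORK(&example_work, example_unplug_work, q);
	kblockd_schedule_work(&example_work);
}
#endif
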
int __init blk_dev_init(void)
{
	int i;

	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, 0, NULL, NULL);
	if (!request_cachep)
		panic("Can't create request pool slab cache\n");

	blk_max_low_pfn = max_low_pfn;
	blk_max_pfn = max_pfn;

	for (i = 0; i < ARRAY_SIZE(congestion_wqh); i++)
		init_waitqueue_head(&congestion_wqh[i]);

	return 0;
}

/*
 * IO Context helper functions
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_read(&ioc->refcount) == 0);

	if (atomic_dec_and_test(&ioc->refcount)) {
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		kfree(ioc);
	}
}

/* Called by the exiting task */
void exit_io_context(void)
{
	unsigned long flags;
	struct io_context *ioc;

	local_irq_save(flags);
	ioc = current->io_context;
	if (ioc) {
		if (ioc->aic && ioc->aic->exit)
			ioc->aic->exit(ioc->aic);
		put_io_context(ioc);
		current->io_context = NULL;
	}
	local_irq_restore(flags);
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 * But weird things happen, so we disable local interrupts to ensure exclusive
 * access to *current.
 */
struct io_context *get_io_context(int gfp_flags)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	struct io_context *ret;

	local_irq_save(flags);
	ret = tsk->io_context;
	if (ret == NULL) {
		ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
		if (ret) {
			atomic_set(&ret->refcount, 1);
			ret->pid = tsk->pid;
			ret->last_waited = jiffies; /* doesn't matter... */
			ret->nr_batch_requests = 0; /* because this is 0 */
			ret->aic = NULL;
			tsk->io_context = ret;
		}
	}
	if (ret)
		atomic_inc(&ret->refcount);
	local_irq_restore(flags);
	return ret;
}

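/*
 * Illustrative sketch only (not part of this file, left un-compiled): the
 * reference discipline the helpers above imply.  get_io_context() creates
 * or finds current's io_context and returns it with an extra reference,
 * which the caller drops with put_io_context().  The wrapper name is
 * hypothetical.
 */
#if 0
static void example_io_context_ref(void)
{
	struct io_context *ioc;

	ioc = get_io_context(GFP_ATOMIC);	/* takes a reference */
	if (ioc) {
		/* ... associate ioc with per-queue or scheduler data ... */
		put_io_context(ioc);		/* drop the reference */
	}
}
#endif
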
void copy_io_context(struct io_context **pdst, struct io_context **psrc)
{
	struct io_context *src = *psrc;
	struct io_context *dst = *pdst;

	if (src) {
		BUG_ON(atomic_read(&src->refcount) == 0);
		atomic_inc(&src->refcount);
		put_io_context(dst);
		*pdst = src;
	}
}

void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
{
	struct io_context *temp;

	temp = *ioc1;
	*ioc1 = *ioc2;
	*ioc2 = temp;
}

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t
queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;

	int ret = queue_var_store(&q->nr_requests, page, count);
	if (q->nr_requests < BLKDEV_MIN_RQ)
		q->nr_requests = BLKDEV_MIN_RQ;

	if (rl->count[READ] >= queue_congestion_on_threshold(q))
		set_queue_congested(q, READ);
	else if (rl->count[READ] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, READ);

	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
		set_queue_congested(q, WRITE);
	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
		clear_queue_congested(q, WRITE);

	if (rl->count[READ] >= q->nr_requests) {
		blk_set_queue_full(q, READ);
	} else if (rl->count[READ]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, READ);
		wake_up(&rl->wait[READ]);
	}

	if (rl->count[WRITE] >= q->nr_requests) {
		blk_set_queue_full(q, WRITE);
	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
		blk_clear_queue_full(q, WRITE);
		wake_up(&rl->wait[WRITE]);
	}

	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	NULL,
};

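/*
 * Illustrative sketch only (not part of this file, left un-compiled): how
 * another queue attribute could be wired in alongside nr_requests using the
 * queue_sysfs_entry plumbing above.  The attribute name, the show routine
 * and the alternate attribute array are invented for the example; only
 * queue_var_show(), q->max_sectors and the structures above are assumed.
 */
#if 0
static ssize_t queue_example_max_sectors_show(struct request_queue *q,
					      char *page)
{
	return queue_var_show(q->max_sectors, page);
}

static struct queue_sysfs_entry queue_example_max_sectors_entry = {
	.attr = {.name = "example_max_sectors", .mode = S_IRUGO },
	.show = queue_example_max_sectors_show,
};

static struct attribute *example_default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_example_max_sectors_entry.attr,
	NULL,
};
#endif
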
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;

	q = container_of(kobj, struct request_queue, kobj);

	return entry->show(q, page);
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;

	q = container_of(kobj, struct request_queue, kobj);

	return entry->store(q, page, length);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;

	request_queue_t *q = disk->queue;

	if (!q || !q->request_fn)
		return -ENXIO;

	q->kobj.parent = kobject_get(&disk->kobj);
	if (!q->kobj.parent)
		return -EBUSY;

	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
	q->kobj.ktype = &queue_ktype;

	ret = kobject_register(&q->kobj);
	if (ret < 0)
		return ret;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_unregister(&q->kobj);
		kobject_put(&disk->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	request_queue_t *q = disk->queue;

	if (q && q->request_fn) {
		elv_unregister_queue(q);

		kobject_unregister(&q->kobj);
		kobject_put(&disk->kobj);
	}
}

EXPORT_SYMBOL(process_that_request_first);
EXPORT_SYMBOL(end_that_request_first);
EXPORT_SYMBOL(end_that_request_chunk);
EXPORT_SYMBOL(end_that_request_last);
EXPORT_SYMBOL(end_request);
EXPORT_SYMBOL(blk_init_queue);
EXPORT_SYMBOL(blk_cleanup_queue);
EXPORT_SYMBOL(blk_get_queue);
EXPORT_SYMBOL(blk_alloc_queue);
EXPORT_SYMBOL(blk_queue_make_request);
EXPORT_SYMBOL(blk_queue_bounce_limit);
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_plug_device);
EXPORT_SYMBOL(blk_remove_plug);
EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(__blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors);
EXPORT_SYMBOL(blk_queue_max_phys_segments);
EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_stack_limits);
EXPORT_SYMBOL(blk_queue_segment_boundary);
EXPORT_SYMBOL(blk_queue_dma_alignment);
EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_dump_rq_flags);
EXPORT_SYMBOL(submit_bio);
EXPORT_SYMBOL(blk_phys_contig_segment);
EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(blk_get_request);
EXPORT_SYMBOL(blk_put_request);
EXPORT_SYMBOL(blk_insert_request);
EXPORT_SYMBOL(blk_requeue_request);

EXPORT_SYMBOL(blk_queue_prep_rq);
EXPORT_SYMBOL(blk_queue_merge_bvec);

EXPORT_SYMBOL(blk_queue_find_tag);
EXPORT_SYMBOL(blk_queue_init_tags);
EXPORT_SYMBOL(blk_queue_free_tags);
EXPORT_SYMBOL(blk_queue_start_tag);
EXPORT_SYMBOL(blk_queue_end_tag);
EXPORT_SYMBOL(blk_queue_invalidate_tags);

EXPORT_SYMBOL(blk_start_queue);
EXPORT_SYMBOL(blk_stop_queue);
EXPORT_SYMBOL(blk_run_queue);
EXPORT_SYMBOL(blk_run_queues);

EXPORT_SYMBOL(blk_rq_bio_prep);
EXPORT_SYMBOL(blk_rq_prep_restart);