/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}
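
/*
 * Worked example (illustrative, not from the original source): for a
 * filesystem request that started at sector 100 with 16 sectors, a
 * partial completion of nsect = 8 advances hard_sector to 108 and
 * drops hard_nr_sectors to 8; the submission pointers (sector,
 * nr_sectors, buffer) are then re-synced to the new front bio.
 */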

void blk_recalc_rq_segments(struct request *rq)
{
	int nr_phys_segs;
	int nr_hw_segs;
	unsigned int phys_size;
	unsigned int hw_size;
	struct bio_vec *bv, *bvprv = NULL;
	int seg_size;
	int hw_seg_size;
	int cluster;
	struct req_iterator iter;
	int high, highprv = 1;
	struct request_queue *q = rq->q;

	if (!rq->bio)
		return;

	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	hw_seg_size = seg_size = 0;
	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_segment(bv, rq, iter) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
			hw_seg_size += bv->bv_len;
		else {
new_hw_segment:
			if (nr_hw_segs == 1 &&
			    hw_seg_size > rq->bio->bi_hw_front_size)
				rq->bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}

	if (nr_hw_segs == 1 &&
	    hw_seg_size > rq->bio->bi_hw_front_size)
		rq->bio->bi_hw_front_size = hw_seg_size;
	if (hw_seg_size > rq->biotail->bi_hw_back_size)
		rq->biotail->bi_hw_back_size = hw_seg_size;
	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_hw_segments = nr_hw_segs;
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct request rq;
	struct bio *nxt = bio->bi_next;
	rq.q = q;
	rq.bio = rq.biotail = bio;
	bio->bi_next = NULL;
	blk_recalc_rq_segments(&rq);
	bio->bi_next = nxt;
	bio->bi_phys_segments = rq.nr_phys_segments;
	bio->bi_hw_segments = rq.nr_hw_segments;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
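
/*
 * Usage note (illustrative, not from the original source): the segment
 * counts are computed lazily; callers test BIO_SEG_VALID first, as the
 * merge functions below do:
 *
 *	if (!bio_flagged(bio, BIO_SEG_VALID))
 *		blk_recount_segments(q, bio);
 */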

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory, check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
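
/*
 * Example (illustrative, not from the original source): two 4KiB bios
 * whose data ends and begins on physically adjacent pages pass
 * BIOVEC_PHYS_MERGEABLE() and can be counted as one 8KiB physical
 * segment, provided the combined size fits q->max_segment_size and the
 * pair does not straddle the queue's segment boundary mask.
 */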

static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
				 struct bio *nxt)
{
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(nxt, BIO_SEG_VALID))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
		return 0;
	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
		return 0;

	return 1;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (rq->data_len & q->dma_pad_mask)) {
		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
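
/*
 * Illustrative sketch (hypothetical driver code, not part of the
 * original file): a request handler using blk_rq_map_sg() before
 * programming a DMA engine. The my_dev structure and my_dev_issue()
 * are made-up names; the sg table is sized for q->max_phys_segments
 * entries so it always has room for rq->nr_phys_segments, as the
 * comment above requires.
 */
#if 0
struct my_dev {
	struct request_queue *queue;
	struct scatterlist *sg;		/* q->max_phys_segments entries */
};

static void my_dev_issue(struct my_dev *dev, struct request *rq)
{
	int nsegs;

	sg_init_table(dev->sg, dev->queue->max_phys_segments);
	nsegs = blk_rq_map_sg(dev->queue, rq, dev->sg);

	/* hand the nsegs mapped entries to the hardware here */
}
#endif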

static inline int ll_new_mergeable(struct request_queue *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}
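
/*
 * Note (illustrative, not from the original source): the two helpers
 * pair up as follows. A bio that is virtually mergeable with the tail
 * of the request only grows an existing hw segment, so
 * ll_new_mergeable() checks just q->max_phys_segments; a bio that
 * starts a fresh hw segment must fit both q->max_hw_segments and
 * q->max_phys_segments, so ll_new_hw_segment() bumps both counters.
 */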

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
	    && !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_front_size = len;
			if (req->nr_hw_segments == 1)
				req->biotail->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	int total_hw_segments;

	/*
	 * First check if either of the requests are re-queued
	 * requests. Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
		int len = req->biotail->bi_hw_back_size +
				next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the end of the requests
		 */
		if (req->nr_hw_segments == 1)
			req->bio->bi_hw_front_size = len;
		if (next->nr_hw_segments == 1)
			next->biotail->bi_hw_back_size = len;
		total_hw_segments--;
	}

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		struct hd_struct *part
			= get_part(req->rq_disk, req->sector);
		disk_round_stats(req->rq_disk);
		req->rq_disk->in_flight--;
		if (part) {
			part_round_stats(part);
			part->in_flight--;
		}
	}

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);

	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}