/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 */
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn)
{
	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
					     QUEUE_ORDERED_DO_POSTFLUSH))) {
		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
		return -EINVAL;
	}

	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;
	q->prepare_flush_fn = prepare_flush_fn;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
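
/*
 * Usage note (illustrative sketch, not part of the original file): a driver
 * whose device has a volatile write-back cache would typically register a
 * flush-based ordered mode at initialization time.  "mydrv_prepare_flush"
 * below is a hypothetical helper that turns the proxy request into a cache
 * flush command for the hardware, comparable to what drivers such as
 * virtio_blk do:
 *
 *	static void mydrv_prepare_flush(struct request_queue *q,
 *					struct request *rq)
 *	{
 *		rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
 *		rq->cmd[0] = REQ_LB_OP_FLUSH;
 *	}
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
 */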
/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
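
/*
 * Illustrative note (not part of the original file): q->ordseq accumulates
 * the QUEUE_ORDSEQ_* stages that have already completed, so the "current"
 * stage is the lowest bit that is still clear.  Assuming the QUEUE_ORDSEQ_*
 * values from include/linux/blkdev.h (STARTED=0x01, DRAIN=0x02,
 * PREFLUSH=0x04, BAR=0x08, POSTFLUSH=0x10, DONE=0x20), an ordseq of
 * 0x01|0x02|0x04 = 0x07 gives ffz() == 3, so blk_ordered_cur_seq() returns
 * 0x08, i.e. QUEUE_ORDSEQ_BAR: the barrier request itself is the next stage
 * to complete.
 */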
unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (!blk_fs_request(rq))
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}
bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}
static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue the ordered sequence.  As we stack the requests at the head,
	 * we need to queue them in reverse order.  Note that we rely on the
	 * fact that no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete the skipped sequences.  If the whole sequence is complete,
	 * return false to tell the elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
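
/*
 * Illustrative note (not part of the original file): because every step
 * above goes in with ELEVATOR_INSERT_FRONT, queuing the proxies in reverse
 * order leaves the queue head in dispatch order.  For
 * QUEUE_ORDERED_DRAIN_FLUSH with a non-empty barrier, the device therefore
 * sees pre_flush_rq, then bar_rq, then post_flush_rq, with the queue drained
 * of in-flight requests before the sequence is allowed to proceed.
 */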
bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
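
/*
 * Usage note (illustrative, not part of the original file): this hook is
 * consumed by the request dispatch path rather than by drivers.  Roughly,
 * __elv_next_request() does something like
 *
 *	while (!list_empty(&q->queue_head)) {
 *		rq = list_entry_rq(q->queue_head.next);
 *		if (blk_do_ordered(q, &rq))
 *			return rq;
 *	}
 *
 * so returning false tells the elevator the request is gone (the sequence
 * already completed or the request was terminated), while returning true
 * with *rqp set to NULL holds back dispatch until the ordered sequence has
 * advanced far enough.
 */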
static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	complete(bio->bi_private);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:		blockdev to issue flush for
 * @error_sector:	error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_private = &wait;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	wait_for_completion(&wait);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it. For non-stacked drivers, this should be copied
	 * from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_sector;

	ret = 0;
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
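
/*
 * Usage note (illustrative sketch, not part of the original file): a
 * filesystem that has just committed metadata typically flushes the cache
 * of the device backing its superblock,
 *
 *	blkdev_issue_flush(sb->s_bdev, NULL);
 *
 * passing a NULL error_sector when it has no use for the error offset.
 */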
static void blkdev_discard_end_io(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	__free_page(bio_page(bio));

	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	DISCARD_FL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = flags & DISCARD_FL_BARRIER ?
		DISCARD_BARRIER : DISCARD_NOBARRIER;
	struct bio *bio;
	struct page *page;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	while (nr_sects && !ret) {
		unsigned int sector_size = q->limits.logical_block_size;
		unsigned int max_discard_sectors =
			min(q->limits.max_discard_sectors, UINT_MAX >> 9);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio)
			goto out;
		bio->bi_sector = sector;
		bio->bi_end_io = blkdev_discard_end_io;
		bio->bi_bdev = bdev;
		if (flags & DISCARD_FL_WAIT)
			bio->bi_private = &wait;

		/*
		 * Add a zeroed one-sector payload as that's what
		 * our current implementations need.  If we'll ever need
		 * more the interface will need revisiting.
		 */
		page = alloc_page(gfp_mask | __GFP_ZERO);
		if (!page)
			goto out_free_bio;
		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
			goto out_free_page;

		/*
		 * And override the bio size - the way discard works we
		 * touch many more blocks on disk than the actual payload
		 * length.
		 */
		if (nr_sects > max_discard_sectors) {
			bio->bi_size = max_discard_sectors << 9;
			nr_sects -= max_discard_sectors;
			sector += max_discard_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		bio_get(bio);
		submit_bio(type, bio);

		if (flags & DISCARD_FL_WAIT)
			wait_for_completion(&wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP))
			ret = -EOPNOTSUPP;
		else if (!bio_flagged(bio, BIO_UPTODATE))
			ret = -EIO;
		bio_put(bio);
	}
	return ret;
out_free_page:
	__free_page(page);
out_free_bio:
	bio_put(bio);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
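
/*
 * Usage note (illustrative sketch, not part of the original file): a caller
 * that wants the discard ordered against other writes and wants to wait for
 * it to finish would do something like
 *
 *	ret = blkdev_issue_discard(bdev, start_sector, nr_sectors, GFP_KERNEL,
 *				   DISCARD_FL_BARRIER | DISCARD_FL_WAIT);
 *
 * while a purely advisory, fire-and-forget discard can pass 0 for @flags.
 */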