/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
	    ordered != QUEUE_ORDERED_TAG &&
	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
	    ordered != QUEUE_ORDERED_TAG_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
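
/*
 * Illustrative sketch (not part of the original file): a driver with a
 * write-back cache that supports flush commands but not FUA might
 * advertise drain-based ordering from its probe path roughly as below.
 * The function name is hypothetical; only blk_queue_ordered() and
 * QUEUE_ORDERED_DRAIN_FLUSH come from this API.
 */
#if 0
static int example_init_ordered(struct request_queue *q)
{
	/* drain the queue around barriers, flushing the cache before/after */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}
#endif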

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
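
/*
 * Annotation (added for clarity, not part of the original): q->ordseq
 * accumulates QUEUE_ORDSEQ_* bits as stages of the barrier sequence
 * complete, and ffz() returns the index of the first zero bit, i.e. the
 * first stage not yet finished.  Assuming the blkdev.h values
 * STARTED=0x01, DRAIN=0x02, PREFLUSH=0x04, BAR=0x08, POSTFLUSH=0x10,
 * DONE=0x20: once STARTED, DRAIN and PREFLUSH are set (ordseq == 0x07),
 * ffz() gives 3 and blk_ordered_cur_seq() returns 1 << 3 == QUEUE_ORDSEQ_BAR.
 */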

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->orig_bar_rq->rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq)) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
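
/*
 * Annotation (added for clarity, not part of the original): because each
 * proxy request is inserted with ELEVATOR_INSERT_FRONT, queueing in
 * reverse order leaves the dispatch queue as pre_flush_rq -> bar_rq ->
 * post_flush_rq for a full QUEUE_ORDERED_DRAIN_FLUSH sequence, while
 * skip collects only the stages that have nothing to issue (e.g.
 * QUEUE_ORDSEQ_DRAIN when no requests are in flight, or the BAR and
 * POSTFLUSH stages for an empty barrier) so they complete immediately.
 */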

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
				(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return true;
}
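
/*
 * Illustrative sketch (not part of the original file): the dispatch path
 * consults blk_do_ordered() before handing a queued request to the driver,
 * roughly as in the __elv_next_request() helper of this era (block/blk.h).
 * Reproduced from memory as an assumption about the caller's contract: a
 * false return means the request is gone, and a cleared *rqp means nothing
 * may be dispatched yet.
 */
#if 0
static inline struct request *example_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			/* may replace rq with a proxy, clear it, or end it */
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
#endif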

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If WAIT flag is not passed then caller may check only what
 *    request was pushed in some internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it. For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
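
/*
 * Illustrative sketch (not part of the original file): a caller that
 * needs completed writes on stable storage (e.g. at fsync time) might
 * use blkdev_issue_flush() roughly as below.  The wrapper is hypothetical;
 * BLKDEV_IFL_WAIT and the error conventions follow the kernel-doc above.
 */
#if 0
static int example_flush_device(struct block_device *bdev)
{
	sector_t error_sector;
	int ret;

	/* issue an empty barrier and wait for its completion */
	ret = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
				 BLKDEV_IFL_WAIT);
	if (ret == -EOPNOTSUPP)
		ret = 0;	/* device does not support flush; nothing to do */
	return ret;
}
#endif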