/*
 * Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

#include "blk.h"

/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA) {
		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}

	q->ordered = ordered;
	q->next_ordered = ordered;

	return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
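
/*
 * Added usage sketch (not part of the original file): a driver whose device
 * has a volatile write cache would typically advertise draining plus explicit
 * cache flushes from its probe/init path, e.g.
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 *
 * while a write-through device with nothing to flush would pass
 * QUEUE_ORDERED_DRAIN, and one honouring FUA writes QUEUE_ORDERED_DRAIN_FUA.
 */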

/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
	if (!q->ordseq)
		return 0;
	return 1 << ffz(q->ordseq);
}
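
/*
 * Added illustration (assumes the usual QUEUE_ORDSEQ_* bit values:
 * STARTED=0x01, DRAIN=0x02, PREFLUSH=0x04, BAR=0x08, POSTFLUSH=0x10,
 * DONE=0x20): q->ordseq accumulates the stages that have already completed
 * or been skipped, so the current stage is the lowest bit still clear.
 * For example, with STARTED and DRAIN done (ordseq == 0x03), ffz() returns
 * 2 and 1 << 2 == QUEUE_ORDSEQ_PREFLUSH is the stage in progress.
 */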

unsigned blk_ordered_req_seq(struct request *rq)
{
	struct request_queue *q = rq->q;

	BUG_ON(q->ordseq == 0);

	if (rq == &q->pre_flush_rq)
		return QUEUE_ORDSEQ_PREFLUSH;
	if (rq == &q->bar_rq)
		return QUEUE_ORDSEQ_BAR;
	if (rq == &q->post_flush_rq)
		return QUEUE_ORDSEQ_POSTFLUSH;

	/*
	 * !fs requests don't need to follow barrier ordering.  Always
	 * put them at the front.  This fixes the following deadlock.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/537473
	 */
	if (rq->cmd_type != REQ_TYPE_FS)
		return QUEUE_ORDSEQ_DRAIN;

	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
		return QUEUE_ORDSEQ_DRAIN;
	else
		return QUEUE_ORDSEQ_DONE;
}
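
/*
 * Added commentary: REQ_ORDERED_COLOR distinguishes requests queued before
 * the current barrier from those queued after it.  The elevator add path
 * (outside this file) is assumed to stamp each request with the queue's
 * current colour and to toggle that colour on every hard barrier, so a
 * normal fs request whose colour matches q->orig_bar_rq was issued before
 * the barrier and must still drain (QUEUE_ORDSEQ_DRAIN), while a mismatch
 * means it arrived after the barrier and is outside this sequence
 * (QUEUE_ORDSEQ_DONE).
 */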

bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
	struct request *rq;

	if (error && !q->orderr)
		q->orderr = error;

	BUG_ON(q->ordseq & seq);
	q->ordseq |= seq;

	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
		return false;

	/*
	 * Okay, sequence complete.
	 */
	q->ordseq = 0;
	rq = q->orig_bar_rq;
	__blk_end_request_all(rq, q->orderr);
	return true;
}

static void pre_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
	rq->rq_disk = q->orig_bar_rq->rq_disk;
	rq->end_io = end_io;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}

static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!blk_rq_sectors(rq))
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);

	/* stash away the original request */
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on that
	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
	 * request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_WRITE;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if (queue_in_flight(q))
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}
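
/*
 * Added illustration: because the pieces above are stacked with
 * ELEVATOR_INSERT_FRONT in reverse order, a QUEUE_ORDERED_DRAIN_FLUSH
 * barrier ends up dispatching roughly as
 *
 *	pre_flush_rq -> bar_rq (proxy for the original barrier) -> post_flush_rq
 *
 * with each step's end_io handler advancing q->ordseq through
 * blk_ordered_complete_seq().
 */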

bool blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
				(rq->cmd_flags & REQ_HARDBARRIER);

	if (!q->ordseq) {
		if (!is_barrier)
			return true;

		if (q->next_ordered != QUEUE_ORDERED_NONE)
			return start_ordered(q, rqp);
		else {
			/*
			 * Queue ordering not supported.  Terminate
			 * with prejudice.
			 */
			blk_dequeue_request(rq);
			__blk_end_request_all(rq, -EOPNOTSUPP);
			*rqp = NULL;
			return false;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (rq->cmd_type != REQ_TYPE_FS &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return true;

	/* Ordered by draining.  Wait for turn. */
	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
		*rqp = NULL;

	return true;
}
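
/*
 * Added commentary on the contract with the dispatch path (assumed to be
 * __elv_next_request()): a false return means the request at *rqp is gone,
 * either terminated above with -EOPNOTSUPP or completed as part of an
 * already-finished ordered sequence; a true return with *rqp set to NULL
 * means nothing is dispatchable yet and the caller should retry later;
 * otherwise *rqp is the request (possibly a flush proxy) to hand to the
 * driver.
 */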

static void bio_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:	blockdev to issue flush for
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @error_sector:	error sector
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a flush for the block device in question. The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to. If the WAIT flag is not passed, the caller may only check
 *    that the request was pushed onto some internal queue for later handling.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic. Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_end_io = bio_end_empty_barrier;
	bio->bi_bdev = bdev;
	if (test_bit(BLKDEV_WAIT, &flags))
		bio->bi_private = &wait;

	bio_get(bio);
	submit_bio(WRITE_BARRIER, bio);
	if (test_bit(BLKDEV_WAIT, &flags)) {
		wait_for_completion(&wait);
		/*
		 * The driver must store the error location in ->bi_sector, if
		 * it supports it. For non-stacked drivers, this should be
		 * copied from blk_rq_pos(rq).
		 */
		if (error_sector)
			*error_sector = bio->bi_sector;
	}

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	else if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);