/*
 * block/blk-barrier.c - Functions related to barrier IO handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"
/**
 * blk_queue_ordered - does this queue support ordered writes
 * @q:        the request queue
 * @ordered:  one of QUEUE_ORDERED_*
 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
 *
 * Description:
 *   For journalled file systems, doing ordered writes on a commit
 *   block instead of explicitly doing wait_on_buffer (which is bad
 *   for performance) can be a big win. Block drivers supporting this
 *   feature should call this function and indicate so.
 *
 **/
int blk_queue_ordered(struct request_queue *q, unsigned ordered,
                      prepare_flush_fn *prepare_flush_fn)
{
        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
            prepare_flush_fn == NULL) {
                printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }

        if (ordered != QUEUE_ORDERED_NONE &&
            ordered != QUEUE_ORDERED_DRAIN &&
            ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
            ordered != QUEUE_ORDERED_DRAIN_FUA &&
            ordered != QUEUE_ORDERED_TAG &&
            ordered != QUEUE_ORDERED_TAG_FLUSH &&
            ordered != QUEUE_ORDERED_TAG_FUA) {
                printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
                return -EINVAL;
        }

        q->ordered = ordered;
        q->next_ordered = ordered;
        q->prepare_flush_fn = prepare_flush_fn;

        return 0;
}
EXPORT_SYMBOL(blk_queue_ordered);
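
/*
 * Illustrative sketch, not part of the original file: a driver whose device
 * has a volatile write cache would typically register drain+flush ordering
 * from its probe/init path roughly as below.  The identifiers
 * example_prepare_flush() and example_register_ordered() are hypothetical.
 */
static void example_prepare_flush(struct request_queue *q, struct request *rq)
{
        /* Set up @rq as a cache-flush command for this particular device. */
}

static __maybe_unused int example_register_ordered(struct request_queue *q)
{
        /*
         * Drain the queue around each barrier and issue a cache flush
         * before and after the barrier request itself.
         */
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                 example_prepare_flush);
}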
/*
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
        if (!q->ordseq)
                return 0;
        /* The current stage is the lowest QUEUE_ORDSEQ_* bit not yet set. */
        return 1 << ffz(q->ordseq);
}
unsigned blk_ordered_req_seq(struct request *rq)
{
        struct request_queue *q = rq->q;

        BUG_ON(q->ordseq == 0);

        if (rq == &q->pre_flush_rq)
                return QUEUE_ORDSEQ_PREFLUSH;
        if (rq == &q->bar_rq)
                return QUEUE_ORDSEQ_BAR;
        if (rq == &q->post_flush_rq)
                return QUEUE_ORDSEQ_POSTFLUSH;

        /*
         * !fs requests don't need to follow barrier ordering.  Always
         * put them at the front.  This fixes the following deadlock.
         *
         * http://thread.gmane.org/gmane.linux.kernel/537473
         */
        if (!blk_fs_request(rq))
                return QUEUE_ORDSEQ_DRAIN;

        /*
         * The ordered color distinguishes requests issued before the
         * barrier (same color as the original barrier request, still to
         * be drained) from those issued after it (color flipped).
         */
        if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
            (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
                return QUEUE_ORDSEQ_DRAIN;
        else
                return QUEUE_ORDSEQ_DONE;
}
void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
        struct request *rq;

        if (error && !q->orderr)
                q->orderr = error;

        BUG_ON(q->ordseq & seq);
        q->ordseq |= seq;

        if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
                return;

        /*
         * Okay, sequence complete.
         */
        q->ordseq = 0;
        rq = q->orig_bar_rq;

        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
                BUG();
}
static void pre_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
}

static void bar_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
}

static void post_flush_end_io(struct request *rq, int error)
{
        elv_completed_request(rq->q, rq);
        blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
static void queue_flush(struct request_queue *q, unsigned which)
{
        struct request *rq;
        rq_end_io_fn *end_io;

        if (which == QUEUE_ORDERED_PREFLUSH) {
                rq = &q->pre_flush_rq;
                end_io = pre_flush_end_io;
        } else {
                rq = &q->post_flush_rq;
                end_io = post_flush_end_io;
        }

        blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
static inline struct request *start_ordered(struct request_queue *q,
                                            struct request *rq)
{
        q->orderr = 0;
        q->ordered = q->next_ordered;
        q->ordseq |= QUEUE_ORDSEQ_STARTED;

        /*
         * Prep proxy barrier request.
         */
        blkdev_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = &q->bar_rq;
        blk_rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
        if (q->ordered & QUEUE_ORDERED_FUA)
                rq->cmd_flags |= REQ_FUA;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
        rq->end_io = bar_end_io;

        /*
         * Queue the ordered sequence.  As we stack the requests at the
         * head, we need to queue them in reverse order.  Note that we
         * rely on the fact that no fs request uses ELEVATOR_INSERT_FRONT,
         * so no fs request can slip in between the ordered sequence.
         * If this request is an empty barrier, we don't need a postflush
         * at all, since no data is written between the pre and post
         * flush; a single flush suffices.
         */
        if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

        if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
                queue_flush(q, QUEUE_ORDERED_PREFLUSH);
                rq = &q->pre_flush_rq;
        } else
                q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

        if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
                q->ordseq |= QUEUE_ORDSEQ_DRAIN;
        else
                rq = NULL;

        return rq;
}
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
        struct request *rq = *rqp;
        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

        if (!q->ordseq) {
                if (!is_barrier)
                        return 1;

                if (q->next_ordered != QUEUE_ORDERED_NONE) {
                        *rqp = start_ordered(q, rq);
                        return 1;
                } else {
                        /*
                         * This can happen when the queue switches to
                         * ORDERED_NONE while this request is on it.
                         */
                        blkdev_dequeue_request(rq);
                        if (__blk_end_request(rq, -EOPNOTSUPP,
                                              blk_rq_bytes(rq)))
                                BUG();
                        *rqp = NULL;
                        return 0;
                }
        }

        /*
         * Ordered sequence in progress
         */

        /* Special requests are not subject to ordering rules. */
        if (!blk_fs_request(rq) &&
            rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                return 1;

        if (q->ordered & QUEUE_ORDERED_TAG) {
                /* Ordered by tag.  Blocking the next barrier is enough. */
                if (is_barrier && rq != &q->bar_rq)
                        *rqp = NULL;
        } else {
                /* Ordered by draining.  Wait for turn. */
                WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
                if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
                        *rqp = NULL;
        }

        return 1;
}
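
/*
 * Illustrative sketch, not part of the original file: the dispatch path is
 * expected to consult blk_do_ordered() for each request it pulls off the
 * queue head, roughly as __elv_next_request() in block/elevator.c does
 * (paraphrased, not verbatim).  A zero return means the request was
 * consumed (e.g. failed with -EOPNOTSUPP); a non-zero return means *rqp is
 * either the request to dispatch (possibly the proxy barrier) or NULL if
 * dispatch must wait.  example_next_from_head() is hypothetical.
 */
static __maybe_unused struct request *
example_next_from_head(struct request_queue *q)
{
        struct request *rq;

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                if (blk_do_ordered(q, &rq))
                        return rq;      /* may be NULL while draining */
                /* rq was completed by blk_do_ordered(); look again */
        }
        return NULL;
}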
static void bio_end_empty_barrier(struct bio *bio, int err)
{
        if (err) {
                if (err == -EOPNOTSUPP)
                        set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        }

        complete(bio->bi_private);
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev:         blockdev to issue flush for
 * @error_sector: where to report the first error sector, may be NULL
 *
 * Description:
 *    Issue a flush for the block device in question and wait for it to
 *    complete.  Callers can supply room for storing the error offset in
 *    case of a flush error, if they wish to.
 **/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q;
        struct bio *bio;
        int ret;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return -ENOMEM;

        bio->bi_end_io = bio_end_empty_barrier;
        bio->bi_private = &wait;
        bio->bi_bdev = bdev;
        submit_bio(1 << BIO_RW_BARRIER, bio);

        wait_for_completion(&wait);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it.  For non-stacked drivers, this should be copied
         * from rq->sector.
         */
        if (error_sector)
                *error_sector = bio->bi_sector;

        ret = 0;
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
        else if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
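
/*
 * Illustrative sketch, not part of the original file: a filesystem that has
 * just written its commit block can force the device's write cache out with
 * a call like the one below.  example_flush_dev() is hypothetical; callers
 * commonly treat -EOPNOTSUPP as non-fatal, as shown.
 */
static __maybe_unused int example_flush_dev(struct block_device *bdev)
{
        int err;

        err = blkdev_issue_flush(bdev, NULL);
        if (err == -EOPNOTSUPP)
                err = 0;        /* device has no cache or no flush support */
        return err;
}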