// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011 Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011 Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
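/*
 * Worked example (editorial illustration, derived from the rules above):
 * a REQ_PREFLUSH|REQ_FUA write with data on a device with a writeback
 * cache but no FUA support runs the full PREFLUSH -> DATA -> POSTFLUSH
 * sequence; on a FUA-capable device the POSTFLUSH step is dropped and the
 * data is issued with REQ_FUA; on a device without a writeback cache the
 * request is executed as a plain write.
 */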
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/* PREFLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH	= (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA		= (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH	= (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE		= (1 << 3),

	REQ_FSEQ_ACTIONS	= REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
				  REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT	= 5 * HZ,
};
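/*
 * Note (editorial): the bit order above defines the step order -
 * blk_flush_cur_seq() picks the lowest unset bit, so a request progresses
 * PREFLUSH -> DATA -> POSTFLUSH -> DONE, skipping any step its policy
 * already marked as complete.
 */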
static void blk_kick_flush(struct request_queue *q,
			   struct blk_flush_queue *fq, unsigned int flags);
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & (1UL << QUEUE_FLAG_WC)) {
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
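/*
 * Return the next incomplete step of @rq's flush sequence: the lowest
 * REQ_FSEQ_* bit not yet set in rq->flush.seq (ffz finds the first zero bit).
 */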
static unsigned int blk_flush_cur_seq(struct request *rq)
{
	return 1 << ffz(rq->flush.seq);
}
static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->rq_flags &= ~RQF_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;
}
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
}
static void blk_account_io_flush(struct request *rq)
{
	struct hd_struct *part = &rq->rq_disk->part0;

	part_stat_lock();
	part_stat_inc(part, ios[STAT_FLUSH]);
	part_stat_add(part, nsecs[STAT_FLUSH],
		      ktime_get_ns() - rq->start_time_ns);
	part_stat_unlock();
}
/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
				   struct blk_flush_queue *fq,
				   unsigned int seq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	unsigned int cmd_flags;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;
	cmd_flags = rq->cmd_flags;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
		blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		blk_mq_end_request(rq, error);
		break;

	default:
		BUG();
	}

	blk_kick_flush(q, fq, cmd_flags);
}
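/*
 * Completion handler for the internal REQ_OP_FLUSH request: release the
 * borrowed tag, toggle flush_running_idx, and advance every request that
 * was waiting on this flush to the next step of its sequence.
 */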
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	struct request *rq, *n;
	unsigned long flags = 0;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
	struct blk_mq_hw_ctx *hctx;

	blk_account_io_flush(flush_rq);

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
		flush_rq->tag = -1;
	} else {
		blk_mq_put_driver_tag(flush_rq);
		flush_rq->internal_tag = -1;
	}

	running = &fq->flush_queue[fq->flush_running_idx];
	BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

	/* account completion of the flush request */
	fq->flush_running_idx ^= 1;

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

	fq->flush_queue_delayed = 0;
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
			   unsigned int flags)
{
	struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);
	struct request *flush_rq = fq->flush_rq;

	/* C1 described at the top of this file */
	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
		return;

	/* C2 and C3
	 *
	 * For blk-mq + scheduling, we can risk having all driver tags
	 * assigned to empty flushes, and we deadlock if we are expecting
	 * other requests to make progress.  Don't defer for that case.
	 */
	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
	    time_before(jiffies,
			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	fq->flush_pending_idx ^= 1;

	blk_rq_init(q, flush_rq);

	/*
	 * In the case of the none scheduler, borrow the tag from the first
	 * request since they can't be in flight at the same time, and take
	 * over the tag's ownership for the flush req.
	 *
	 * In the case of an IO scheduler, the flush rq needs to borrow the
	 * scheduler tag just for cheating the put/get driver tag path.
	 */
	flush_rq->mq_ctx = first_rq->mq_ctx;
	flush_rq->mq_hctx = first_rq->mq_hctx;

	if (!q->elevator) {
		fq->orig_rq = first_rq;
		flush_rq->tag = first_rq->tag;
		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
	} else {
		flush_rq->internal_tag = first_rq->internal_tag;
	}

	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;

	blk_flush_queue_rq(flush_rq, false);
}
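/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA request:
 * record that DATA finished (possibly kicking a deferred flush) and restart
 * the hardware queue in case it went idle while requests were held back.
 */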
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	unsigned long flags;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

	if (q->elevator) {
		WARN_ON(rq->tag < 0);
		blk_mq_put_driver_tag(rq);
	}

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_sched_restart(hctx);
}
/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch a request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long fflags = q->queue_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);
	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_PREFLUSH and FUA for the driver.
	 */
	rq->cmd_flags &= ~REQ_PREFLUSH;
	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
	 * of those flags, we have to set REQ_SYNC to avoid skewing
	 * the request accounting.
	 */
	rq->cmd_flags |= REQ_SYNC;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		blk_mq_end_request(rq, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		blk_mq_request_bypass_insert(rq, false);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->rq_flags |= RQF_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	/*
	 * Mark the steps this request doesn't need as already complete;
	 * blk_flush_complete_seq() then queues the first remaining step.
	 */
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}
/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		       sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * some block devices may not have their queue correctly set up here
	 * (e.g. loop device without a backing file) and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
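/*
 * Usage sketch (editorial illustration, not part of this file): a caller
 * that has already written out its data, e.g. an fsync implementation,
 * might drain the device cache with
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *
 * passing NULL for @error_sector when the error offset isn't needed.
 */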
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
					      int node, int cmd_size, gfp_t flags)
{
	struct blk_flush_queue *fq;
	int rq_sz = sizeof(struct request);

	fq = kzalloc_node(sizeof(*fq), flags, node);
	if (!fq)
		goto fail;

	spin_lock_init(&fq->mq_flush_lock);

	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
	if (!fq->flush_rq)
		goto fail_rq;

	INIT_LIST_HEAD(&fq->flush_queue[0]);
	INIT_LIST_HEAD(&fq->flush_queue[1]);
	INIT_LIST_HEAD(&fq->flush_data_in_flight);

	return fq;

 fail_rq:
	kfree(fq);
 fail:
	return NULL;
}
void blk_free_flush_queue(struct blk_flush_queue *fq)
{
	/* a bio based request queue doesn't have a flush queue */
	if (!fq)
		return;

	kfree(fq->flush_rq);
	kfree(fq);
}