/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ's benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. This feature enables
 * BFQ to provide applications in these classes with a very low
 * latency. Finally, BFQ also features additional heuristics for
 * preserving both a low latency and a high throughput on NCQ-capable,
 * rotational or flash-based devices, and to get the job done quickly
 * for applications consisting of many I/O-bound processes.
 *
 * BFQ is described in [1], which also contains a reference to the
 * initial, more theoretical paper on BFQ. The interested reader can
 * find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties. With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * one that guarantees a low latency to soft real-time applications,
 * and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}
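/*
 * Each BFQ_BFQQ_FNS(name) invocation below expands to three helpers:
 * for example, BFQ_BFQQ_FNS(busy) defines bfq_mark_bfqq_busy(),
 * bfq_clear_bfqq_busy() and bfq_bfqq_busy(), which respectively set,
 * clear and test the BFQQF_busy bit in bfqq->flags.
 */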
BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(idle_window);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS
/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty of a backwards seek, in number of sectors. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget values, in sectors and number of requests. */
static const int bfq_default_max_budget = 16 * 1024;
/*
 * Async to sync throughput distribution is controlled as follows:
 * when an async request is served, the entity is charged the number
 * of sectors of the request, multiplied by the factor below.
 */
static const int bfq_async_charge_factor = 10;

/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;
static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 32/8)
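/*
 * Note: seek_history acts as a 32-bit shift register, with one bit
 * per sampled request. A queue is thus deemed seeky when more than
 * 32/8 == 4 of its last 32 sampled requests looked like seeks.
 */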
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/* Shift used for peak rate fixed precision calculations. */
#define BFQ_RATE_SHIFT		16
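/*
 * For instance, with BFQ_RATE_SHIFT == 16, a measured peak rate of
 * 1 sector/usec is stored as 1 << 16 == 65536, so rates can be
 * tracked with a granularity of 1/65536 sector/usec.
 */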
/*
 * By default, BFQ computes the duration of the weight raising for
 * interactive applications automatically, using the following formula:
 * duration = (R / r) * T, where r is the peak rate of the device, and
 * R and T are two reference parameters.
 * In particular, R is the peak rate of the reference device (see below),
 * and T is a reference time: given the systems that are likely to be
 * installed on the reference device according to its speed class, T is
 * about the maximum time needed, under BFQ and while reading two files in
 * parallel, to load typical large applications on these systems.
 * In practice, the slower/faster the device at hand is, the more/less it
 * takes to load applications with respect to the reference device.
 * Accordingly, the longer/shorter BFQ grants weight raising to interactive
 * applications.
 *
 * BFQ uses four different reference pairs (R, T), depending on:
 * . whether the device is rotational or non-rotational;
 * . whether the device is slow, such as old or portable HDDs, as well as
 *   SD cards, or fast, such as newer HDDs and SSDs.
 *
 * The device's speed class is dynamically (re)detected in
 * bfq_update_peak_rate() every time the estimated peak rate is updated.
 *
 * In the following definitions, R_slow[0]/R_fast[0] and
 * T_slow[0]/T_fast[0] are the reference values for a slow/fast
 * rotational device, whereas R_slow[1]/R_fast[1] and
 * T_slow[1]/T_fast[1] are the reference values for a slow/fast
 * non-rotational device. Finally, device_speed_thresh are the
 * thresholds used to switch between speed classes. The reference
 * rates are not the actual peak rates of the devices used as a
 * reference, but slightly lower values. The reason for using these
 * slightly lower values is that the peak-rate estimator tends to
 * yield slightly lower values than the actual peak rate (it can yield
 * the actual peak rate only if there is only one process doing I/O,
 * and the process does sequential I/O).
 *
 * Both the reference peak rates and the thresholds are measured in
 * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
 */
static int R_slow[2] = {1000, 10700};
static int R_fast[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize the
 * following arrays, which entails that they can be initialized only in a
 * function.
 */
static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
#define RQ_BIC(rq)	((struct bfq_io_cq *) (rq)->elv.priv[0])
#define RQ_BFQQ(rq)	((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}
/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}
/*
 * Scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
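/*
 * Worked example of the distances above: with the head at sector 1000
 * and bfq_back_penalty == 2, a request at sector 1100 gets distance
 * d = 100, while one at sector 900 gets d = (1000 - 900) * 2 == 200.
 * The forward request therefore wins even though both requests are
 * 100 sectors away from the head.
 */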
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}
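/*
 * On a miss, *ret_parent and *rb_link describe the insertion point
 * for a new node at @sector, so a caller such as
 * bfq_pos_tree_add_move() below can splice a queue into the tree
 * without a second traversal.
 */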
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}
/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}
/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_bfqq_may_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}
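/*
 * For example, if all active queues have weight 100, the
 * queue_weights_tree holds a single counter node, so
 * bfq_differentiated_weights() returns false and the scenario is
 * deemed symmetric; as soon as one queue switches to weight 200, a
 * second node appears and the scenario becomes asymmetric.
 */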
/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 * 1) the entity is associated with a queue,
	 * 2) a request arrival has caused the queue to become both
	 *    non-weight-raised, and hence change its weight, and
	 *    backlogged; in this respect, each of the two events
	 *    causes an invocation of this function,
	 * 3) this is the invocation of this function caused by the
	 *    second event. This second invocation is actually useless,
	 *    and we handle this fact by exiting immediately. More
	 *    efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in its
	 * turn, causes the scenario to be deemed wrongly symmetric in
	 * case entity's weight would have been the only weight making
	 * the scenario asymmetric. On the bright side, no unbalance
	 * will however occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}
/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd, struct bfq_entity *entity,
			     struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}
/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}
static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}
/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	/*
	 * If there are no weight-raised queues, then amplify service
	 * by just the async charge factor; otherwise amplify service
	 * by twice the async charge factor, to further reduce latency
	 * for weight-raised queues.
	 */
	if (bfqq->bfqd->wr_busy_queues == 0)
		return blk_rq_sectors(rq) * bfq_async_charge_factor;

	return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
}
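/*
 * For example, an 8-sector async request is charged 8 * 10 == 80
 * sectors of service with bfq_async_charge_factor == 10, and 160
 * sectors if at least one weight-raised queue is busy, which shrinks
 * the share of the device that async I/O can consume.
 */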
/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown).  We do this because if the queue has not enough
 * budget for its first request, it has to go through two dispatch
 * rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq);
	}
}
static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
	if (bic->saved_idle_window)
		bfq_mark_bfqq_idle_window(bfqq);
	else
		bfq_clear_bfqq_idle_window(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		bfq_log_bfqq(bfqq->bfqd, bfqq,
			     "resume state: switching off wr");

		bfqq->wr_coeff = 1;
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;
}
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}
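/*
 * Note on the computation above: the result is the number of
 * references held by processes, i.e., the total references minus
 * those taken by not-yet-completed requests (bfqq->allocated) and by
 * the queue sitting on a service tree (bfqq->entity.on_st).
 */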
/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}
/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}
/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call just 'large' these bursts. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (keeping temporarily track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/*
	 * If bfqq is already in the burst list or is part of a large
	 * burst, or finally has just been split, then there is
	 * nothing else to do.
	 */
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
	    bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_after_eq_jiffies(bfqq->split_time +
				     msecs_to_jiffies(10)))
		return;

	/*
	 * If bfqq's creation happens late enough, or bfqq belongs to
	 * a different group than the burst group, then the current
	 * burst is finished, and related data structures must be
	 * reset.
	 *
	 * In this respect, consider the special case where bfqq is
	 * the very first queue created after BFQ is selected for this
	 * device. In this case, last_ins_in_burst and
	 * burst_parent_entity are not yet significant when we get
	 * here. But it is easy to verify that, whether or not the
	 * following condition is true, bfqq will end up being
	 * inserted into the burst list. In particular the list will
	 * happen to contain only bfqq. And this is exactly what has
	 * to happen, as bfqq may be the first queue of the first
	 * burst.
	 */
	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
	    bfqd->bfq_burst_interval) ||
	    bfqq->entity.parent != bfqd->burst_parent_entity) {
		bfqd->large_burst = false;
		bfq_reset_burst_list(bfqd, bfqq);
		goto end;
	}

	/*
	 * If we get here, then bfqq is being activated shortly after the
	 * last queue. So, if the current burst is also large, we can mark
	 * bfqq as belonging to this large burst immediately.
	 */
	if (bfqd->large_burst) {
		bfq_mark_bfqq_in_large_burst(bfqq);
		goto end;
	}

	/*
	 * If we get here, then a large-burst state has not yet been
	 * reached, but bfqq is being activated shortly after the last
	 * queue. Then we add bfqq to the burst.
	 */
	bfq_add_to_burst(bfqd, bfqq);
end:
	/*
	 * At this point, bfqq either has been added to the current
	 * burst or has caused the current burst to terminate and a
	 * possible new burst to start. In particular, in the second
	 * case, bfqq has become the first queue in the possible new
	 * burst. In both cases last_ins_in_burst needs to be moved
	 * forward.
	 */
	bfqd->last_ins_in_burst = jiffies;
}
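/*
 * Example trace of the logic above: queues A, B and C are created
 * within bfq_burst_interval of one another, so A starts a burst list
 * and B and C are appended to it. If the list size then hits
 * bfq_large_burst_thresh, all three are marked in_large_burst and the
 * list is dropped; a queue D created shortly afterwards is marked
 * immediately, while one created much later starts a fresh burst.
 */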
static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;

	return entity->budget - entity->service;
}
/*
 * If enough samples have been computed, return the current max budget
 * stored in bfqd, which is dynamically updated according to the
 * estimated disk peak rate; otherwise return the default max budget.
 */
static int bfq_max_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget;
	else
		return bfqd->bfq_max_budget;
}

/*
 * Return min budget, which is a fraction of the current or default
 * max budget (trying with 1/32).
 */
static int bfq_min_budget(struct bfq_data *bfqd)
{
	if (bfqd->budgets_assigned < bfq_stats_min_budgets)
		return bfq_default_max_budget / 32;
	else
		return bfqd->bfq_max_budget / 32;
}
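/*
 * With the defaults above, before enough budgets have been assigned
 * the max budget is 16 * 1024 sectors (8 MiB) and the min budget is
 * 16 * 1024 / 32 == 512 sectors (256 KiB).
 */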
/*
 * The next function, invoked after the input queue bfqq switches from
 * idle to busy, updates the budget of bfqq. The function also tells
 * whether the in-service queue should be expired, by returning
 * true. The purpose of expiring the in-service queue is to give bfqq
 * the chance to possibly preempt the in-service queue, and the reason
 * for preempting the in-service queue is to achieve one of the two
 * goals below.
 *
 * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has
 * expired because it has remained idle. In particular, bfqq may have
 * expired for one of the following two reasons:
 *
 * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling
 *   and did not make it to issue a new request before its last
 *   request was served;
 *
 * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue
 *   a new request before the expiration of the idling-time.
 *
 * Even if bfqq has expired for one of the above reasons, the process
 * associated with the queue may be however issuing requests greedily,
 * and thus be sensitive to the bandwidth it receives (bfqq may have
 * remained idle for other reasons: CPU high load, bfqq not enjoying
 * idling, I/O throttling somewhere in the path from the process to
 * the I/O scheduler, ...). But if, after every expiration for one of
 * the above two reasons, bfqq has to wait for the service of at least
 * one full budget of another queue before being served again, then
 * bfqq is likely to get a much lower bandwidth or resource time than
 * its reserved ones. To address this issue, two countermeasures need
 * to be taken.
 *
 * First, the budget and the timestamps of bfqq need to be updated in
 * a special way on bfqq reactivation: they need to be updated as if
 * bfqq did not remain idle and did not expire. In fact, if they are
 * computed as if bfqq expired and remained idle until reactivation,
 * then the process associated with bfqq is treated as if, instead of
 * being greedy, it stopped issuing requests when bfqq remained idle,
 * and restarts issuing requests only on this reactivation. In other
 * words, the scheduler does not help the process recover the "service
 * hole" between bfqq expiration and reactivation. As a consequence,
 * the process receives a lower bandwidth than its reserved one. In
 * contrast, to recover this hole, the budget must be updated as if
 * bfqq was not expired at all before this reactivation, i.e., it must
 * be set to the value of the remaining budget when bfqq was
 * expired. Along the same line, timestamps need to be assigned the
 * value they had the last time bfqq was selected for service, i.e.,
 * before last expiration. Thus timestamps need to be back-shifted
 * with respect to their normal computation (see [1] for more details
 * on this tricky aspect).
 *
 * Secondly, to allow the process to recover the hole, the in-service
 * queue must be expired too, to give bfqq the chance to preempt it
 * immediately. In fact, if bfqq has to wait for a full budget of the
 * in-service queue to be completed, then it may become impossible to
 * let the process recover the hole, even if the back-shifted
 * timestamps of bfqq are lower than those of the in-service queue. If
 * this happens for most or all of the holes, then the process may not
 * receive its reserved bandwidth. In this respect, it is worth noting
 * that, since the service of outstanding requests is not preemptible,
 * a little fraction of the holes may however be unrecoverable, thereby
 * causing a little loss of bandwidth.
 *
 * The last important point is detecting whether bfqq does need this
 * bandwidth recovery. In this respect, the next function deems the
 * process associated with bfqq greedy, and thus allows it to recover
 * the hole, if: 1) the process is waiting for the arrival of a new
 * request (which implies that bfqq expired for one of the above two
 * reasons), and 2) such a request has arrived soon. The first
 * condition is controlled through the flag non_blocking_wait_rq,
 * while the second through the flag arrived_in_time. If both
 * conditions hold, then the function computes the budget in the
 * above-described special way, and signals that the in-service queue
 * should be expired. Timestamp back-shifting is done later in
 * __bfq_activate_entity.
 *
 * 2. Reduce latency. Even if timestamps are not backshifted to let
 * the process associated with bfqq recover a service hole, bfqq may
 * however happen to have, after being (re)activated, a lower finish
 * timestamp than the in-service queue. That is, the next budget of
 * bfqq may have to be completed before the one of the in-service
 * queue. If this is the case, then preempting the in-service queue
 * allows this goal to be achieved, apart from the unpreemptible,
 * outstanding requests mentioned above.
 *
 * Unfortunately, regardless of which of the above two goals one wants
 * to achieve, service trees need first to be updated to know whether
 * the in-service queue must be preempted. To have service trees
 * correctly updated, the in-service queue must be expired and
 * rescheduled, and bfqq must be scheduled too. This is one of the
 * most costly operations (in future versions, the scheduling
 * mechanism may be re-designed in such a way to make it possible to
 * know whether preemption is needed without needing to update service
 * trees). In addition, queue preemptions almost always cause random
 * I/O, and thus loss of throughput. Because of these facts, the next
 * function adopts the following simple scheme to avoid both costly
 * operations and too frequent preemptions: it requests the expiration
 * of the in-service queue (unconditionally) only for queues that need
 * to recover a hole, or that either are weight-raised or deserve to
 * be weight-raised.
 */
static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
						struct bfq_queue *bfqq,
						bool arrived_in_time,
						bool wr_or_deserves_wr)
{
	struct bfq_entity *entity = &bfqq->entity;

	if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
		/*
		 * We do not clear the flag non_blocking_wait_rq here, as
		 * the latter is used in bfq_activate_bfqq to signal
		 * that timestamps need to be back-shifted (and is
		 * cleared right after).
		 */

		/*
		 * In next assignment we rely on that either
		 * entity->service or entity->budget are not updated
		 * on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
		 * remain unchanged after such an expiration, and the
		 * following statement therefore assigns to
		 * entity->budget the remaining budget on such an
		 * expiration. For clarity, entity->service is not
		 * updated on expiration in any case, and, in normal
		 * operation, is reset only when bfqq is selected for
		 * service (see bfq_get_next_queue).
		 */
		entity->budget = min_t(unsigned long,
				       bfq_bfqq_budget_left(bfqq),
				       bfqq->max_budget);

		return true;
	}

	entity->budget = max_t(unsigned long, bfqq->max_budget,
			       bfq_serv_to_charge(bfqq->next_rq, bfqq));
	bfq_clear_bfqq_non_blocking_wait_rq(bfqq);
	return wr_or_deserves_wr;
}
static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->RT_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 13 seconds. Tests show that
	 * higher values than 13 seconds often yield the opposite of
	 * the desired result, i.e., worsen responsiveness by letting
	 * non-interactive and non-soft-real-time applications
	 * preserve weight raising for a too long time interval.
	 *
	 * On the other end, lower values than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	if (dur > msecs_to_jiffies(13000))
		dur = msecs_to_jiffies(13000);
	else if (dur < msecs_to_jiffies(3000))
		dur = msecs_to_jiffies(3000);

	return dur;
}
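/*
 * The function above implements the duration = (R / r) * T formula
 * described before the definition of R_slow/R_fast: RT_prod caches
 * R * T for the detected speed class, so dividing it by the measured
 * peak rate r yields the weight-raising duration. For example, a
 * device running at half the reference rate (r == R / 2) gets a
 * duration of 2 * T, clamped to the [3, 13] second range above.
 */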
static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     unsigned int old_wr_coeff,
					     bool wr_or_deserves_wr,
					     bool interactive,
					     bool in_burst,
					     bool soft_rt)
{
	if (old_wr_coeff == 1 && wr_or_deserves_wr) {
		/* start a weight-raising period */
		if (interactive) {
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else {
			bfqq->wr_start_at_switch_to_srt = jiffies;
			bfqq->wr_coeff = bfqd->bfq_wr_coeff *
				BFQ_SOFTRT_WEIGHT_FACTOR;
			bfqq->wr_cur_max_time =
				bfqd->bfq_wr_rt_max_time;
		}

		/*
		 * If needed, further reduce budget to make sure it is
		 * close to bfqq's backlog, so as to reduce the
		 * scheduling-error component due to a too large
		 * budget. Do not care about throughput consequences,
		 * but only about latency. Finally, do not assign a
		 * too small budget either, to avoid increasing
		 * latency by causing too frequent expirations.
		 */
		bfqq->entity.budget = min_t(unsigned long,
					    bfqq->entity.budget,
					    2 * bfq_min_budget(bfqd));
	} else if (old_wr_coeff > 1) {
		if (interactive) { /* update wr coeff and duration */
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
		} else if (in_burst)
			bfqq->wr_coeff = 1;
		else if (soft_rt) {
			/*
			 * The application is now or still meeting the
			 * requirements for being deemed soft rt. We
			 * can then correctly and safely (re)charge
			 * the weight-raising duration for the
			 * application with the weight-raising
			 * duration for soft rt applications.
			 *
			 * In particular, doing this recharge now, i.e.,
			 * before the weight-raising period for the
			 * application finishes, reduces the probability
			 * of the following negative scenario:
			 * 1) the weight of a soft rt application is
			 *    raised at startup (as for any newly
			 *    created application),
			 * 2) since the application is not interactive,
			 *    at a certain time weight-raising is
			 *    stopped for the application,
			 * 3) at that time the application happens to
			 *    still have pending requests, and hence
			 *    is destined to not have a chance to be
			 *    deemed soft rt before these requests are
			 *    completed (see the comments to the
			 *    function bfq_bfqq_softrt_next_start()
			 *    for details on soft rt detection),
			 * 4) these pending requests experience a high
			 *    latency because the application is not
			 *    weight-raised while they are pending.
			 */
			if (bfqq->wr_cur_max_time !=
				bfqd->bfq_wr_rt_max_time) {
				bfqq->wr_start_at_switch_to_srt =
					bfqq->last_wr_start_finish;

				bfqq->wr_cur_max_time =
					bfqd->bfq_wr_rt_max_time;
				bfqq->wr_coeff = bfqd->bfq_wr_coeff *
					BFQ_SOFTRT_WEIGHT_FACTOR;
			}
			bfqq->last_wr_start_finish = jiffies;
		}
	}
}
static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd,
					struct bfq_queue *bfqq)
{
	return bfqq->dispatched == 0 &&
		time_is_before_jiffies(
			bfqq->budget_timeout +
			bfqd->bfq_wr_min_idle_time);
}
static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
					     struct bfq_queue *bfqq,
					     int old_wr_coeff,
					     struct request *rq,
					     bool *interactive)
{
	bool soft_rt, in_burst, wr_or_deserves_wr,
		bfqq_wants_to_preempt,
		idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq),
		/*
		 * See the comments on
		 * bfq_bfqq_update_budg_for_activation for
		 * details on the usage of the next variable.
		 */
		arrived_in_time = ktime_get_ns() <=
			bfqq->ttime.last_end_request +
			bfqd->bfq_slice_idle * 3;

	bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags);

	/*
	 * bfqq deserves to be weight-raised if:
	 * - it is sync,
	 * - it does not belong to a large burst,
	 * - it has been idle for enough time or is soft real-time,
	 * - is linked to a bfq_io_cq (it is not shared in any sense).
	 */
	in_burst = bfq_bfqq_in_large_burst(bfqq);
	soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
		!in_burst &&
		time_is_before_jiffies(bfqq->soft_rt_next_start);
	*interactive = !in_burst && idle_for_long_time;
	wr_or_deserves_wr = bfqd->low_latency &&
		(bfqq->wr_coeff > 1 ||
		 (bfq_bfqq_sync(bfqq) &&
		  bfqq->bic && (*interactive || soft_rt)));

	/*
	 * Using the last flag, update budget and check whether bfqq
	 * may want to preempt the in-service queue.
	 */
	bfqq_wants_to_preempt =
		bfq_bfqq_update_budg_for_activation(bfqd, bfqq,
						    arrived_in_time,
						    wr_or_deserves_wr);

	/*
	 * If bfqq happened to be activated in a burst, but has been
	 * idle for much more than an interactive queue, then we
	 * assume that, in the overall I/O initiated in the burst, the
	 * I/O associated with bfqq is finished. So bfqq does not need
	 * to be treated as a queue belonging to a burst
	 * anymore. Accordingly, we reset bfqq's in_large_burst flag
	 * if set, and remove bfqq from the burst list if it's
	 * there. We do not decrement burst_size, because the fact
	 * that bfqq does not need to belong to the burst list any
	 * more does not invalidate the fact that bfqq was created in
	 * a burst.
	 */
	if (likely(!bfq_bfqq_just_created(bfqq)) &&
	    idle_for_long_time &&
	    time_is_before_jiffies(
		    bfqq->budget_timeout +
		    msecs_to_jiffies(10000))) {
		hlist_del_init(&bfqq->burst_list_node);
		bfq_clear_bfqq_in_large_burst(bfqq);
	}

	bfq_clear_bfqq_just_created(bfqq);

	if (!bfq_bfqq_IO_bound(bfqq)) {
		if (arrived_in_time) {
			bfqq->requests_within_timer++;
			if (bfqq->requests_within_timer >=
			    bfqd->bfq_requests_within_timer)
				bfq_mark_bfqq_IO_bound(bfqq);
		} else
			bfqq->requests_within_timer = 0;
	}

	if (bfqd->low_latency) {
		if (unlikely(time_is_after_jiffies(bfqq->split_time)))
			/* wraparound */
			bfqq->split_time =
				jiffies - bfqd->bfq_wr_min_idle_time - 1;

		if (time_is_before_jiffies(bfqq->split_time +
					   bfqd->bfq_wr_min_idle_time)) {
			bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq,
							 old_wr_coeff,
							 wr_or_deserves_wr,
							 *interactive,
							 in_burst,
							 soft_rt);

			if (old_wr_coeff != bfqq->wr_coeff)
				bfqq->entity.prio_changed = 1;
		}
	}

	bfqq->last_idle_bklogged = jiffies;
	bfqq->service_from_backlogged = 0;
	bfq_clear_bfqq_softrt_update(bfqq);

	bfq_add_bfqq_busy(bfqd, bfqq);

	/*
	 * Expire in-service queue only if preemption may be needed
	 * for guarantees. In this respect, the function
	 * next_queue_may_preempt just checks a simple, necessary
	 * condition, and not a sufficient condition based on
	 * timestamps. In fact, for the latter condition to be
	 * evaluated, timestamps would need first to be updated, and
	 * this operation is quite costly (see the comments on the
	 * function bfq_bfqq_update_budg_for_activation).
	 */
	if (bfqd->in_service_queue && bfqq_wants_to_preempt &&
	    bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff &&
	    next_queue_may_preempt(bfqd))
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);
}
static void bfq_add_request(struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq);
	struct bfq_data *bfqd = bfqq->bfqd;
	struct request *next_rq, *prev;
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool interactive = false;

	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
	bfqq->queued[rq_is_sync(rq)]++;
	bfqd->queued++;

	elv_rb_add(&bfqq->sort_list, rq);

	/*
	 * Check if this request is a better next-serve candidate.
	 */
	prev = bfqq->next_rq;
	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
	bfqq->next_rq = next_rq;

	/*
	 * Adjust priority tree position, if next_rq changes.
	 */
	if (prev != bfqq->next_rq)
		bfq_pos_tree_add_move(bfqd, bfqq);

	if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
		bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff,
						 rq, &interactive);
	else {
		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
		    time_is_before_jiffies(
				bfqq->last_wr_start_finish +
				bfqd->bfq_wr_min_inter_arr_async)) {
			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);

			bfqd->wr_busy_queues++;
			bfqq->entity.prio_changed = 1;
		}
		if (prev != bfqq->next_rq)
			bfq_updated_next_req(bfqd, bfqq);
	}

	/*
	 * Assign jiffies to last_wr_start_finish in the following
	 * cases:
	 *
	 * . if bfqq is not going to be weight-raised, because, for
	 *   non weight-raised queues, last_wr_start_finish stores the
	 *   arrival time of the last request; as of now, this piece
	 *   of information is used only for deciding whether to
	 *   weight-raise async queues
	 *
	 * . if bfqq is not weight-raised, because, if bfqq is now
	 *   switching to weight-raised, then last_wr_start_finish
	 *   stores the time when weight-raising starts
	 *
	 * . if bfqq is interactive, because, regardless of whether
	 *   bfqq is currently weight-raised, the weight-raising
	 *   period must start or restart (this case is considered
	 *   separately because it is not detected by the above
	 *   conditions, if bfqq is already weight-raised)
	 *
	 * last_wr_start_finish has to be updated also if bfqq is soft
	 * real-time, because the weight-raising period is constantly
	 * restarted on idle-to-busy transitions for these queues, but
	 * this is already done in bfq_bfqq_handle_idle_busy_switch if
	 * needed.
	 */
	if (bfqd->low_latency &&
		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
		bfqq->last_wr_start_finish = jiffies;
}
static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
					  struct bio *bio,
					  struct request_queue *q)
{
	struct bfq_queue *bfqq = bfqd->bio_bfqq;

	if (bfqq)
		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));

	return NULL;
}

static sector_t get_sdist(sector_t last_pos, struct request *rq)
{
	if (last_pos)
		return abs(blk_rq_pos(rq) - last_pos);

	return 0;
}
#if 0 /* Still not clear if we can do without next two functions */
static void bfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;

	bfqd->rq_in_driver++;
}

static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct bfq_data *bfqd = q->elevator->elevator_data;

	bfqd->rq_in_driver--;
}
#endif
static void bfq_remove_request(struct request_queue *q,
			       struct request *rq)
{
	struct bfq_queue *bfqq = RQ_BFQQ(rq);
	struct bfq_data *bfqd = bfqq->bfqd;
	const int sync = rq_is_sync(rq);

	if (bfqq->next_rq == rq) {
		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
		bfq_updated_next_req(bfqd, bfqq);
	}

	if (rq->queuelist.prev != &rq->queuelist)
		list_del_init(&rq->queuelist);
	bfqq->queued[sync]--;
	bfqd->queued--;
	elv_rb_del(&bfqq->sort_list, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;

	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
		bfqq->next_rq = NULL;

		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
			bfq_del_bfqq_busy(bfqd, bfqq, false);
			/*
			 * bfqq emptied. In normal operation, when
			 * bfqq is empty, bfqq->entity.service and
			 * bfqq->entity.budget must contain,
			 * respectively, the service received and the
			 * budget used last time bfqq emptied. These
			 * facts do not hold in this case, as at least
			 * this last removal occurred while bfqq is
			 * not in service. To avoid inconsistencies,
			 * reset both bfqq->entity.service and
			 * bfqq->entity.budget, if bfqq has still a
			 * process that may issue I/O requests to it.
			 */
			bfqq->entity.budget = bfqq->entity.service = 0;
		}

		/*
		 * Remove queue from request-position tree as it is empty.
		 */
		if (bfqq->pos_root) {
			rb_erase(&bfqq->pos_node, bfqq->pos_root);
			bfqq->pos_root = NULL;
		}
	}

	if (rq->cmd_flags & REQ_META)
		bfqq->meta_pending--;

	bfqg_stats_update_io_remove(bfqq_group(bfqq), rq->cmd_flags);
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct request_queue *q = hctx->queue;
	struct bfq_data *bfqd = q->elevator->elevator_data;
	struct request *free = NULL;
	/*
	 * bfq_bic_lookup grabs the queue_lock: invoke it now and
	 * store its return value for later use, to avoid nesting
	 * queue_lock inside the bfqd->lock. We assume that the bic
	 * returned by bfq_bic_lookup does not go away before
	 * bfqd->lock is taken.
	 */
	struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
	bool ret;

	spin_lock_irq(&bfqd->lock);

	if (bic)
		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
	else
		bfqd->bio_bfqq = NULL;
	bfqd->bio_bic = bic;

	ret = blk_mq_sched_try_merge(q, bio, &free);

	if (free)
		blk_mq_free_request(free);
	spin_unlock_irq(&bfqd->lock);

	return ret;
}
1608 static int bfq_request_merge(struct request_queue *q, struct request **req,
1609 struct bio *bio)
1611 struct bfq_data *bfqd = q->elevator->elevator_data;
1612 struct request *__rq;
1614 __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1615 if (__rq && elv_bio_merge_ok(__rq, bio)) {
1616 *req = __rq;
1617 return ELEVATOR_FRONT_MERGE;
1620 return ELEVATOR_NO_MERGE;
1623 static void bfq_request_merged(struct request_queue *q, struct request *req,
1624 enum elv_merge type)
1626 if (type == ELEVATOR_FRONT_MERGE &&
1627 rb_prev(&req->rb_node) &&
1628 blk_rq_pos(req) <
1629 blk_rq_pos(container_of(rb_prev(&req->rb_node),
1630 struct request, rb_node))) {
1631 struct bfq_queue *bfqq = RQ_BFQQ(req);
1632 struct bfq_data *bfqd = bfqq->bfqd;
1633 struct request *prev, *next_rq;
1635 /* Reposition request in its sort_list */
1636 elv_rb_del(&bfqq->sort_list, req);
1637 elv_rb_add(&bfqq->sort_list, req);
1639 /* Choose next request to be served for bfqq */
1640 prev = bfqq->next_rq;
1641 next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
1642 bfqd->last_position);
1643 bfqq->next_rq = next_rq;
1645 * If next_rq changes, update both the queue's budget to
1646 * fit the new request and the queue's position in its
1647 * rq_pos_tree.
1649 if (prev != bfqq->next_rq) {
1650 bfq_updated_next_req(bfqd, bfqq);
1651 bfq_pos_tree_add_move(bfqd, bfqq);
1656 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
1657 struct request *next)
1659 struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
1661 if (!RB_EMPTY_NODE(&rq->rb_node))
1662 goto end;
1663 spin_lock_irq(&bfqq->bfqd->lock);
1666 * If next and rq belong to the same bfq_queue and next is older
1667 * than rq, then reposition rq in the fifo (by substituting next
1668 * with rq). Otherwise, if next and rq belong to different
1669 * bfq_queues, never reposition rq: in fact, we would have to
1670 * reposition it with respect to next's position in its own fifo,
1671 * which would most certainly be too expensive with respect to
1672 * the benefits.
1674 if (bfqq == next_bfqq &&
1675 !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1676 next->fifo_time < rq->fifo_time) {
1677 list_del_init(&rq->queuelist);
1678 list_replace_init(&next->queuelist, &rq->queuelist);
1679 rq->fifo_time = next->fifo_time;
1682 if (bfqq->next_rq == next)
1683 bfqq->next_rq = rq;
1685 bfq_remove_request(q, next);
1687 spin_unlock_irq(&bfqq->bfqd->lock);
1688 end:
1689 bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
1692 /* Must be called with bfqq != NULL */
1693 static void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
1695 if (bfq_bfqq_busy(bfqq))
1696 bfqq->bfqd->wr_busy_queues--;
1697 bfqq->wr_coeff = 1;
1698 bfqq->wr_cur_max_time = 0;
1699 bfqq->last_wr_start_finish = jiffies;
1701 * Trigger a weight change on the next invocation of
1702 * __bfq_entity_update_weight_prio.
1704 bfqq->entity.prio_changed = 1;
1707 void bfq_end_wr_async_queues(struct bfq_data *bfqd,
1708 struct bfq_group *bfqg)
1710 int i, j;
1712 for (i = 0; i < 2; i++)
1713 for (j = 0; j < IOPRIO_BE_NR; j++)
1714 if (bfqg->async_bfqq[i][j])
1715 bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
1716 if (bfqg->async_idle_bfqq)
1717 bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
1720 static void bfq_end_wr(struct bfq_data *bfqd)
1722 struct bfq_queue *bfqq;
1724 spin_lock_irq(&bfqd->lock);
1726 list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
1727 bfq_bfqq_end_wr(bfqq);
1728 list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
1729 bfq_bfqq_end_wr(bfqq);
1730 bfq_end_wr_async(bfqd);
1732 spin_unlock_irq(&bfqd->lock);
1735 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
1737 if (request)
1738 return blk_rq_pos(io_struct);
1739 else
1740 return ((struct bio *)io_struct)->bi_iter.bi_sector;
1743 static int bfq_rq_close_to_sector(void *io_struct, bool request,
1744 sector_t sector)
1746 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
1747 BFQQ_CLOSE_THR;
1750 static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd,
1751 struct bfq_queue *bfqq,
1752 sector_t sector)
1754 struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
1755 struct rb_node *parent, *node;
1756 struct bfq_queue *__bfqq;
1758 if (RB_EMPTY_ROOT(root))
1759 return NULL;
1762 * First, if we find a request starting at the end of the last
1763 * request, choose it.
1765 __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
1766 if (__bfqq)
1767 return __bfqq;
1770 * If the exact sector wasn't found, the parent of the NULL leaf
1771 * will contain the closest sector (rq_pos_tree sorted by
1772 * next_request position).
1774 __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
1775 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1776 return __bfqq;
1778 if (blk_rq_pos(__bfqq->next_rq) < sector)
1779 node = rb_next(&__bfqq->pos_node);
1780 else
1781 node = rb_prev(&__bfqq->pos_node);
1782 if (!node)
1783 return NULL;
1785 __bfqq = rb_entry(node, struct bfq_queue, pos_node);
1786 if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
1787 return __bfqq;
1789 return NULL;
1792 static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd,
1793 struct bfq_queue *cur_bfqq,
1794 sector_t sector)
1796 struct bfq_queue *bfqq;
1799 * We shall notice if some of the queues are cooperating,
1800 * e.g., working closely on the same area of the device. In
1801 * that case, we can group them together and: 1) don't waste
1802 * time idling, and 2) serve the union of their requests in
1803 * the best possible order for throughput.
1805 bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
1806 if (!bfqq || bfqq == cur_bfqq)
1807 return NULL;
1809 return bfqq;
1812 static struct bfq_queue *
1813 bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
1815 int process_refs, new_process_refs;
1816 struct bfq_queue *__bfqq;
1819 * If there are no process references on the new_bfqq, then it is
1820 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
1821 * may have dropped their last reference (not just their last process
1822 * reference).
1824 if (!bfqq_process_refs(new_bfqq))
1825 return NULL;
1827 /* Avoid a circular list and skip interim queue merges. */
1828 while ((__bfqq = new_bfqq->new_bfqq)) {
1829 if (__bfqq == bfqq)
1830 return NULL;
1831 new_bfqq = __bfqq;
1834 process_refs = bfqq_process_refs(bfqq);
1835 new_process_refs = bfqq_process_refs(new_bfqq);
1837 * If the process for the bfqq has gone away, there is no
1838 * sense in merging the queues.
1840 if (process_refs == 0 || new_process_refs == 0)
1841 return NULL;
1843 bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
1844 new_bfqq->pid);
1847 * Merging is just a redirection: the requests of the process
1848 * owning one of the two queues are redirected to the other queue.
1849 * The latter queue, in its turn, is set as shared if this is the
1850 * first time that the requests of some process are redirected to
1851 * it.
1853 * We redirect bfqq to new_bfqq and not the opposite, because
1854 * we are in the context of the process owning bfqq, thus we
1855 * have the io_cq of this process. So we can immediately
1856 * configure this io_cq to redirect the requests of the
1857 * process to new_bfqq. In contrast, the io_cq of new_bfqq is
1858 * not available any more (new_bfqq->bic == NULL).
1860 * Anyway, even in case new_bfqq coincides with the in-service
1861 * queue, redirecting requests to the in-service queue is the
1862 * best option, as we feed the in-service queue with new
1863 * requests close to the last request served and, by doing so,
1864 * are likely to increase the throughput.
1866 bfqq->new_bfqq = new_bfqq;
1867 new_bfqq->ref += process_refs;
1868 return new_bfqq;
1871 static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
1872 struct bfq_queue *new_bfqq)
1874 if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
1875 (bfqq->ioprio_class != new_bfqq->ioprio_class))
1876 return false;
1879 * If either of the queues has already been detected as seeky,
1880 * then merging it with the other queue is unlikely to lead to
1881 * sequential I/O.
1883 if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq))
1884 return false;
1887 * Interleaved I/O is known to be done by (some) applications
1888 * only for reads, so it does not make sense to merge async
1889 * queues.
1891 if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
1892 return false;
1894 return true;
1898 * If this function returns true, then bfqq cannot be merged. The idea
1899 * is that true cooperation happens very early after processes start
1900 * to do I/O. Usually, late cooperations are just accidental false
1901 * positives. In case bfqq is weight-raised, such false positives
1902 * would evidently degrade latency guarantees for bfqq.
1904 static bool wr_from_too_long(struct bfq_queue *bfqq)
1906 return bfqq->wr_coeff > 1 &&
1907 time_is_before_jiffies(bfqq->last_wr_start_finish +
1908 msecs_to_jiffies(100));
1912 * Attempt to schedule a merge of bfqq with the currently in-service
1913 * queue or with a close queue among the scheduled queues. Return
1914 * NULL if no merge was scheduled, a pointer to the shared bfq_queue
1915 * structure otherwise.
1917 * The OOM queue is not allowed to participate in cooperation: in fact, since
1918 * the requests temporarily redirected to the OOM queue could be redirected
1919 * again to dedicated queues at any time, the state needed to correctly
1920 * handle merging with the OOM queue would be quite complex and expensive
1921 * to maintain. Besides, in such a critical condition as running out of memory,
1922 * the benefits of queue merging may be of little relevance, or even negligible.
1924 * Weight-raised queues can be merged only if their weight-raising
1925 * period has just started. In fact cooperating processes are usually
1926 * started together. Thus, with this filter we avoid false positives
1927 * that would jeopardize low-latency guarantees.
1929 * WARNING: queue merging may impair fairness among non-weight raised
1930 * queues, for at least two reasons: 1) the original weight of a
1931 * merged queue may change during the merged state, 2) even if the
1932 * weight remains the same, a merged queue may be bloated with many more
1933 * requests than the ones produced by its originally-associated
1934 * process.
1936 static struct bfq_queue *
1937 bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1938 void *io_struct, bool request)
1940 struct bfq_queue *in_service_bfqq, *new_bfqq;
1942 if (bfqq->new_bfqq)
1943 return bfqq->new_bfqq;
1945 if (!io_struct ||
1946 wr_from_too_long(bfqq) ||
1947 unlikely(bfqq == &bfqd->oom_bfqq))
1948 return NULL;
1950 /* If there is only one backlogged queue, don't search. */
1951 if (bfqd->busy_queues == 1)
1952 return NULL;
1954 in_service_bfqq = bfqd->in_service_queue;
1956 if (!in_service_bfqq || in_service_bfqq == bfqq
1957 || wr_from_too_long(in_service_bfqq) ||
1958 unlikely(in_service_bfqq == &bfqd->oom_bfqq))
1959 goto check_scheduled;
1961 if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
1962 bfqq->entity.parent == in_service_bfqq->entity.parent &&
1963 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
1964 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
1965 if (new_bfqq)
1966 return new_bfqq;
1969 * Check whether there is a cooperator among currently scheduled
1970 * queues. The only thing we need is that the bio/request is not
1971 * NULL, as we need it to establish whether a cooperator exists.
1973 check_scheduled:
1974 new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
1975 bfq_io_struct_pos(io_struct, request));
1977 if (new_bfqq && !wr_from_too_long(new_bfqq) &&
1978 likely(new_bfqq != &bfqd->oom_bfqq) &&
1979 bfq_may_be_close_cooperator(bfqq, new_bfqq))
1980 return bfq_setup_merge(bfqq, new_bfqq);
1982 return NULL;
1985 static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
1987 struct bfq_io_cq *bic = bfqq->bic;
1990 * If !bfqq->bic, the queue is already shared or its requests
1991 * have already been redirected to a shared queue; both idle window
1992 * and weight raising state have already been saved. Do nothing.
1994 if (!bic)
1995 return;
1997 bic->saved_ttime = bfqq->ttime;
1998 bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
1999 bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
2000 bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
2001 bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
2002 bic->saved_wr_coeff = bfqq->wr_coeff;
2003 bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt;
2004 bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish;
2005 bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time;
2008 static void
2009 bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
2010 struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
2012 bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
2013 (unsigned long)new_bfqq->pid);
2014 /* Save weight raising and idle window of the merged queues */
2015 bfq_bfqq_save_state(bfqq);
2016 bfq_bfqq_save_state(new_bfqq);
2017 if (bfq_bfqq_IO_bound(bfqq))
2018 bfq_mark_bfqq_IO_bound(new_bfqq);
2019 bfq_clear_bfqq_IO_bound(bfqq);
2022 * If bfqq is weight-raised, then let new_bfqq inherit
2023 * weight-raising. To reduce false positives, neglect the case
2024 * where bfqq has just been created, but has not yet made it
2025 * to be weight-raised (which may happen because EQM may merge
2026 * bfqq even before bfq_add_request is executed for the first
2027 * time for bfqq). Handling this case would however be very
2028 * easy, thanks to the flag just_created.
2030 if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) {
2031 new_bfqq->wr_coeff = bfqq->wr_coeff;
2032 new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time;
2033 new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish;
2034 new_bfqq->wr_start_at_switch_to_srt =
2035 bfqq->wr_start_at_switch_to_srt;
2036 if (bfq_bfqq_busy(new_bfqq))
2037 bfqd->wr_busy_queues++;
2038 new_bfqq->entity.prio_changed = 1;
2041 if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */
2042 bfqq->wr_coeff = 1;
2043 bfqq->entity.prio_changed = 1;
2044 if (bfq_bfqq_busy(bfqq))
2045 bfqd->wr_busy_queues--;
2048 bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d",
2049 bfqd->wr_busy_queues);
2052 * Merge queues (that is, let bic redirect its requests to new_bfqq)
2054 bic_set_bfqq(bic, new_bfqq, 1);
2055 bfq_mark_bfqq_coop(new_bfqq);
2057 * new_bfqq now belongs to at least two bics (it is a shared queue):
2058 * set new_bfqq->bic to NULL. bfqq either:
2059 * - does not belong to any bic any more, and hence bfqq->bic must
2060 * be set to NULL, or
2061 * - is a queue whose owning bics have already been redirected to a
2062 * different queue, hence the queue is destined to not belong to
2063 * any bic soon and bfqq->bic is already NULL (therefore the next
2064 * assignment causes no harm).
2066 new_bfqq->bic = NULL;
2067 bfqq->bic = NULL;
2068 /* release process reference to bfqq */
2069 bfq_put_queue(bfqq);
2072 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2073 struct bio *bio)
2075 struct bfq_data *bfqd = q->elevator->elevator_data;
2076 bool is_sync = op_is_sync(bio->bi_opf);
2077 struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq;
2080 * Disallow merge of a sync bio into an async request.
2082 if (is_sync && !rq_is_sync(rq))
2083 return false;
2086 * Lookup the bfqq that this bio will be queued with. Allow
2087 * merge only if rq is queued there.
2089 if (!bfqq)
2090 return false;
2093 * We take advantage of this function to perform an early merge
2094 * of the queues of possible cooperating processes.
2096 new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
2097 if (new_bfqq) {
2099 * bic still points to bfqq, then it has not yet been
2100 * redirected to some other bfq_queue, and a queue
2101 * merge between bfqq and new_bfqq can be safely
2102 * fulfilled, i.e., bic can be redirected to new_bfqq
2103 * and bfqq can be put.
2105 bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
2106 new_bfqq);
2108 * If we get here, bio will be queued into new_queue,
2109 * so use new_bfqq to decide whether bio and rq can be
2110 * merged.
2112 bfqq = new_bfqq;
2115 * Change also bfqd->bio_bfqq, as
2116 * bfqd->bio_bic now points to new_bfqq, and
2117 * this function may be invoked again (and then may
2118 * use again bfqd->bio_bfqq).
2120 bfqd->bio_bfqq = bfqq;
2123 return bfqq == RQ_BFQQ(rq);
2127 * Set the maximum time for the in-service queue to consume its
2128 * budget. This prevents seeky processes from lowering the throughput.
2129 * In practice, a time-slice service scheme is used with seeky
2130 * processes.
2132 static void bfq_set_budget_timeout(struct bfq_data *bfqd,
2133 struct bfq_queue *bfqq)
2135 unsigned int timeout_coeff;
2137 if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
2138 timeout_coeff = 1;
2139 else
2140 timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
2142 bfqd->last_budget_start = ktime_get();
2144 bfqq->budget_timeout = jiffies +
2145 bfqd->bfq_timeout * timeout_coeff;
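/*
 * Illustrative numeric example for the scaling above (all values are
 * invented, not BFQ defaults): with bfq_timeout = 16 jiffies and a
 * queue whose weight has been raised to ten times its original
 * weight (and which is not in a soft real-time weight-raising
 * period), timeout_coeff = 10 and budget_timeout = jiffies + 160,
 * i.e., the queue gets proportionally more time to consume its
 * budget. A non-raised queue, or a queue in its soft real-time
 * weight-raising period, keeps timeout_coeff = 1 and gets
 * budget_timeout = jiffies + 16.
 */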
2148 static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
2149 struct bfq_queue *bfqq)
2151 if (bfqq) {
2152 bfqg_stats_update_avg_queue_size(bfqq_group(bfqq));
2153 bfq_clear_bfqq_fifo_expire(bfqq);
2155 bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8;
2157 if (time_is_before_jiffies(bfqq->last_wr_start_finish) &&
2158 bfqq->wr_coeff > 1 &&
2159 bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
2160 time_is_before_jiffies(bfqq->budget_timeout)) {
2162 * For soft real-time queues, move the start
2163 * of the weight-raising period forward by the
2164 * time the queue has not received any
2165 * service. Otherwise, a relatively long
2166 * service delay is likely to cause the
2167 * weight-raising period of the queue to end,
2168 * because of the short duration of the
2169 * weight-raising period of a soft real-time
2170 * queue. It is worth noting that this move
2171 * is not so dangerous for the other queues,
2172 * because soft real-time queues are not
2173 * greedy.
2175 * To not add a further variable, we use the
2176 * overloaded field budget_timeout to
2177 * determine for how long the queue has not
2178 * received service, i.e., how much time has
2179 * elapsed since the queue expired. However,
2180 * this is a little imprecise, because
2181 * budget_timeout is set to jiffies if bfqq
2182 * not only expires, but also remains with no
2183 * request.
2185 if (time_after(bfqq->budget_timeout,
2186 bfqq->last_wr_start_finish))
2187 bfqq->last_wr_start_finish +=
2188 jiffies - bfqq->budget_timeout;
2189 else
2190 bfqq->last_wr_start_finish = jiffies;
2193 bfq_set_budget_timeout(bfqd, bfqq);
2194 bfq_log_bfqq(bfqd, bfqq,
2195 "set_in_service_queue, cur-budget = %d",
2196 bfqq->entity.budget);
2199 bfqd->in_service_queue = bfqq;
2203 * Get and set a new queue for service.
2205 static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
2207 struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
2209 __bfq_set_in_service_queue(bfqd, bfqq);
2210 return bfqq;
2213 static void bfq_arm_slice_timer(struct bfq_data *bfqd)
2215 struct bfq_queue *bfqq = bfqd->in_service_queue;
2216 u32 sl;
2218 bfq_mark_bfqq_wait_request(bfqq);
2221 * We don't want to idle for seeks, but we do want to allow
2222 * fair distribution of slice time for a process doing back-to-back
2223 * seeks. So allow a little bit of time for it to submit a new rq.
2225 sl = bfqd->bfq_slice_idle;
2227 * Unless the queue is being weight-raised or the scenario is
2228 * asymmetric, grant only minimum idle time if the queue
2229 * is seeky. A long idling is preserved for a weight-raised
2230 * queue, or, more in general, in an asymmetric scenario,
2231 * because a long idling is needed for guaranteeing to a queue
2232 * its reserved share of the throughput (in particular, it is
2233 * needed if the queue has a higher weight than some other
2234 * queue).
2236 if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
2237 bfq_symmetric_scenario(bfqd))
2238 sl = min_t(u64, sl, BFQ_MIN_TT);
2240 bfqd->last_idling_start = ktime_get();
2241 hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
2242 HRTIMER_MODE_REL);
2243 bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
2247 * In autotuning mode, max_budget is dynamically recomputed as the
2248 * amount of sectors transferred in timeout at the estimated peak
2249 * rate. This enables BFQ to utilize a full timeslice with a full
2250 * budget, even if the in-service queue is served at peak rate. And
2251 * this maximises throughput with sequential workloads.
2253 static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
2255 return (u64)bfqd->peak_rate * USEC_PER_MSEC *
2256 jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
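/*
 * Worked example for the formula above (numbers invented for
 * clarity): peak_rate is stored as (sectors/usec) << BFQ_RATE_SHIFT,
 * so with an estimated peak rate of 0.5 sectors/usec (about 256 MB/s
 * with 512 B sectors) and a budget timeout of 125 ms, the maximum
 * budget becomes roughly
 *	0.5 * 1000 * 125 = 62500 sectors (~32 MB),
 * i.e., the amount of service a sequential queue can receive in one
 * full timeslice if served at peak rate.
 */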
2260 * Update parameters related to throughput and responsiveness, as a
2261 * function of the estimated peak rate. See comments on
2262 * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
2264 static void update_thr_responsiveness_params(struct bfq_data *bfqd)
2266 int dev_type = blk_queue_nonrot(bfqd->queue);
2268 if (bfqd->bfq_user_max_budget == 0)
2269 bfqd->bfq_max_budget =
2270 bfq_calc_max_budget(bfqd);
2272 if (bfqd->device_speed == BFQ_BFQD_FAST &&
2273 bfqd->peak_rate < device_speed_thresh[dev_type]) {
2274 bfqd->device_speed = BFQ_BFQD_SLOW;
2275 bfqd->RT_prod = R_slow[dev_type] *
2276 T_slow[dev_type];
2277 } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
2278 bfqd->peak_rate > device_speed_thresh[dev_type]) {
2279 bfqd->device_speed = BFQ_BFQD_FAST;
2280 bfqd->RT_prod = R_fast[dev_type] *
2281 T_fast[dev_type];
2284 bfq_log(bfqd,
2285 "dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu setcs/sec",
2286 dev_type == 0 ? "ROT" : "NONROT",
2287 bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
2288 bfqd->device_speed == BFQ_BFQD_FAST ?
2289 (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
2290 (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
2291 (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
2292 BFQ_RATE_SHIFT);
2295 static void bfq_reset_rate_computation(struct bfq_data *bfqd,
2296 struct request *rq)
2298 if (rq != NULL) { /* new rq dispatch now, reset accordingly */
2299 bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
2300 bfqd->peak_rate_samples = 1;
2301 bfqd->sequential_samples = 0;
2302 bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
2303 blk_rq_sectors(rq);
2304 } else /* no new rq dispatched, just reset the number of samples */
2305 bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
2307 bfq_log(bfqd,
2308 "reset_rate_computation at end, sample %u/%u tot_sects %llu",
2309 bfqd->peak_rate_samples, bfqd->sequential_samples,
2310 bfqd->tot_sectors_dispatched);
2313 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
2315 u32 rate, weight, divisor;
2318 * For the convergence property to hold (see comments on
2319 * bfq_update_peak_rate()) and for the assessment to be
2320 * reliable, a minimum number of samples must be present, and
2321 * a minimum amount of time must have elapsed. If not so, do
2322 * not compute new rate. Just reset parameters, to get ready
2323 * for a new evaluation attempt.
2325 if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES ||
2326 bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL)
2327 goto reset_computation;
2330 * If a new request completion has occurred after last
2331 * dispatch, then, to approximate the rate at which requests
2332 * have been served by the device, it is more precise to
2333 * extend the observation interval to the last completion.
2335 bfqd->delta_from_first =
2336 max_t(u64, bfqd->delta_from_first,
2337 bfqd->last_completion - bfqd->first_dispatch);
2340 * Rate computed in sects/usec, and not sects/nsec, for
2341 * precision issues.
2343 rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
2344 div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
2347 * Peak rate not updated if:
2348 * - the percentage of sequential dispatches is below 3/4 of the
2349 * total, and rate is below the current estimated peak rate
2350 * - rate is unreasonably high (> 20M sectors/sec)
2352 if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
2353 rate <= bfqd->peak_rate) ||
2354 rate > 20<<BFQ_RATE_SHIFT)
2355 goto reset_computation;
2358 * We have to update the peak rate, at last! To this purpose,
2359 * we use a low-pass filter. We compute the smoothing constant
2360 * of the filter as a function of the 'weight' of the new
2361 * measured rate.
2363 * As can be seen in next formulas, we define this weight as a
2364 * quantity proportional to how sequential the workload is,
2365 * and to how long the observation time interval is.
2367 * The weight runs from 0 to 8. The maximum value of the
2368 * weight, 8, yields the minimum value for the smoothing
2369 * constant. At this minimum value for the smoothing constant,
2370 * the measured rate contributes for half of the next value of
2371 * the estimated peak rate.
2373 * So, the first step is to compute the weight as a function
2374 * of how sequential the workload is. Note that the weight
2375 * cannot reach 9, because bfqd->sequential_samples cannot
2376 * become equal to bfqd->peak_rate_samples, which, in its
2377 * turn, holds true because bfqd->sequential_samples is not
2378 * incremented for the first sample.
2380 weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
2383 * Second step: further refine the weight as a function of the
2384 * duration of the observation interval.
2386 weight = min_t(u32, 8,
2387 div_u64(weight * bfqd->delta_from_first,
2388 BFQ_RATE_REF_INTERVAL));
2391 * Divisor ranging from 10, for minimum weight, to 2, for
2392 * maximum weight.
2394 divisor = 10 - weight;
2397 * Finally, update peak rate:
2399 * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
2401 bfqd->peak_rate *= divisor-1;
2402 bfqd->peak_rate /= divisor;
2403 rate /= divisor; /* smoothing constant alpha = 1/divisor */
2405 bfqd->peak_rate += rate;
2406 update_thr_responsiveness_params(bfqd);
2408 reset_computation:
2409 bfq_reset_rate_computation(bfqd, rq);
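#if 0 /* Illustrative sketch, compiled out: not part of BFQ. */
/*
 * The arithmetic of the low-pass filter above, isolated for clarity.
 * The example values in the comment are invented.
 */
static u32 bfq_peak_rate_filter_example(u32 peak_rate, u32 rate, u32 weight)
{
	u32 divisor = 10 - weight; /* weight in [0, 8] */

	/*
	 * E.g., peak_rate = 8000, rate = 12000 and weight = 8 (long,
	 * mostly sequential observation interval) give divisor = 2
	 * and a new estimate of 8000/2 + 12000/2 = 10000, i.e., the
	 * sample contributes half of the new value. With weight = 0
	 * (divisor = 10) the same sample would only move the estimate
	 * to 8000 * 9/10 + 12000/10 = 8400.
	 */
	return peak_rate * (divisor - 1) / divisor + rate / divisor;
}
#endif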
2413 * Update the read/write peak rate (the main quantity used for
2414 * auto-tuning, see update_thr_responsiveness_params()).
2416 * It is not trivial to estimate the peak rate (correctly): because of
2417 * the presence of sw and hw queues between the scheduler and the
2418 * device components that finally serve I/O requests, it is hard to
2419 * say exactly when a given dispatched request is served inside the
2420 * device, and for how long. As a consequence, it is hard to know
2421 * precisely at what rate a given set of requests is actually served
2422 * by the device.
2424 * On the opposite end, the dispatch time of any request is trivially
2425 * available, and, from this piece of information, the "dispatch rate"
2426 * of requests can be immediately computed. So, the idea in the next
2427 * function is to use what is known, namely request dispatch times
2428 * (plus, when useful, request completion times), to estimate what is
2429 * unknown, namely in-device request service rate.
2431 * The main issue is that, because of the above facts, the rate at
2432 * which a certain set of requests is dispatched over a certain time
2433 * interval can vary greatly with respect to the rate at which the
2434 * same requests are then served. But, since the size of any
2435 * intermediate queue is limited, and the service scheme is lossless
2436 * (no request is silently dropped), the following obvious convergence
2437 * property holds: the number of requests dispatched MUST become
2438 * closer and closer to the number of requests completed as the
2439 * observation interval grows. This is the key property used in
2440 * the next function to estimate the peak service rate as a function
2441 * of the observed dispatch rate. The function assumes to be invoked
2442 * on every request dispatch.
2444 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
2446 u64 now_ns = ktime_get_ns();
2448 if (bfqd->peak_rate_samples == 0) { /* first dispatch */
2449 bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
2450 bfqd->peak_rate_samples);
2451 bfq_reset_rate_computation(bfqd, rq);
2452 goto update_last_values; /* will add one sample */
2456 * Device idle for very long: the observation interval lasting
2457 * up to this dispatch cannot be a valid observation interval
2458 * for computing a new peak rate (similarly to the late-
2459 * completion event in bfq_completed_request()). Go to
2460 * update_rate_and_reset to have the following three steps
2461 * taken:
2462 * - close the observation interval at the last (previous)
2463 * request dispatch or completion
2464 * - compute rate, if possible, for that observation interval
2465 * - start a new observation interval with this dispatch
2467 if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
2468 bfqd->rq_in_driver == 0)
2469 goto update_rate_and_reset;
2471 /* Update sampling information */
2472 bfqd->peak_rate_samples++;
2474 if ((bfqd->rq_in_driver > 0 ||
2475 now_ns - bfqd->last_completion < BFQ_MIN_TT)
2476 && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
2477 bfqd->sequential_samples++;
2479 bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
2481 /* Reset max observed rq size every 32 dispatches */
2482 if (likely(bfqd->peak_rate_samples % 32))
2483 bfqd->last_rq_max_size =
2484 max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
2485 else
2486 bfqd->last_rq_max_size = blk_rq_sectors(rq);
2488 bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
2490 /* Target observation interval not yet reached, go on sampling */
2491 if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
2492 goto update_last_values;
2494 update_rate_and_reset:
2495 bfq_update_rate_reset(bfqd, rq);
2496 update_last_values:
2497 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
2498 bfqd->last_dispatch = now_ns;
2502 * Remove request from internal lists.
2504 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
2506 struct bfq_queue *bfqq = RQ_BFQQ(rq);
2509 * For consistency, the next instruction should have been
2510 * executed after removing the request from the queue and
2511 * dispatching it. We execute instead this instruction before
2512 * bfq_remove_request() (and hence introduce a temporary
2513 * inconsistency), for efficiency. In fact, should this
2514 * dispatch occur for a non in-service bfqq, this anticipated
2515 * increment prevents two counters related to bfqq->dispatched
2516 * from risking being, first, uselessly decremented, and then
2517 * incremented again when the (new) value of bfqq->dispatched
2518 * happens to be taken into account.
2520 bfqq->dispatched++;
2521 bfq_update_peak_rate(q->elevator->elevator_data, rq);
2523 bfq_remove_request(q, rq);
2526 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
2529 * If this bfqq is shared between multiple processes, check
2530 * to make sure that those processes are still issuing I/Os
2531 * within the mean seek distance. If not, it may be time to
2532 * break the queues apart again.
2534 if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
2535 bfq_mark_bfqq_split_coop(bfqq);
2537 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
2538 if (bfqq->dispatched == 0)
2540 * Overloading budget_timeout field to store
2541 * the time at which the queue remains with no
2542 * backlog and no outstanding request; used by
2543 * the weight-raising mechanism.
2545 bfqq->budget_timeout = jiffies;
2547 bfq_del_bfqq_busy(bfqd, bfqq, true);
2548 } else {
2549 bfq_requeue_bfqq(bfqd, bfqq);
2551 * Resort priority tree of potential close cooperators.
2553 bfq_pos_tree_add_move(bfqd, bfqq);
2557 * All in-service entities must have been properly deactivated
2558 * or requeued before executing the next function, which
2559 * resets all in-service entities as no more in service.
2561 __bfq_bfqd_reset_in_service(bfqd);
2565 * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
2566 * @bfqd: device data.
2567 * @bfqq: queue to update.
2568 * @reason: reason for expiration.
2570 * Handle the feedback on @bfqq budget at queue expiration.
2571 * See the body for detailed comments.
2573 static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
2574 struct bfq_queue *bfqq,
2575 enum bfqq_expiration reason)
2577 struct request *next_rq;
2578 int budget, min_budget;
2580 min_budget = bfq_min_budget(bfqd);
2582 if (bfqq->wr_coeff == 1)
2583 budget = bfqq->max_budget;
2584 else /*
2585 * Use a constant, low budget for weight-raised queues,
2586 * to help achieve a low latency. Keep it slightly higher
2587 * than the minimum possible budget, to cause a little
2588 * bit fewer expirations.
2590 budget = 2 * min_budget;
2592 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d",
2593 bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
2594 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d",
2595 budget, bfq_min_budget(bfqd));
2596 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
2597 bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
2599 if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) {
2600 switch (reason) {
2602 * Caveat: in all the following cases we trade latency
2603 * for throughput.
2605 case BFQQE_TOO_IDLE:
2607 * This is the only case where we may reduce
2608 * the budget: if there is no request of the
2609 * process still waiting for completion, then
2610 * we assume (tentatively) that the timer has
2611 * expired because the batch of requests of
2612 * the process could have been served with a
2613 * smaller budget. Hence, betting that
2614 * process will behave in the same way when it
2615 * becomes backlogged again, we reduce its
2616 * next budget. As long as we guess right,
2617 * this budget cut reduces the latency
2618 * experienced by the process.
2620 * However, if there are still outstanding
2621 * requests, then the process may have not yet
2622 * issued its next request just because it is
2623 * still waiting for the completion of some of
2624 * the still outstanding ones. So in this
2625 * subcase we do not reduce its budget, on the
2626 * contrary we increase it to possibly boost
2627 * the throughput, as discussed in the
2628 * comments to the BUDGET_TIMEOUT case.
2630 if (bfqq->dispatched > 0) /* still outstanding reqs */
2631 budget = min(budget * 2, bfqd->bfq_max_budget);
2632 else {
2633 if (budget > 5 * min_budget)
2634 budget -= 4 * min_budget;
2635 else
2636 budget = min_budget;
2638 break;
2639 case BFQQE_BUDGET_TIMEOUT:
2641 * We double the budget here because it gives
2642 * the chance to boost the throughput if this
2643 * is not a seeky process (and has bumped into
2644 * this timeout because of, e.g., ZBR).
2646 budget = min(budget * 2, bfqd->bfq_max_budget);
2647 break;
2648 case BFQQE_BUDGET_EXHAUSTED:
2650 * The process still has backlog, and did not
2651 * let either the budget timeout or the disk
2652 * idling timeout expire. Hence it is not
2653 * seeky, has a short thinktime and may be
2654 * happy with a higher budget too. So
2655 * definitely increase the budget of this good
2656 * candidate to boost the disk throughput.
2658 budget = min(budget * 4, bfqd->bfq_max_budget);
2659 break;
2660 case BFQQE_NO_MORE_REQUESTS:
2662 * For queues that expire for this reason, it
2663 * is particularly important to keep the
2664 * budget close to the actual service they
2665 * need. Doing so reduces the timestamp
2666 * misalignment problem described in the
2667 * comments in the body of
2668 * __bfq_activate_entity. In fact, suppose
2669 * that a queue systematically expires for
2670 * BFQQE_NO_MORE_REQUESTS and presents a
2671 * new request in time to enjoy timestamp
2672 * back-shifting. The larger the budget of the
2673 * queue is with respect to the service the
2674 * queue actually requests in each service
2675 * slot, the more times the queue can be
2676 * reactivated with the same virtual finish
2677 * time. It follows that, even if this finish
2678 * time is pushed to the system virtual time
2679 * to reduce the consequent timestamp
2680 * misalignment, the queue unjustly enjoys for
2681 * many re-activations a lower finish time
2682 * than all newly activated queues.
2684 * The service needed by bfqq is measured
2685 * quite precisely by bfqq->entity.service.
2686 * Since bfqq does not enjoy device idling,
2687 * bfqq->entity.service is equal to the number
2688 * of sectors that the process associated with
2689 * bfqq requested to read/write before waiting
2690 * for request completions, or blocking for
2691 * other reasons.
2693 budget = max_t(int, bfqq->entity.service, min_budget);
2694 break;
2695 default:
2696 return;
2698 } else if (!bfq_bfqq_sync(bfqq)) {
2700 * Async queues get always the maximum possible
2701 * budget, as for them we do not care about latency
2702 * (in addition, their ability to dispatch is limited
2703 * by the charging factor).
2705 budget = bfqd->bfq_max_budget;
2708 bfqq->max_budget = budget;
2710 if (bfqd->budgets_assigned >= bfq_stats_min_budgets &&
2711 !bfqd->bfq_user_max_budget)
2712 bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget);
2715 * If there is still backlog, then assign a new budget, making
2716 * sure that it is large enough for the next request. Since
2717 * the finish time of bfqq must be kept in sync with the
2718 * budget, be sure to call __bfq_bfqq_expire() *after* this
2719 * update.
2721 * If there is no backlog, then no need to update the budget;
2722 * it will be updated on the arrival of a new request.
2724 next_rq = bfqq->next_rq;
2725 if (next_rq)
2726 bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
2727 bfq_serv_to_charge(next_rq, bfqq));
2729 bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d",
2730 next_rq ? blk_rq_sectors(next_rq) : 0,
2731 bfqq->entity.budget);
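/*
 * Numeric sketch of the feedback above (all values invented): assume
 * min_budget = 32, bfq_max_budget = 16384 and a sync, non-raised
 * queue with max_budget = 2048. On BFQQE_BUDGET_TIMEOUT, or on
 * BFQQE_TOO_IDLE with requests still outstanding, the budget is
 * doubled to 4096; on BFQQE_BUDGET_EXHAUSTED it is quadrupled to
 * 8192 (both capped at bfq_max_budget); on BFQQE_TOO_IDLE with no
 * outstanding requests it is cut by 4 * min_budget, down to 1920;
 * on BFQQE_NO_MORE_REQUESTS it is set to max(entity.service,
 * min_budget), i.e., to roughly the service actually consumed in the
 * last service slot. An async queue would simply get bfq_max_budget.
 */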
2735 * Return true if the process associated with bfqq is "slow". The slow
2736 * flag is used, in addition to the budget timeout, to reduce the
2737 * amount of service provided to seeky processes, and thus reduce
2738 * their chances to lower the throughput. More details in the comments
2739 * on the function bfq_bfqq_expire().
2741 * An important observation is in order: as discussed in the comments
2742 * on the function bfq_update_peak_rate(), with devices with internal
2743 * queues, it is hard if ever possible to know when and for how long
2744 * an I/O request is processed by the device (apart from the trivial
2745 * I/O pattern where a new request is dispatched only after the
2746 * previous one has been completed). This makes it hard to evaluate
2747 * the real rate at which the I/O requests of each bfq_queue are
2748 * served. In fact, for an I/O scheduler like BFQ, serving a
2749 * bfq_queue means just dispatching its requests during its service
2750 * slot (i.e., until the budget of the queue is exhausted, or the
2751 * queue remains idle, or, finally, a timeout fires). But, during the
2752 * service slot of a bfq_queue, around 100 ms at most, the device may
2753 * be even still processing requests of bfq_queues served in previous
2754 * service slots. On the opposite end, the requests of the in-service
2755 * bfq_queue may be completed after the service slot of the queue
2756 * finishes.
2758 * Anyway, unless more sophisticated solutions are used
2759 * (where possible), the sum of the sizes of the requests dispatched
2760 * during the service slot of a bfq_queue is probably the only
2761 * approximation available for the service received by the bfq_queue
2762 * during its service slot. And this sum is the quantity used in this
2763 * function to evaluate the I/O speed of a process.
2765 static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
2766 bool compensate, enum bfqq_expiration reason,
2767 unsigned long *delta_ms)
2769 ktime_t delta_ktime;
2770 u32 delta_usecs;
2771 bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekiness */
2773 if (!bfq_bfqq_sync(bfqq))
2774 return false;
2776 if (compensate)
2777 delta_ktime = bfqd->last_idling_start;
2778 else
2779 delta_ktime = ktime_get();
2780 delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
2781 delta_usecs = ktime_to_us(delta_ktime);
2783 /* don't use too short time intervals */
2784 if (delta_usecs < 1000) {
2785 if (blk_queue_nonrot(bfqd->queue))
2787 * give same worst-case guarantees as idling
2788 * for seeky
2790 *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC;
2791 else /* charge at least one seek */
2792 *delta_ms = bfq_slice_idle / NSEC_PER_MSEC;
2794 return slow;
2797 *delta_ms = delta_usecs / USEC_PER_MSEC;
2800 * Use only long (> 20ms) intervals to filter out excessive
2801 * spikes in service rate estimation.
2803 if (delta_usecs > 20000) {
2805 * Caveat for rotational devices: processes doing I/O
2806 * in the slower disk zones tend to be slow(er) even
2807 * if not seeky. In this respect, the estimated peak
2808 * rate is likely to be an average over the disk
2809 * surface. Accordingly, to not be too harsh with
2810 * unlucky processes, a process is deemed slow only if
2811 * its rate has been lower than half of the estimated
2812 * peak rate.
2814 slow = bfqq->entity.service < bfqd->bfq_max_budget / 2;
2817 bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow);
2819 return slow;
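/*
 * Example of the thresholds above (invented numbers): with
 * bfq_max_budget = 16384 sectors, a sync queue observed for more
 * than 20 ms is deemed slow only if it received less than 8192
 * sectors of service over that interval. For shorter intervals the
 * initial, seekiness-based guess is returned unchanged, and for
 * intervals under 1 ms the reported duration is additionally
 * replaced by the idling time (bfq_slice_idle, or BFQ_MIN_TT on
 * non-rotational devices).
 */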
2823 * To be deemed as soft real-time, an application must meet two
2824 * requirements. First, the application must not require an average
2825 * bandwidth higher than the approximate bandwidth required to play back or
2826 * record a compressed high-definition video.
2827 * The next function is invoked on the completion of the last request of a
2828 * batch, to compute the next-start time instant, soft_rt_next_start, such
2829 * that, if the next request of the application does not arrive before
2830 * soft_rt_next_start, then the above requirement on the bandwidth is met.
2832 * The second requirement is that the request pattern of the application is
2833 * isochronous, i.e., that, after issuing a request or a batch of requests,
2834 * the application stops issuing new requests until all its pending requests
2835 * have been completed. After that, the application may issue a new batch,
2836 * and so on.
2837 * For this reason the next function is invoked to compute
2838 * soft_rt_next_start only for applications that meet this requirement,
2839 * whereas soft_rt_next_start is set to infinity for applications that do
2840 * not.
2842 * Unfortunately, even a greedy application may happen to behave in an
2843 * isochronous way if the CPU load is high. In fact, the application may
2844 * stop issuing requests while the CPUs are busy serving other processes,
2845 * then restart, then stop again for a while, and so on. In addition, if
2846 * the disk achieves a low enough throughput with the request pattern
2847 * issued by the application (e.g., because the request pattern is random
2848 * and/or the device is slow), then the application may meet the above
2849 * bandwidth requirement too. To prevent such a greedy application from
2850 * being deemed as soft real-time, a further rule is used in the computation of
2851 * soft_rt_next_start: soft_rt_next_start must be higher than the current
2852 * time plus the maximum time for which the arrival of a request is waited
2853 * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
2854 * This filters out greedy applications, as the latter issue instead their
2855 * next request as soon as possible after the last one has been completed
2856 * (in contrast, when a batch of requests is completed, a soft real-time
2857 * application spends some time processing data).
2859 * Unfortunately, the last filter may easily generate false positives if
2860 * only bfqd->bfq_slice_idle is used as a reference time interval and one
2861 * or both the following cases occur:
2862 * 1) HZ is so low that the duration of a jiffy is comparable to or higher
2863 * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
2864 * HZ=100.
2865 * 2) jiffies, instead of increasing at a constant rate, may stop increasing
2866 * for a while, then suddenly 'jump' by several units to recover the lost
2867 * increments. This seems to happen, e.g., inside virtual machines.
2868 * To address this issue, we do not use as a reference time interval just
2869 * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
2870 * particular we add the minimum number of jiffies for which the filter
2871 * seems to be quite precise also in embedded systems and KVM/QEMU virtual
2872 * machines.
2874 static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
2875 struct bfq_queue *bfqq)
2877 return max(bfqq->last_idle_bklogged +
2878 HZ * bfqq->service_from_backlogged /
2879 bfqd->bfq_wr_max_softrt_rate,
2880 jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
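/*
 * Worked example of the formula above (all values invented): with
 * HZ = 250, bfq_wr_max_softrt_rate = 7000 sectors/sec,
 * service_from_backlogged = 1400 sectors and last_idle_bklogged =
 * jiffies - 10, the bandwidth term evaluates to
 *	(jiffies - 10) + 250 * 1400 / 7000 = jiffies + 40,
 * i.e., the application must not issue its next batch for another 40
 * jiffies if its average bandwidth is to stay below the soft
 * real-time threshold. The second argument of max() (bfq_slice_idle,
 * in jiffies, plus 4) only filters out greedy applications and jiffy
 * granularity issues, as explained in the long comment above.
 */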
2884 * Return the farthest future time instant according to jiffies
2885 * macros.
2887 static unsigned long bfq_greatest_from_now(void)
2889 return jiffies + MAX_JIFFY_OFFSET;
2893 * Return the farthest past time instant according to jiffies
2894 * macros.
2896 static unsigned long bfq_smallest_from_now(void)
2898 return jiffies - MAX_JIFFY_OFFSET;
2902 * bfq_bfqq_expire - expire a queue.
2903 * @bfqd: device owning the queue.
2904 * @bfqq: the queue to expire.
2905 * @compensate: if true, compensate for the time spent idling.
2906 * @reason: the reason causing the expiration.
2908 * If the process associated with bfqq does slow I/O (e.g., because it
2909 * issues random requests), we charge bfqq with the time it has been
2910 * in service instead of the service it has received (see
2911 * bfq_bfqq_charge_time for details on how this goal is achieved). As
2912 * a consequence, bfqq will typically get higher timestamps upon
2913 * reactivation, and hence it will be rescheduled as if it had
2914 * received more service than what it has actually received. In the
2915 * end, bfqq receives less service in proportion to how slowly its
2916 * associated process consumes its budgets (and hence how seriously it
2917 * tends to lower the throughput). In addition, this time-charging
2918 * strategy guarantees time fairness among slow processes. In
2919 * contrast, if the process associated with bfqq is not slow, we
2920 * charge bfqq exactly with the service it has received.
2922 * Charging time to the first type of queues and the exact service to
2923 * the other has the effect of using the WF2Q+ policy to schedule the
2924 * former on a timeslice basis, without violating service domain
2925 * guarantees among the latter.
2927 void bfq_bfqq_expire(struct bfq_data *bfqd,
2928 struct bfq_queue *bfqq,
2929 bool compensate,
2930 enum bfqq_expiration reason)
2932 bool slow;
2933 unsigned long delta = 0;
2934 struct bfq_entity *entity = &bfqq->entity;
2935 int ref;
2938 * Check whether the process is slow (see bfq_bfqq_is_slow).
2940 slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
2943 * Increase service_from_backlogged before next statement,
2944 * because the possible next invocation of
2945 * bfq_bfqq_charge_time would likely inflate
2946 * entity->service. In contrast, service_from_backlogged must
2947 * contain real service, to enable the soft real-time
2948 * heuristic to correctly compute the bandwidth consumed by
2949 * bfqq.
2951 bfqq->service_from_backlogged += entity->service;
2954 * As above explained, charge slow (typically seeky) and
2955 * timed-out queues with the time and not the service
2956 * received, to favor sequential workloads.
2958 * Processes doing I/O in the slower disk zones will tend to
2959 * be slow(er) even if not seeky. Therefore, since the
2960 * estimated peak rate is actually an average over the disk
2961 * surface, these processes may timeout just for bad luck. To
2962 * avoid punishing them, do not charge time to processes that
2963 * succeeded in consuming at least 2/3 of their budget. This
2964 * allows BFQ to preserve enough elasticity to still perform
2965 * bandwidth, and not time, distribution for processes that are only a
2966 * little unlucky or quasi-sequential.
2968 if (bfqq->wr_coeff == 1 &&
2969 (slow ||
2970 (reason == BFQQE_BUDGET_TIMEOUT &&
2971 bfq_bfqq_budget_left(bfqq) >= entity->budget / 3)))
2972 bfq_bfqq_charge_time(bfqd, bfqq, delta);
2974 if (reason == BFQQE_TOO_IDLE &&
2975 entity->service <= 2 * entity->budget / 10)
2976 bfq_clear_bfqq_IO_bound(bfqq);
2978 if (bfqd->low_latency && bfqq->wr_coeff == 1)
2979 bfqq->last_wr_start_finish = jiffies;
2981 if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
2982 RB_EMPTY_ROOT(&bfqq->sort_list)) {
2984 * If we get here, and there are no outstanding
2985 * requests, then the request pattern is isochronous
2986 * (see the comments on the function
2987 * bfq_bfqq_softrt_next_start()). Thus we can compute
2988 * soft_rt_next_start. If, instead, the queue still
2989 * has outstanding requests, then we have to wait for
2990 * the completion of all the outstanding requests to
2991 * discover whether the request pattern is actually
2992 * isochronous.
2994 if (bfqq->dispatched == 0)
2995 bfqq->soft_rt_next_start =
2996 bfq_bfqq_softrt_next_start(bfqd, bfqq);
2997 else {
2999 * The application is still waiting for the
3000 * completion of one or more requests:
3001 * prevent it from possibly being incorrectly
3002 * deemed as soft real-time by setting its
3003 * soft_rt_next_start to infinity. In fact,
3004 * without this assignment, the application
3005 * would be incorrectly deemed as soft
3006 * real-time if:
3007 * 1) it issued a new request before the
3008 * completion of all its in-flight
3009 * requests, and
3010 * 2) at that time, its soft_rt_next_start
3011 * happened to be in the past.
3013 bfqq->soft_rt_next_start =
3014 bfq_greatest_from_now();
3016 * Schedule an update of soft_rt_next_start to when
3017 * the task may be discovered to be isochronous.
3019 bfq_mark_bfqq_softrt_update(bfqq);
3023 bfq_log_bfqq(bfqd, bfqq,
3024 "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
3025 slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
3028 * Increase, decrease or leave budget unchanged according to
3029 * reason.
3031 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
3032 ref = bfqq->ref;
3033 __bfq_bfqq_expire(bfqd, bfqq);
3035 /* mark bfqq as waiting a request only if a bic still points to it */
3036 if (ref > 1 && !bfq_bfqq_busy(bfqq) &&
3037 reason != BFQQE_BUDGET_TIMEOUT &&
3038 reason != BFQQE_BUDGET_EXHAUSTED)
3039 bfq_mark_bfqq_non_blocking_wait_rq(bfqq);
3043 * Budget timeout is not implemented through a dedicated timer, but
3044 * just checked on request arrivals and completions, as well as on
3045 * idle timer expirations.
3047 static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
3049 return time_is_before_eq_jiffies(bfqq->budget_timeout);
3053 * If we expire a queue that is actively waiting (i.e., with the
3054 * device idled) for the arrival of a new request, then we may incur
3055 * the timestamp misalignment problem described in the body of the
3056 * function __bfq_activate_entity. Hence we return true only if this
3057 * condition does not hold, or if the queue is slow enough to deserve
3058 * only to be kicked off for preserving a high throughput.
3060 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
3062 bfq_log_bfqq(bfqq->bfqd, bfqq,
3063 "may_budget_timeout: wait_request %d left %d timeout %d",
3064 bfq_bfqq_wait_request(bfqq),
3065 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
3066 bfq_bfqq_budget_timeout(bfqq));
3068 return (!bfq_bfqq_wait_request(bfqq) ||
3069 bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
3071 bfq_bfqq_budget_timeout(bfqq);
3075 * For a queue that becomes empty, device idling is allowed only if
3076 * this function returns true for the queue. As a consequence, since
3077 * device idling plays a critical role in both throughput boosting and
3078 * service guarantees, the return value of this function plays a
3079 * critical role in both these aspects as well.
3081 * In a nutshell, this function returns true only if idling is
3082 * beneficial for throughput or, even if detrimental for throughput,
3083 * idling is however necessary to preserve service guarantees (low
3084 * latency, desired throughput distribution, ...). In particular, on
3085 * NCQ-capable devices, this function tries to return false, so as to
3086 * help keep the drives' internal queues full, whenever this helps the
3087 * device boost the throughput without causing any service-guarantee
3088 * issue.
3090 * In more detail, the return value of this function is obtained by,
3091 * first, computing a number of boolean variables that take into
3092 * account throughput and service-guarantee issues, and, then,
3093 * combining these variables in a logical expression. Most of the
3094 * issues taken into account are not trivial. We discuss these issues
3095 * individually while introducing the variables.
3097 static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq)
3099 struct bfq_data *bfqd = bfqq->bfqd;
3100 bool idling_boosts_thr, idling_boosts_thr_without_issues,
3101 idling_needed_for_service_guarantees,
3102 asymmetric_scenario;
3104 if (bfqd->strict_guarantees)
3105 return true;
3108 * The next variable takes into account the cases where idling
3109 * boosts the throughput.
3111 * The value of the variable is computed considering, first, that
3112 * idling is virtually always beneficial for the throughput if:
3113 * (a) the device is not NCQ-capable, or
3114 * (b) regardless of the presence of NCQ, the device is rotational
3115 * and the request pattern for bfqq is I/O-bound and sequential.
3117 * Secondly, and in contrast to the above item (b), idling an
3118 * NCQ-capable flash-based device would not boost the
3119 * throughput even with sequential I/O; rather it would lower
3120 * the throughput in proportion to how fast the device
3121 * is. Accordingly, the next variable is true if any of the
3122 * above conditions (a) and (b) is true, and, in particular,
3123 * happens to be false if bfqd is an NCQ-capable flash-based
3124 * device.
3126 idling_boosts_thr = !bfqd->hw_tag ||
3127 (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
3128 bfq_bfqq_idle_window(bfqq));
3131 * The value of the next variable,
3132 * idling_boosts_thr_without_issues, is equal to that of
3133 * idling_boosts_thr, unless a special case holds. In this
3134 * special case, described below, idling may cause problems to
3135 * weight-raised queues.
3137 * When the request pool is saturated (e.g., in the presence
3138 * of write hogs), if the processes associated with
3139 * non-weight-raised queues ask for requests at a lower rate,
3140 * then processes associated with weight-raised queues have a
3141 * higher probability to get a request from the pool
3142 * immediately (or at least soon) when they need one. Thus
3143 * they have a higher probability to actually get a fraction
3144 * of the device throughput proportional to their high
3145 * weight. This is especially true with NCQ-capable drives,
3146 * which enqueue several requests in advance, and further
3147 * reorder internally-queued requests.
3149 * For this reason, we force to false the value of
3150 * idling_boosts_thr_without_issues if there are weight-raised
3151 * busy queues. In this case, and if bfqq is not weight-raised,
3152 * this guarantees that the device is not idled for bfqq (if,
3153 * instead, bfqq is weight-raised, then idling will be
3154 * guaranteed by another variable, see below). Combined with
3155 * the timestamping rules of BFQ (see [1] for details), this
3156 * behavior causes bfqq, and hence any sync non-weight-raised
3157 * queue, to get a lower number of requests served, and thus
3158 * to ask for a lower number of requests from the request
3159 * pool, before the busy weight-raised queues get served
3160 * again. This often mitigates starvation problems in the
3161 * presence of heavy write workloads and NCQ, thereby
3162 * guaranteeing a higher application and system responsiveness
3163 * in these hostile scenarios.
3165 idling_boosts_thr_without_issues = idling_boosts_thr &&
3166 bfqd->wr_busy_queues == 0;
3169 * There is then a case where idling must be performed not
3170 * for throughput concerns, but to preserve service
3171 * guarantees.
3173 * To introduce this case, we can note that allowing the drive
3174 * to enqueue more than one request at a time, and hence
3175 * delegating de facto final scheduling decisions to the
3176 * drive's internal scheduler, entails loss of control on the
3177 * actual request service order. In particular, the critical
3178 * situation is when requests from different processes happen
3179 * to be present, at the same time, in the internal queue(s)
3180 * of the drive. In such a situation, the drive, by deciding
3181 * the service order of the internally-queued requests, does
3182 * determine also the actual throughput distribution among
3183 * these processes. But the drive typically has no notion or
3184 * concern about per-process throughput distribution, and
3185 * makes its decisions only on a per-request basis. Therefore,
3186 * the service distribution enforced by the drive's internal
3187 * scheduler is likely to coincide with the desired
3188 * device-throughput distribution only in a completely
3189 * symmetric scenario where:
3190 * (i) each of these processes must get the same throughput as
3191 * the others;
3192 * (ii) all these processes have the same I/O pattern
3193 * (either sequential or random).
3194 * In fact, in such a scenario, the drive will tend to treat
3195 * the requests of each of these processes in about the same
3196 * way as the requests of the others, and thus to provide
3197 * each of these processes with about the same throughput
3198 * (which is exactly the desired throughput distribution). In
3199 * contrast, in any asymmetric scenario, device idling is
3200 * certainly needed to guarantee that bfqq receives its
3201 * assigned fraction of the device throughput (see [1] for
3202 * details).
3204 * We address this issue by controlling, actually, only the
3205 * symmetry sub-condition (i), i.e., provided that
3206 * sub-condition (i) holds, idling is not performed,
3207 * regardless of whether sub-condition (ii) holds. In other
3208 * words, only if sub-condition (i) does not hold is idling
3209 * allowed, and the device then tends to be prevented from queueing
3210 * many requests, possibly of several processes. The reason
3211 * for not controlling also sub-condition (ii) is that we
3212 * exploit preemption to preserve guarantees in case of
3213 * symmetric scenarios, even if (ii) does not hold, as
3214 * explained in the next two paragraphs.
3216 * Even if a queue, say Q, is expired when it remains idle, Q
3217 * can still preempt the new in-service queue if the next
3218 * request of Q arrives soon (see the comments on
3219 * bfq_bfqq_update_budg_for_activation). If all queues and
3220 * groups have the same weight, this form of preemption,
3221 * combined with the hole-recovery heuristic described in the
3222 * comments on function bfq_bfqq_update_budg_for_activation,
3223 * are enough to preserve a correct bandwidth distribution in
3224 * the mid term, even without idling. In fact, even if not
3225 * idling allows the internal queues of the device to contain
3226 * many requests, and thus to reorder requests, we can rather
3227 * safely assume that the internal scheduler still preserves a
3228 * minimum of mid-term fairness. The motivation for using
3229 * preemption instead of idling is that, by not idling,
3230 * service guarantees are preserved without sacrificing
3231 * any throughput. In other words, both a high
3232 * throughput and its desired distribution are obtained.
3234 * More precisely, this preemption-based, idleless approach
3235 * provides fairness in terms of IOPS, and not sectors per
3236 * second. This can be seen with a simple example. Suppose
3237 * that there are two queues with the same weight, but that
3238 * the first queue receives requests of 8 sectors, while the
3239 * second queue receives requests of 1024 sectors. In
3240 * addition, suppose that each of the two queues contains at
3241 * most one request at a time, which implies that each queue
3242 * always remains idle after it is served. Finally, after
3243 * remaining idle, each queue receives very quickly a new
3244 * request. It follows that the two queues are served
3245 * alternately, preempting each other if needed. This
3246 * implies that, although both queues have the same weight,
3247 * the queue with large requests receives a service that is
3248 * 1024/8 times as high as the service received by the other
3249 * queue.
3251 * On the other hand, device idling is performed, and thus
3252 * pure sector-domain guarantees are provided, for the
3253 * following queues, which are likely to need stronger
3254 * throughput guarantees: weight-raised queues, and queues
3255 * with a higher weight than other queues. When such queues
3256 * are active, sub-condition (i) is false, which triggers
3257 * device idling.
3259 * According to the above considerations, the next variable is
3260 * true (only) if sub-condition (i) holds. To compute the
3261 * value of this variable, we not only use the return value of
3262 * the function bfq_symmetric_scenario(), but also check
3263 * whether bfqq is being weight-raised, because
3264 * bfq_symmetric_scenario() does not also take into account
3265 * weight-raised queues (see comments on
3266 * bfq_weights_tree_add()).
3268 * As a side note, it is worth considering that the above
3269 * device-idling countermeasures may however fail in the
3270 * following unlucky scenario: if idling is (correctly)
3271 * disabled in a time period during which all symmetry
3272 * sub-conditions hold, and hence the device is allowed to
3273 * enqueue many requests, but at some later point in time some
3274 * sub-condition ceases to hold, then it may become impossible
3275 * to let requests be served in the desired order until all
3276 * the requests already queued in the device have been served.
3278 asymmetric_scenario = bfqq->wr_coeff > 1 ||
3279 !bfq_symmetric_scenario(bfqd);
3282 * Finally, there is a case where maximizing throughput is the
3283 * best choice even if it may cause unfairness toward
3284 * bfqq. Such a case is when bfqq became active in a burst of
3285 * queue activations. Queues that became active during a large
3286 * burst benefit only from throughput, as discussed in the
3287 * comments on bfq_handle_burst. Thus, if bfqq became active
3288 * in a burst and not idling the device maximizes throughput,
3289 * then the device must not be idled, because not idling the
3290 * device provides bfqq and all other queues in the burst with
3291 * maximum benefit. Combining this and the above case, we can
3292 * now establish when idling is actually needed to preserve
3293 * service guarantees.
3295 idling_needed_for_service_guarantees =
3296 asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
3299 * We have now all the components we need to compute the return
3300 * value of the function, which is true only if both the following
3301 * conditions hold:
3302 * 1) bfqq is sync, because idling makes sense only for sync queues;
3303 * 2) idling either boosts the throughput (without issues), or
3304 * is necessary to preserve service guarantees.
3306 return bfq_bfqq_sync(bfqq) &&
3307 (idling_boosts_thr_without_issues ||
3308 idling_needed_for_service_guarantees);
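/*
 * For instance (illustration only): on an NCQ-capable SSD with no
 * weight-raised queues and a symmetric scenario, both disjuncts above
 * are false, so even a sync queue is not idled and the drive is left
 * free to fill its internal queues; on a rotational disk, a sync,
 * I/O-bound, sequential queue keeps being idled, as that is what
 * preserves its throughput.
 */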
3312 * If the in-service queue is empty but the function bfq_bfqq_may_idle
3313 * returns true, then:
3314 * 1) the queue must remain in service and cannot be expired, and
3315 * 2) the device must be idled to wait for the possible arrival of a new
3316 * request for the queue.
3317 * See the comments on the function bfq_bfqq_may_idle for the reasons
3318 * why performing device idling is the best choice to boost the throughput
3319 * and preserve service guarantees when bfq_bfqq_may_idle itself
3320 * returns true.
3322 static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
3324 struct bfq_data *bfqd = bfqq->bfqd;
3326 return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
3327 bfq_bfqq_may_idle(bfqq);
3331 * Select a queue for service. If we have a current queue in service,
3332 * check whether to continue servicing it, or retrieve and set a new one.
3334 static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
3336 struct bfq_queue *bfqq;
3337 struct request *next_rq;
3338 enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT;
3340 bfqq = bfqd->in_service_queue;
3341 if (!bfqq)
3342 goto new_queue;
3344 bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
3346 if (bfq_may_expire_for_budg_timeout(bfqq) &&
3347 !bfq_bfqq_wait_request(bfqq) &&
3348 !bfq_bfqq_must_idle(bfqq))
3349 goto expire;
3351 check_queue:
3353 * This loop is rarely executed more than once. Even when it
3354 * happens, it is much more convenient to re-execute this loop
3355 * than to return NULL and trigger a new dispatch to get a
3356 * request served.
3358 next_rq = bfqq->next_rq;
3360 * If bfqq has requests queued and it has enough budget left to
3361 * serve them, keep the queue, otherwise expire it.
3363 if (next_rq) {
3364 if (bfq_serv_to_charge(next_rq, bfqq) >
3365 bfq_bfqq_budget_left(bfqq)) {
3367 * Expire the queue for budget exhaustion,
3368 * which makes sure that the next budget is
3369 * enough to serve the next request, even if
3370 * it comes from the fifo expired path.
3372 reason = BFQQE_BUDGET_EXHAUSTED;
3373 goto expire;
3374 } else {
3376 * The idle timer may be pending because we may
3377 * not disable disk idling even when a new request
3378 * arrives.
3380 if (bfq_bfqq_wait_request(bfqq)) {
3382 * If we get here: 1) at least one new request
3383 * has arrived but we have not disabled the
3384 * timer because the request was too small,
3385 * 2) then the block layer has unplugged
3386 * the device, causing the dispatch to be
3387 * invoked.
3389 * Since the device is unplugged, now the
3390 * requests are probably large enough to
3391 * provide a reasonable throughput.
3392 * So we disable idling.
3394 bfq_clear_bfqq_wait_request(bfqq);
3395 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
3396 bfqg_stats_update_idle_time(bfqq_group(bfqq));
3398 goto keep_queue;
3403 * No requests pending. However, if the in-service queue is idling
3404 * for a new request, or has requests waiting for a completion and
3405 * may idle after their completion, then keep it anyway.
3407 if (bfq_bfqq_wait_request(bfqq) ||
3408 (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) {
3409 bfqq = NULL;
3410 goto keep_queue;
3413 reason = BFQQE_NO_MORE_REQUESTS;
3414 expire:
3415 bfq_bfqq_expire(bfqd, bfqq, false, reason);
3416 new_queue:
3417 bfqq = bfq_set_in_service_queue(bfqd);
3418 if (bfqq) {
3419 bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue");
3420 goto check_queue;
3422 keep_queue:
3423 if (bfqq)
3424 bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue");
3425 else
3426 bfq_log(bfqd, "select_queue: no queue returned");
3428 return bfqq;
3431 static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3433 struct bfq_entity *entity = &bfqq->entity;
3435 if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
3436 bfq_log_bfqq(bfqd, bfqq,
3437 "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
3438 jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
3439 jiffies_to_msecs(bfqq->wr_cur_max_time),
3440 bfqq->wr_coeff,
3441 bfqq->entity.weight, bfqq->entity.orig_weight);
3443 if (entity->prio_changed)
3444 bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
3447 * If the queue was activated in a burst, or too much
3448 * time has elapsed from the beginning of this
3449 * weight-raising period, then end weight raising.
3451 if (bfq_bfqq_in_large_burst(bfqq))
3452 bfq_bfqq_end_wr(bfqq);
3453 else if (time_is_before_jiffies(bfqq->last_wr_start_finish +
3454 bfqq->wr_cur_max_time)) {
3455 if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time ||
3456 time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt +
3457 bfq_wr_duration(bfqd)))
3458 bfq_bfqq_end_wr(bfqq);
3459 else {
3460 /* switch back to interactive wr */
3461 bfqq->wr_coeff = bfqd->bfq_wr_coeff;
3462 bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
3463 bfqq->last_wr_start_finish =
3464 bfqq->wr_start_at_switch_to_srt;
3465 bfqq->entity.prio_changed = 1;
3469 /* Update weight both if it must be raised and if it must be lowered */
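/*
 * Note: the two sides of the inequality below differ exactly when the
 * current state of the weight (raised above orig_weight or not) no
 * longer matches wr_coeff, i.e., when an update in either direction
 * is pending.
 */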
3470 if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
3471 __bfq_entity_update_weight_prio(
3472 bfq_entity_service_tree(entity),
3473 entity);
3477 * Dispatch next request from bfqq.
3479 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
3480 struct bfq_queue *bfqq)
3482 struct request *rq = bfqq->next_rq;
3483 unsigned long service_to_charge;
3485 service_to_charge = bfq_serv_to_charge(rq, bfqq);
3487 bfq_bfqq_served(bfqq, service_to_charge);
3489 bfq_dispatch_remove(bfqd->queue, rq);
3492 * If weight raising has to terminate for bfqq, then the next
3493 * function causes an immediate update of bfqq's weight,
3494 * without waiting for the next activation. As a consequence, on
3495 * expiration, bfqq will be timestamped as if it had never been
3496 * weight-raised during this service slot, even if it has
3497 * received part or even most of the service as a
3498 * weight-raised queue. This inflates bfqq's timestamps, which
3499 * is beneficial, as bfqq is then readier to yield the device
3500 * immediately to any other weight-raised queues.
3502 bfq_update_wr_data(bfqd, bfqq);
3505 * Expire bfqq, pretending that its budget expired, if bfqq
3506 * belongs to CLASS_IDLE and other queues are waiting for
3507 * service.
3509 if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
3510 goto expire;
3512 return rq;
3514 expire:
3515 bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
3516 return rq;
3519 static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
3521 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3524 * Avoiding lock: a race on bfqd->busy_queues should at
3525 * most cause a useless call to dispatch
3527 return !list_empty_careful(&bfqd->dispatch) ||
3528 bfqd->busy_queues > 0;
3531 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3533 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3534 struct request *rq = NULL;
3535 struct bfq_queue *bfqq = NULL;
3537 if (!list_empty(&bfqd->dispatch)) {
3538 rq = list_first_entry(&bfqd->dispatch, struct request,
3539 queuelist);
3540 list_del_init(&rq->queuelist);
3542 bfqq = RQ_BFQQ(rq);
3544 if (bfqq) {
3546 * Increment counters here, because this
3547 * dispatch does not follow the standard
3548 * dispatch flow (where counters are
3549 * incremented)
3551 bfqq->dispatched++;
3553 goto inc_in_driver_start_rq;
3557 * We exploit the put_rq_private hook to decrement
3558 * rq_in_driver, but put_rq_private will not be
3559 * invoked on this request. So, to avoid unbalance,
3560 * just start this request, without incrementing
3561 * rq_in_driver. As a negative consequence,
3562 * rq_in_driver is deceptively lower than it should be
3563 * while this request is in service. This may cause
3564 * bfq_schedule_dispatch to be invoked uselessly.
3566 * As for implementing an exact solution, the
3567 * put_request hook, if defined, is probably invoked
3568 * also on this request. So, by exploiting this hook,
3569 * we could 1) increment rq_in_driver here, and 2)
3570 * decrement it in put_request. Such a solution would
3571 * keep the value of the counter accurate at all times,
3572 * but it would entail using an extra interface
3573 * function. This cost seems higher than the benefit,
3574 * given that non-elevator-private requests are
3575 * very infrequent.
3577 goto start_rq;
3580 bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
3582 if (bfqd->busy_queues == 0)
3583 goto exit;
3586 * Force device to serve one request at a time if
3587 * strict_guarantees is true. Forcing this service scheme is
3588 * currently the ONLY way to guarantee that the request
3589 * service order enforced by the scheduler is respected by a
3590 * queueing device. Otherwise the device is free even to make
3591 * some unlucky request wait for as long as the device
3592 * wishes.
3594 * Of course, serving one request at a time may cause loss of
3595 * throughput.
3597 if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0)
3598 goto exit;
3600 bfqq = bfq_select_queue(bfqd);
3601 if (!bfqq)
3602 goto exit;
3604 rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq);
3606 if (rq) {
3607 inc_in_driver_start_rq:
3608 bfqd->rq_in_driver++;
3609 start_rq:
3610 rq->rq_flags |= RQF_STARTED;
3612 exit:
3613 return rq;
3616 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
3618 struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
3619 struct request *rq;
3621 spin_lock_irq(&bfqd->lock);
3623 rq = __bfq_dispatch_request(hctx);
3624 spin_unlock_irq(&bfqd->lock);
3626 return rq;
3630 * Task holds one reference to the queue, dropped when task exits. Each rq
3631 * in-flight on this queue also holds a reference, dropped when rq is freed.
3633 * Scheduler lock must be held here. Recall not to use bfqq after calling
3634 * this function on it.
3636 void bfq_put_queue(struct bfq_queue *bfqq)
3638 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3639 struct bfq_group *bfqg = bfqq_group(bfqq);
3640 #endif
3642 if (bfqq->bfqd)
3643 bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
3644 bfqq, bfqq->ref);
3646 bfqq->ref--;
3647 if (bfqq->ref)
3648 return;
3650 if (bfq_bfqq_sync(bfqq))
3652 * The fact that this queue is being destroyed does not
3653 * invalidate the fact that this queue may have been
3654 * activated during the current burst. As a consequence,
3655 * although the queue no longer exists, and hence
3656 * needs to be removed from the burst list if present,
3657 * the burst size must not be decremented.
3659 hlist_del_init(&bfqq->burst_list_node);
3661 kmem_cache_free(bfq_pool, bfqq);
3662 #ifdef CONFIG_BFQ_GROUP_IOSCHED
3663 bfqg_put(bfqg);
3664 #endif
3667 static void bfq_put_cooperator(struct bfq_queue *bfqq)
3669 struct bfq_queue *__bfqq, *next;
3672 * If this queue was scheduled to merge with another queue, be
3673 * sure to drop the reference taken on that queue (and others in
3674 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
3676 __bfqq = bfqq->new_bfqq;
3677 while (__bfqq) {
3678 if (__bfqq == bfqq)
3679 break;
3680 next = __bfqq->new_bfqq;
3681 bfq_put_queue(__bfqq);
3682 __bfqq = next;
3686 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
3688 if (bfqq == bfqd->in_service_queue) {
3689 __bfq_bfqq_expire(bfqd, bfqq);
3690 bfq_schedule_dispatch(bfqd);
3693 bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
3695 bfq_put_cooperator(bfqq);
3697 bfq_put_queue(bfqq); /* release process reference */
3700 static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
3702 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
3703 struct bfq_data *bfqd;
3705 if (bfqq)
3706 bfqd = bfqq->bfqd; /* NULL if scheduler already exited */
3708 if (bfqq && bfqd) {
3709 unsigned long flags;
3711 spin_lock_irqsave(&bfqd->lock, flags);
3712 bfq_exit_bfqq(bfqd, bfqq);
3713 bic_set_bfqq(bic, NULL, is_sync);
3714 spin_unlock_irqrestore(&bfqd->lock, flags);
3718 static void bfq_exit_icq(struct io_cq *icq)
3720 struct bfq_io_cq *bic = icq_to_bic(icq);
3722 bfq_exit_icq_bfqq(bic, true);
3723 bfq_exit_icq_bfqq(bic, false);
3727 * Update the entity prio values; note that the new values will not
3728 * be used until the next (re)activation.
3730 static void
3731 bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
3733 struct task_struct *tsk = current;
3734 int ioprio_class;
3735 struct bfq_data *bfqd = bfqq->bfqd;
3737 if (!bfqd)
3738 return;
3740 ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3741 switch (ioprio_class) {
3742 default:
3743 dev_err(bfqq->bfqd->queue->backing_dev_info->dev,
3744 "bfq: bad prio class %d\n", ioprio_class);
3745 case IOPRIO_CLASS_NONE:
3747 * No prio set, inherit CPU scheduling settings.
3749 bfqq->new_ioprio = task_nice_ioprio(tsk);
3750 bfqq->new_ioprio_class = task_nice_ioclass(tsk);
3751 break;
3752 case IOPRIO_CLASS_RT:
3753 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3754 bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
3755 break;
3756 case IOPRIO_CLASS_BE:
3757 bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3758 bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
3759 break;
3760 case IOPRIO_CLASS_IDLE:
3761 bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
3762 bfqq->new_ioprio = 7;
3763 bfq_clear_bfqq_idle_window(bfqq);
3764 break;
3767 if (bfqq->new_ioprio >= IOPRIO_BE_NR) {
3768 pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
3769 bfqq->new_ioprio);
3770 bfqq->new_ioprio = IOPRIO_BE_NR;
3773 bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
3774 bfqq->entity.prio_changed = 1;
3777 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3778 struct bio *bio, bool is_sync,
3779 struct bfq_io_cq *bic);
3781 static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio)
3783 struct bfq_data *bfqd = bic_to_bfqd(bic);
3784 struct bfq_queue *bfqq;
3785 int ioprio = bic->icq.ioc->ioprio;
3788 * This condition may trigger on a newly created bic, be sure to
3789 * drop the lock before returning.
3791 if (unlikely(!bfqd) || likely(bic->ioprio == ioprio))
3792 return;
3794 bic->ioprio = ioprio;
3796 bfqq = bic_to_bfqq(bic, false);
3797 if (bfqq) {
3798 /* release process reference on this queue */
3799 bfq_put_queue(bfqq);
3800 bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic);
3801 bic_set_bfqq(bic, bfqq, false);
3804 bfqq = bic_to_bfqq(bic, true);
3805 if (bfqq)
3806 bfq_set_next_ioprio_data(bfqq, bic);
3809 static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3810 struct bfq_io_cq *bic, pid_t pid, int is_sync)
3812 RB_CLEAR_NODE(&bfqq->entity.rb_node);
3813 INIT_LIST_HEAD(&bfqq->fifo);
3814 INIT_HLIST_NODE(&bfqq->burst_list_node);
3816 bfqq->ref = 0;
3817 bfqq->bfqd = bfqd;
3819 if (bic)
3820 bfq_set_next_ioprio_data(bfqq, bic);
3822 if (is_sync) {
3823 if (!bfq_class_idle(bfqq))
3824 bfq_mark_bfqq_idle_window(bfqq);
3825 bfq_mark_bfqq_sync(bfqq);
3826 bfq_mark_bfqq_just_created(bfqq);
3827 } else
3828 bfq_clear_bfqq_sync(bfqq);
3830 /* set end request to minus infinity from now */
3831 bfqq->ttime.last_end_request = ktime_get_ns() + 1;
3833 bfq_mark_bfqq_IO_bound(bfqq);
3835 bfqq->pid = pid;
3837 /* Tentative initial value to trade off between thr and lat */
3838 bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
3839 bfqq->budget_timeout = bfq_smallest_from_now();
3841 bfqq->wr_coeff = 1;
3842 bfqq->last_wr_start_finish = jiffies;
3843 bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now();
3844 bfqq->split_time = bfq_smallest_from_now();
3847 * Set to the value for which bfqq will not be deemed as
3848 * soft rt when it becomes backlogged.
3850 bfqq->soft_rt_next_start = bfq_greatest_from_now();
3852 /* first request is almost certainly seeky */
3853 bfqq->seek_history = 1;
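/*
 * Async queues are kept per (class, ioprio level): one array row for
 * RT and one for BE (CLASS_NONE falls back to the default BE level),
 * plus a single queue shared by the whole IDLE class.
 */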
3856 static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
3857 struct bfq_group *bfqg,
3858 int ioprio_class, int ioprio)
3860 switch (ioprio_class) {
3861 case IOPRIO_CLASS_RT:
3862 return &bfqg->async_bfqq[0][ioprio];
3863 case IOPRIO_CLASS_NONE:
3864 ioprio = IOPRIO_NORM;
3865 /* fall through */
3866 case IOPRIO_CLASS_BE:
3867 return &bfqg->async_bfqq[1][ioprio];
3868 case IOPRIO_CLASS_IDLE:
3869 return &bfqg->async_idle_bfqq;
3870 default:
3871 return NULL;
3875 static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
3876 struct bio *bio, bool is_sync,
3877 struct bfq_io_cq *bic)
3879 const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
3880 const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
3881 struct bfq_queue **async_bfqq = NULL;
3882 struct bfq_queue *bfqq;
3883 struct bfq_group *bfqg;
3885 rcu_read_lock();
3887 bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
3888 if (!bfqg) {
3889 bfqq = &bfqd->oom_bfqq;
3890 goto out;
3893 if (!is_sync) {
3894 async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
3895 ioprio);
3896 bfqq = *async_bfqq;
3897 if (bfqq)
3898 goto out;
3901 bfqq = kmem_cache_alloc_node(bfq_pool,
3902 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
3903 bfqd->queue->node);
3905 if (bfqq) {
3906 bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
3907 is_sync);
3908 bfq_init_entity(&bfqq->entity, bfqg);
3909 bfq_log_bfqq(bfqd, bfqq, "allocated");
3910 } else {
3911 bfqq = &bfqd->oom_bfqq;
3912 bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
3913 goto out;
3917 * Pin the queue now that it's allocated, scheduler exit will
3918 * prune it.
3920 if (async_bfqq) {
3921 bfqq->ref++; /*
3922 * Extra group reference, w.r.t. sync
3923 * queue. This extra reference is removed
3924 * only if bfqq->bfqg disappears, to
3925 * guarantee that this queue is not freed
3926 * until its group goes away.
3928 bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
3929 bfqq, bfqq->ref);
3930 *async_bfqq = bfqq;
3933 out:
3934 bfqq->ref++; /* get a process reference to this queue */
3935 bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
3936 rcu_read_unlock();
3937 return bfqq;
3940 static void bfq_update_io_thinktime(struct bfq_data *bfqd,
3941 struct bfq_queue *bfqq)
3943 struct bfq_ttime *ttime = &bfqq->ttime;
3944 u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
3946 elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
3948 ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8;
3949 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
3950 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3951 ttime->ttime_samples);
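/*
 * Rough illustration of the fixed-point averaging above (not part of
 * the algorithm): with a constant inter-request think time T,
 * ttime_samples converges to 256 and ttime_total to 256*T, so
 * ttime_mean settles at about T; each new sample enters with weight
 * 1/8 while the accumulated history is decayed by 7/8.
 */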
3954 static void
3955 bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
3956 struct request *rq)
3958 bfqq->seek_history <<= 1;
3959 bfqq->seek_history |=
3960 get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
3961 (!blk_queue_nonrot(bfqd->queue) ||
3962 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
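/*
 * seek_history thus works as a small shift register: each update
 * pushes in one bit, set when the new request lies beyond
 * BFQQ_SEEK_THR from the previous one and, on non-rotational devices,
 * only if the request is also smaller than BFQQ_SECT_THR_NONROT. The
 * most recent bits form the sliding window of seeky events that
 * BFQQ_SEEKY() evaluates.
 */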
3966 * Disable idle window if the process thinks too long or seeks so much that
3967 * it doesn't matter.
3969 static void bfq_update_idle_window(struct bfq_data *bfqd,
3970 struct bfq_queue *bfqq,
3971 struct bfq_io_cq *bic)
3973 int enable_idle;
3975 /* Don't idle for async or idle io prio class. */
3976 if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
3977 return;
3979 /* Idle window just restored, statistics are meaningless. */
3980 if (time_is_after_eq_jiffies(bfqq->split_time +
3981 bfqd->bfq_wr_min_idle_time))
3982 return;
3984 enable_idle = bfq_bfqq_idle_window(bfqq);
3986 if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
3987 bfqd->bfq_slice_idle == 0 ||
3988 (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
3989 bfqq->wr_coeff == 1))
3990 enable_idle = 0;
3991 else if (bfq_sample_valid(bfqq->ttime.ttime_samples)) {
3992 if (bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle &&
3993 bfqq->wr_coeff == 1)
3994 enable_idle = 0;
3995 else
3996 enable_idle = 1;
3998 bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
3999 enable_idle);
4001 if (enable_idle)
4002 bfq_mark_bfqq_idle_window(bfqq);
4003 else
4004 bfq_clear_bfqq_idle_window(bfqq);
4008 * Called when a new fs request (rq) is added to bfqq. Check if there's
4009 * something we should do about it.
4011 static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
4012 struct request *rq)
4014 struct bfq_io_cq *bic = RQ_BIC(rq);
4016 if (rq->cmd_flags & REQ_META)
4017 bfqq->meta_pending++;
4019 bfq_update_io_thinktime(bfqd, bfqq);
4020 bfq_update_io_seektime(bfqd, bfqq, rq);
4021 if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
4022 !BFQQ_SEEKY(bfqq))
4023 bfq_update_idle_window(bfqd, bfqq, bic);
4025 bfq_log_bfqq(bfqd, bfqq,
4026 "rq_enqueued: idle_window=%d (seeky %d)",
4027 bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq));
4029 bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
4031 if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
4032 bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
4033 blk_rq_sectors(rq) < 32;
4034 bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
4037 * There is just this request queued: if the request
4038 * is small and the queue is not to be expired, then
4039 * just exit.
4041 * In this way, if the device is being idled to wait
4042 * for a new request from the in-service queue, we
4043 * avoid unplugging the device and committing the
4044 * device to serve just a small request. On the
4045 * contrary, we wait for the block layer to decide
4046 * when to unplug the device: hopefully, new requests
4047 * will be merged to this one quickly, then the device
4048 * will be unplugged and larger requests will be
4049 * dispatched.
4051 if (small_req && !budget_timeout)
4052 return;
4055 * A large enough request arrived, or the queue is to
4056 * be expired: in both cases disk idling is to be
4057 * stopped, so clear wait_request flag and reset
4058 * timer.
4060 bfq_clear_bfqq_wait_request(bfqq);
4061 hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
4062 bfqg_stats_update_idle_time(bfqq_group(bfqq));
4065 * The queue is not empty, because a new request just
4066 * arrived. Hence we can safely expire the queue, in
4067 * case of budget timeout, without risking that the
4068 * timestamps of the queue are not updated correctly.
4069 * See [1] for more details.
4071 if (budget_timeout)
4072 bfq_bfqq_expire(bfqd, bfqq, false,
4073 BFQQE_BUDGET_TIMEOUT);
4077 static void __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
4079 struct bfq_queue *bfqq = RQ_BFQQ(rq),
4080 *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
4082 if (new_bfqq) {
4083 if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
4084 new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
4086 * Release the request's reference to the old bfqq
4087 * and make sure one is taken to the shared queue.
4089 new_bfqq->allocated++;
4090 bfqq->allocated--;
4091 new_bfqq->ref++;
4092 bfq_clear_bfqq_just_created(bfqq);
4094 * If the bic associated with the process
4095 * issuing this request still points to bfqq
4096 * (and thus has not been already redirected
4097 * to new_bfqq or even some other bfq_queue),
4098 * then complete the merge and redirect it to
4099 * new_bfqq.
4101 if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
4102 bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
4103 bfqq, new_bfqq);
4105 * rq is about to be enqueued into new_bfqq,
4106 * release rq reference on bfqq
4108 bfq_put_queue(bfqq);
4109 rq->elv.priv[1] = new_bfqq;
4110 bfqq = new_bfqq;
4113 bfq_add_request(rq);
4115 rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
4116 list_add_tail(&rq->queuelist, &bfqq->fifo);
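/*
 * fifo_time is the absolute deadline after which this request is
 * considered expired on the per-queue fifo list; the per-direction
 * bfq_fifo_expire[] delays are tunable through the fifo_expire_sync
 * and fifo_expire_async attributes defined at the end of this file.
 */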
4118 bfq_rq_enqueued(bfqd, bfqq, rq);
4121 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
4122 bool at_head)
4124 struct request_queue *q = hctx->queue;
4125 struct bfq_data *bfqd = q->elevator->elevator_data;
4127 spin_lock_irq(&bfqd->lock);
4128 if (blk_mq_sched_try_insert_merge(q, rq)) {
4129 spin_unlock_irq(&bfqd->lock);
4130 return;
4133 spin_unlock_irq(&bfqd->lock);
4135 blk_mq_sched_request_inserted(rq);
4137 spin_lock_irq(&bfqd->lock);
4138 if (at_head || blk_rq_is_passthrough(rq)) {
4139 if (at_head)
4140 list_add(&rq->queuelist, &bfqd->dispatch);
4141 else
4142 list_add_tail(&rq->queuelist, &bfqd->dispatch);
4143 } else {
4144 __bfq_insert_request(bfqd, rq);
4146 if (rq_mergeable(rq)) {
4147 elv_rqhash_add(q, rq);
4148 if (!q->last_merge)
4149 q->last_merge = rq;
4153 spin_unlock_irq(&bfqd->lock);
4156 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
4157 struct list_head *list, bool at_head)
4159 while (!list_empty(list)) {
4160 struct request *rq;
4162 rq = list_first_entry(list, struct request, queuelist);
4163 list_del_init(&rq->queuelist);
4164 bfq_insert_request(hctx, rq, at_head);
4168 static void bfq_update_hw_tag(struct bfq_data *bfqd)
4170 bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
4171 bfqd->rq_in_driver);
4173 if (bfqd->hw_tag == 1)
4174 return;
4177 * This sample is valid if the number of outstanding requests
4178 * is large enough to allow a queueing behavior. Note that the
4179 * sum is not exact, as it's not taking into account deactivated
4180 * requests.
4182 if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
4183 return;
4185 if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
4186 return;
4188 bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
4189 bfqd->max_rq_in_driver = 0;
4190 bfqd->hw_tag_samples = 0;
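/*
 * As an illustration: a device that really queues internally (e.g.,
 * an NCQ-capable drive) typically keeps several requests in flight,
 * so max_rq_in_driver soon exceeds BFQ_HW_QUEUE_THRESHOLD within the
 * sampling window and hw_tag gets set; a device served one request at
 * a time never does, and hw_tag remains 0.
 */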
4193 static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
4195 u64 now_ns;
4196 u32 delta_us;
4198 bfq_update_hw_tag(bfqd);
4200 bfqd->rq_in_driver--;
4201 bfqq->dispatched--;
4203 if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
4205 * Set budget_timeout (which we overload to store the
4206 * time at which the queue remains with no backlog and
4207 * no outstanding request; used by the weight-raising
4208 * mechanism).
4210 bfqq->budget_timeout = jiffies;
4212 bfq_weights_tree_remove(bfqd, &bfqq->entity,
4213 &bfqd->queue_weights_tree);
4216 now_ns = ktime_get_ns();
4218 bfqq->ttime.last_end_request = now_ns;
4221 * Using us instead of ns, to get a reasonable precision in
4222 * computing rate in next check.
4224 delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
4227 * If the request took rather long to complete, and, according
4228 * to the maximum request size recorded, this completion latency
4229 * implies that the request was certainly served at a very low
4230 * rate (less than 1M sectors/sec), then the whole observation
4231 * interval that lasts up to this time instant cannot be a
4232 * valid time interval for computing a new peak rate. Invoke
4233 * bfq_update_rate_reset to have the following three steps
4234 * taken:
4235 * - close the observation interval at the last (previous)
4236 * request dispatch or completion
4237 * - compute rate, if possible, for that observation interval
4238 * - reset to zero samples, which will trigger a proper
4239 * re-initialization of the observation interval on next
4240 * dispatch
4242 if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
4243 (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
4244 1UL<<(BFQ_RATE_SHIFT - 10))
4245 bfq_update_rate_reset(bfqd, NULL);
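/*
 * Quick unit check of the test above (illustrative): the left-hand
 * side is the rate implied by the largest recent request, in sectors
 * per usec scaled by 2^BFQ_RATE_SHIFT; the right-hand side is 2^-10
 * sectors per usec in the same scale, i.e., roughly 10^6/1024 ~=
 * 976000 sectors per second, the "1M sectors/sec" bound mentioned
 * above (about 500 MB/s with 512-byte sectors).
 */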
4246 bfqd->last_completion = now_ns;
4249 * If we are waiting to discover whether the request pattern
4250 * of the task associated with the queue is actually
4251 * isochronous, and both requisites for this condition to hold
4252 * are now satisfied, then compute soft_rt_next_start (see the
4253 * comments on the function bfq_bfqq_softrt_next_start()). We
4254 * schedule this delayed check when bfqq expires, if it still
4255 * has in-flight requests.
4257 if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
4258 RB_EMPTY_ROOT(&bfqq->sort_list))
4259 bfqq->soft_rt_next_start =
4260 bfq_bfqq_softrt_next_start(bfqd, bfqq);
4263 * If this is the in-service queue, check if it needs to be expired,
4264 * or if we want to idle in case it has no pending requests.
4266 if (bfqd->in_service_queue == bfqq) {
4267 if (bfqq->dispatched == 0 && bfq_bfqq_must_idle(bfqq)) {
4268 bfq_arm_slice_timer(bfqd);
4269 return;
4270 } else if (bfq_may_expire_for_budg_timeout(bfqq))
4271 bfq_bfqq_expire(bfqd, bfqq, false,
4272 BFQQE_BUDGET_TIMEOUT);
4273 else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
4274 (bfqq->dispatched == 0 ||
4275 !bfq_bfqq_may_idle(bfqq)))
4276 bfq_bfqq_expire(bfqd, bfqq, false,
4277 BFQQE_NO_MORE_REQUESTS);
4281 static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
4283 bfqq->allocated--;
4285 bfq_put_queue(bfqq);
4288 static void bfq_put_rq_private(struct request_queue *q, struct request *rq)
4290 struct bfq_queue *bfqq = RQ_BFQQ(rq);
4291 struct bfq_data *bfqd = bfqq->bfqd;
4293 if (rq->rq_flags & RQF_STARTED)
4294 bfqg_stats_update_completion(bfqq_group(bfqq),
4295 rq_start_time_ns(rq),
4296 rq_io_start_time_ns(rq),
4297 rq->cmd_flags);
4299 if (likely(rq->rq_flags & RQF_STARTED)) {
4300 unsigned long flags;
4302 spin_lock_irqsave(&bfqd->lock, flags);
4304 bfq_completed_request(bfqq, bfqd);
4305 bfq_put_rq_priv_body(bfqq);
4307 spin_unlock_irqrestore(&bfqd->lock, flags);
4308 } else {
4310 * Request rq may be still/already in the scheduler,
4311 * in which case we need to remove it. And we cannot
4312 * defer such a check and removal, to avoid
4313 * inconsistencies in the time interval from the end
4314 * of this function to the start of the deferred work.
4315 * This situation seems to occur only in process
4316 * context, as a consequence of a merge. In the
4317 * current version of the code, this implies that the
4318 * lock is held.
4321 if (!RB_EMPTY_NODE(&rq->rb_node))
4322 bfq_remove_request(q, rq);
4323 bfq_put_rq_priv_body(bfqq);
4326 rq->elv.priv[0] = NULL;
4327 rq->elv.priv[1] = NULL;
4331 * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
4332 * was the last process referring to that bfqq.
4334 static struct bfq_queue *
4335 bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
4337 bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
4339 if (bfqq_process_refs(bfqq) == 1) {
4340 bfqq->pid = current->pid;
4341 bfq_clear_bfqq_coop(bfqq);
4342 bfq_clear_bfqq_split_coop(bfqq);
4343 return bfqq;
4346 bic_set_bfqq(bic, NULL, 1);
4348 bfq_put_cooperator(bfqq);
4350 bfq_put_queue(bfqq);
4351 return NULL;
4354 static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
4355 struct bfq_io_cq *bic,
4356 struct bio *bio,
4357 bool split, bool is_sync,
4358 bool *new_queue)
4360 struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync);
4362 if (likely(bfqq && bfqq != &bfqd->oom_bfqq))
4363 return bfqq;
4365 if (new_queue)
4366 *new_queue = true;
4368 if (bfqq)
4369 bfq_put_queue(bfqq);
4370 bfqq = bfq_get_queue(bfqd, bio, is_sync, bic);
4372 bic_set_bfqq(bic, bfqq, is_sync);
4373 if (split && is_sync) {
4374 if ((bic->was_in_burst_list && bfqd->large_burst) ||
4375 bic->saved_in_large_burst)
4376 bfq_mark_bfqq_in_large_burst(bfqq);
4377 else {
4378 bfq_clear_bfqq_in_large_burst(bfqq);
4379 if (bic->was_in_burst_list)
4380 hlist_add_head(&bfqq->burst_list_node,
4381 &bfqd->burst_list);
4383 bfqq->split_time = jiffies;
4386 return bfqq;
4390 * Allocate bfq data structures associated with this request.
4392 static int bfq_get_rq_private(struct request_queue *q, struct request *rq,
4393 struct bio *bio)
4395 struct bfq_data *bfqd = q->elevator->elevator_data;
4396 struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
4397 const int is_sync = rq_is_sync(rq);
4398 struct bfq_queue *bfqq;
4399 bool new_queue = false;
4400 bool split = false;
4402 spin_lock_irq(&bfqd->lock);
4404 if (!bic)
4405 goto queue_fail;
4407 bfq_check_ioprio_change(bic, bio);
4409 bfq_bic_update_cgroup(bic, bio);
4411 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
4412 &new_queue);
4414 if (likely(!new_queue)) {
4415 /* If the queue was seeky for too long, break it apart. */
4416 if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
4417 bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
4419 /* Update bic before losing reference to bfqq */
4420 if (bfq_bfqq_in_large_burst(bfqq))
4421 bic->saved_in_large_burst = true;
4423 bfqq = bfq_split_bfqq(bic, bfqq);
4424 split = true;
4426 if (!bfqq)
4427 bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
4428 true, is_sync,
4429 NULL);
4433 bfqq->allocated++;
4434 bfqq->ref++;
4435 bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
4436 rq, bfqq, bfqq->ref);
4438 rq->elv.priv[0] = bic;
4439 rq->elv.priv[1] = bfqq;
4442 * If a bfq_queue has only one process reference, it is owned
4443 * by only this bic: we can then set bfqq->bic = bic. in
4444 * addition, if the queue has also just been split, we have to
4445 * resume its state.
4447 if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
4448 bfqq->bic = bic;
4449 if (split) {
4451 * The queue has just been split from a shared
4452 * queue: restore the idle window and the
4453 * possible weight raising period.
4455 bfq_bfqq_resume_state(bfqq, bic);
4459 if (unlikely(bfq_bfqq_just_created(bfqq)))
4460 bfq_handle_burst(bfqd, bfqq);
4462 spin_unlock_irq(&bfqd->lock);
4464 return 0;
4466 queue_fail:
4467 spin_unlock_irq(&bfqd->lock);
4469 return 1;
4472 static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq)
4474 struct bfq_data *bfqd = bfqq->bfqd;
4475 enum bfqq_expiration reason;
4476 unsigned long flags;
4478 spin_lock_irqsave(&bfqd->lock, flags);
4479 bfq_clear_bfqq_wait_request(bfqq);
4481 if (bfqq != bfqd->in_service_queue) {
4482 spin_unlock_irqrestore(&bfqd->lock, flags);
4483 return;
4486 if (bfq_bfqq_budget_timeout(bfqq))
4488 * Also here the queue can be safely expired
4489 * for budget timeout without wasting
4490 * guarantees
4492 reason = BFQQE_BUDGET_TIMEOUT;
4493 else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
4495 * The queue may not be empty upon timer expiration,
4496 * because we may not disable the timer when the
4497 * first request of the in-service queue arrives
4498 * during disk idling.
4500 reason = BFQQE_TOO_IDLE;
4501 else
4502 goto schedule_dispatch;
4504 bfq_bfqq_expire(bfqd, bfqq, true, reason);
4506 schedule_dispatch:
4507 spin_unlock_irqrestore(&bfqd->lock, flags);
4508 bfq_schedule_dispatch(bfqd);
4512 * Handler of the expiration of the timer running if the in-service queue
4513 * is idling inside its time slice.
4515 static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer)
4517 struct bfq_data *bfqd = container_of(timer, struct bfq_data,
4518 idle_slice_timer);
4519 struct bfq_queue *bfqq = bfqd->in_service_queue;
4522 * Theoretical race here: the in-service queue can be NULL or
4523 * different from the queue that was idling if a new request
4524 * arrives for the current queue and there is a full dispatch
4525 * cycle that changes the in-service queue. This can hardly
4526 * happen, but in the worst case we just expire a queue too
4527 * early.
4529 if (bfqq)
4530 bfq_idle_slice_timer_body(bfqq);
4532 return HRTIMER_NORESTART;
4535 static void __bfq_put_async_bfqq(struct bfq_data *bfqd,
4536 struct bfq_queue **bfqq_ptr)
4538 struct bfq_queue *bfqq = *bfqq_ptr;
4540 bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
4541 if (bfqq) {
4542 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
4544 bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
4545 bfqq, bfqq->ref);
4546 bfq_put_queue(bfqq);
4547 *bfqq_ptr = NULL;
4552 * Release all the bfqg references to its async queues. If we are
4553 * deallocating the group these queues may still contain requests, so
4554 * we reparent them to the root cgroup (i.e., the only one that will
4555 * exist for sure until all the requests on a device are gone).
4557 void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
4559 int i, j;
4561 for (i = 0; i < 2; i++)
4562 for (j = 0; j < IOPRIO_BE_NR; j++)
4563 __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
4565 __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
4568 static void bfq_exit_queue(struct elevator_queue *e)
4570 struct bfq_data *bfqd = e->elevator_data;
4571 struct bfq_queue *bfqq, *n;
4573 hrtimer_cancel(&bfqd->idle_slice_timer);
4575 spin_lock_irq(&bfqd->lock);
4576 list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
4577 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
4578 spin_unlock_irq(&bfqd->lock);
4580 hrtimer_cancel(&bfqd->idle_slice_timer);
4582 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4583 blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
4584 #else
4585 spin_lock_irq(&bfqd->lock);
4586 bfq_put_async_queues(bfqd, bfqd->root_group);
4587 kfree(bfqd->root_group);
4588 spin_unlock_irq(&bfqd->lock);
4589 #endif
4591 kfree(bfqd);
4594 static void bfq_init_root_group(struct bfq_group *root_group,
4595 struct bfq_data *bfqd)
4597 int i;
4599 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4600 root_group->entity.parent = NULL;
4601 root_group->my_entity = NULL;
4602 root_group->bfqd = bfqd;
4603 #endif
4604 root_group->rq_pos_tree = RB_ROOT;
4605 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
4606 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
4607 root_group->sched_data.bfq_class_idle_last_service = jiffies;
4610 static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
4612 struct bfq_data *bfqd;
4613 struct elevator_queue *eq;
4615 eq = elevator_alloc(q, e);
4616 if (!eq)
4617 return -ENOMEM;
4619 bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
4620 if (!bfqd) {
4621 kobject_put(&eq->kobj);
4622 return -ENOMEM;
4624 eq->elevator_data = bfqd;
4626 spin_lock_irq(q->queue_lock);
4627 q->elevator = eq;
4628 spin_unlock_irq(q->queue_lock);
4631 * Our fallback bfqq if bfq_get_queue() runs into OOM issues.
4632 * Grab a permanent reference to it, so that the normal code flow
4633 * will not attempt to free it.
4635 bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
4636 bfqd->oom_bfqq.ref++;
4637 bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
4638 bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
4639 bfqd->oom_bfqq.entity.new_weight =
4640 bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);
4642 /* oom_bfqq does not participate in bursts */
4643 bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
4646 * Trigger weight initialization, according to ioprio, at the
4647 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
4648 * class won't be changed any more.
4650 bfqd->oom_bfqq.entity.prio_changed = 1;
4652 bfqd->queue = q;
4654 INIT_LIST_HEAD(&bfqd->dispatch);
4656 hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
4657 HRTIMER_MODE_REL);
4658 bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
4660 bfqd->queue_weights_tree = RB_ROOT;
4661 bfqd->group_weights_tree = RB_ROOT;
4663 INIT_LIST_HEAD(&bfqd->active_list);
4664 INIT_LIST_HEAD(&bfqd->idle_list);
4665 INIT_HLIST_HEAD(&bfqd->burst_list);
4667 bfqd->hw_tag = -1;
4669 bfqd->bfq_max_budget = bfq_default_max_budget;
4671 bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
4672 bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
4673 bfqd->bfq_back_max = bfq_back_max;
4674 bfqd->bfq_back_penalty = bfq_back_penalty;
4675 bfqd->bfq_slice_idle = bfq_slice_idle;
4676 bfqd->bfq_timeout = bfq_timeout;
4678 bfqd->bfq_requests_within_timer = 120;
4680 bfqd->bfq_large_burst_thresh = 8;
4681 bfqd->bfq_burst_interval = msecs_to_jiffies(180);
4683 bfqd->low_latency = true;
4686 * Trade-off between responsiveness and fairness.
4688 bfqd->bfq_wr_coeff = 30;
4689 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
4690 bfqd->bfq_wr_max_time = 0;
4691 bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
4692 bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
4693 bfqd->bfq_wr_max_softrt_rate = 7000; /*
4694 * Approximate rate required
4695 * to playback or record a
4696 * high-definition compressed
4697 * video.
4699 bfqd->wr_busy_queues = 0;
4702 * Begin by assuming, optimistically, that the device is a
4703 * high-speed one, and that its peak rate is equal to 2/3 of
4704 * the highest reference rate.
4706 bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
4707 T_fast[blk_queue_nonrot(bfqd->queue)];
4708 bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)] * 2 / 3;
4709 bfqd->device_speed = BFQ_BFQD_FAST;
4711 spin_lock_init(&bfqd->lock);
4714 * The invocation of the next bfq_create_group_hierarchy
4715 * function is the head of a chain of function calls
4716 * (bfq_create_group_hierarchy->blkcg_activate_policy->
4717 * blk_mq_freeze_queue) that may lead to the invocation of the
4718 * has_work hook function. For this reason,
4719 * bfq_create_group_hierarchy is invoked only after all
4720 * scheduler data has been initialized, apart from the fields
4721 * that can be initialized only after invoking
4722 * bfq_create_group_hierarchy. This, in particular, enables
4723 * has_work to correctly return false. Of course, to avoid
4724 * other inconsistencies, the blk-mq stack must then refrain
4725 * from invoking further scheduler hooks before this init
4726 * function is finished.
4728 bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
4729 if (!bfqd->root_group)
4730 goto out_free;
4731 bfq_init_root_group(bfqd->root_group, bfqd);
4732 bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
4735 return 0;
4737 out_free:
4738 kfree(bfqd);
4739 kobject_put(&eq->kobj);
4740 return -ENOMEM;
4743 static void bfq_slab_kill(void)
4745 kmem_cache_destroy(bfq_pool);
4748 static int __init bfq_slab_setup(void)
4750 bfq_pool = KMEM_CACHE(bfq_queue, 0);
4751 if (!bfq_pool)
4752 return -ENOMEM;
4753 return 0;
4756 static ssize_t bfq_var_show(unsigned int var, char *page)
4758 return sprintf(page, "%u\n", var);
4761 static ssize_t bfq_var_store(unsigned long *var, const char *page,
4762 size_t count)
4764 unsigned long new_val;
4765 int ret = kstrtoul(page, 10, &new_val);
4767 if (ret == 0)
4768 *var = new_val;
4770 return count;
4773 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
4774 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4776 struct bfq_data *bfqd = e->elevator_data; \
4777 u64 __data = __VAR; \
4778 if (__CONV == 1) \
4779 __data = jiffies_to_msecs(__data); \
4780 else if (__CONV == 2) \
4781 __data = div_u64(__data, NSEC_PER_MSEC); \
4782 return bfq_var_show(__data, (page)); \
4784 SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2);
4785 SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2);
4786 SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
4787 SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
4788 SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2);
4789 SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
4790 SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1);
4791 SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0);
4792 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
4793 #undef SHOW_FUNCTION
4795 #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4796 static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4798 struct bfq_data *bfqd = e->elevator_data; \
4799 u64 __data = __VAR; \
4800 __data = div_u64(__data, NSEC_PER_USEC); \
4801 return bfq_var_show(__data, (page)); \
4803 USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle);
4804 #undef USEC_SHOW_FUNCTION
4806 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4807 static ssize_t \
4808 __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4810 struct bfq_data *bfqd = e->elevator_data; \
4811 unsigned long uninitialized_var(__data); \
4812 int ret = bfq_var_store(&__data, (page), count); \
4813 if (__data < (MIN)) \
4814 __data = (MIN); \
4815 else if (__data > (MAX)) \
4816 __data = (MAX); \
4817 if (__CONV == 1) \
4818 *(__PTR) = msecs_to_jiffies(__data); \
4819 else if (__CONV == 2) \
4820 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
4821 else \
4822 *(__PTR) = __data; \
4823 return ret; \
4825 STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
4826 INT_MAX, 2);
4827 STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
4828 INT_MAX, 2);
4829 STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
4830 STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
4831 INT_MAX, 0);
4832 STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2);
4833 #undef STORE_FUNCTION
4835 #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4836 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4838 struct bfq_data *bfqd = e->elevator_data; \
4839 unsigned long uninitialized_var(__data); \
4840 int ret = bfq_var_store(&__data, (page), count); \
4841 if (__data < (MIN)) \
4842 __data = (MIN); \
4843 else if (__data > (MAX)) \
4844 __data = (MAX); \
4845 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
4846 return ret; \
4848 USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0,
4849 UINT_MAX);
4850 #undef USEC_STORE_FUNCTION
4852 static ssize_t bfq_max_budget_store(struct elevator_queue *e,
4853 const char *page, size_t count)
4855 struct bfq_data *bfqd = e->elevator_data;
4856 unsigned long uninitialized_var(__data);
4857 int ret = bfq_var_store(&__data, (page), count);
4859 if (__data == 0)
4860 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4861 else {
4862 if (__data > INT_MAX)
4863 __data = INT_MAX;
4864 bfqd->bfq_max_budget = __data;
4867 bfqd->bfq_user_max_budget = __data;
4869 return ret;
4873 * Leaving this name to preserve name compatibility with cfq
4874 * parameters, but this timeout is used for both sync and async.
4876 static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
4877 const char *page, size_t count)
4879 struct bfq_data *bfqd = e->elevator_data;
4880 unsigned long uninitialized_var(__data);
4881 int ret = bfq_var_store(&__data, (page), count);
4883 if (__data < 1)
4884 __data = 1;
4885 else if (__data > INT_MAX)
4886 __data = INT_MAX;
4888 bfqd->bfq_timeout = msecs_to_jiffies(__data);
4889 if (bfqd->bfq_user_max_budget == 0)
4890 bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd);
4892 return ret;
4895 static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e,
4896 const char *page, size_t count)
4898 struct bfq_data *bfqd = e->elevator_data;
4899 unsigned long uninitialized_var(__data);
4900 int ret = bfq_var_store(&__data, (page), count);
4902 if (__data > 1)
4903 __data = 1;
4904 if (!bfqd->strict_guarantees && __data == 1
4905 && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC)
4906 bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC;
4908 bfqd->strict_guarantees = __data;
4910 return ret;
4913 static ssize_t bfq_low_latency_store(struct elevator_queue *e,
4914 const char *page, size_t count)
4916 struct bfq_data *bfqd = e->elevator_data;
4917 unsigned long uninitialized_var(__data);
4918 int ret = bfq_var_store(&__data, (page), count);
4920 if (__data > 1)
4921 __data = 1;
4922 if (__data == 0 && bfqd->low_latency != 0)
4923 bfq_end_wr(bfqd);
4924 bfqd->low_latency = __data;
4926 return ret;
4929 #define BFQ_ATTR(name) \
4930 __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store)
4932 static struct elv_fs_entry bfq_attrs[] = {
4933 BFQ_ATTR(fifo_expire_sync),
4934 BFQ_ATTR(fifo_expire_async),
4935 BFQ_ATTR(back_seek_max),
4936 BFQ_ATTR(back_seek_penalty),
4937 BFQ_ATTR(slice_idle),
4938 BFQ_ATTR(slice_idle_us),
4939 BFQ_ATTR(max_budget),
4940 BFQ_ATTR(timeout_sync),
4941 BFQ_ATTR(strict_guarantees),
4942 BFQ_ATTR(low_latency),
4943 __ATTR_NULL
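/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ once
 * bfq is the active scheduler; for example, writing 0 to slice_idle
 * disables device idling altogether, and clearing low_latency ends
 * any ongoing weight raising (see bfq_low_latency_store above).
 */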
4946 static struct elevator_type iosched_bfq_mq = {
4947 .ops.mq = {
4948 .get_rq_priv = bfq_get_rq_private,
4949 .put_rq_priv = bfq_put_rq_private,
4950 .exit_icq = bfq_exit_icq,
4951 .insert_requests = bfq_insert_requests,
4952 .dispatch_request = bfq_dispatch_request,
4953 .next_request = elv_rb_latter_request,
4954 .former_request = elv_rb_former_request,
4955 .allow_merge = bfq_allow_bio_merge,
4956 .bio_merge = bfq_bio_merge,
4957 .request_merge = bfq_request_merge,
4958 .requests_merged = bfq_requests_merged,
4959 .request_merged = bfq_request_merged,
4960 .has_work = bfq_has_work,
4961 .init_sched = bfq_init_queue,
4962 .exit_sched = bfq_exit_queue,
4965 .uses_mq = true,
4966 .icq_size = sizeof(struct bfq_io_cq),
4967 .icq_align = __alignof__(struct bfq_io_cq),
4968 .elevator_attrs = bfq_attrs,
4969 .elevator_name = "bfq",
4970 .elevator_owner = THIS_MODULE,
4973 static int __init bfq_init(void)
4975 int ret;
4977 #ifdef CONFIG_BFQ_GROUP_IOSCHED
4978 ret = blkcg_policy_register(&blkcg_policy_bfq);
4979 if (ret)
4980 return ret;
4981 #endif
4983 ret = -ENOMEM;
4984 if (bfq_slab_setup())
4985 goto err_pol_unreg;
4988 * Times to load large popular applications for the typical
4989 * systems installed on the reference devices (see the
4990 * comments before the definitions of the next two
4991 * arrays). Actually, we use slightly slower values, as the
4992 * estimated peak rate tends to be smaller than the actual
4993 * peak rate. The reason for this last fact is that estimates
4994 * are computed over much shorter time intervals than the long
4995 * intervals typically used for benchmarking. Why? First, to
4996 * adapt more quickly to variations. Second, because an I/O
4997 * scheduler cannot rely on a peak-rate-evaluation workload to
4998 * be run for a long time.
5000 T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */
5001 T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */
5002 T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */
5003 T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */
5006 * Thresholds that determine the switch between speed classes
5007 * (see the comments before the definition of the array
5008 * device_speed_thresh). These thresholds are biased towards
5009 * transitions to the fast class. This is safer than the
5010 * opposite bias. In fact, a wrong transition to the slow
5011 * class results in short weight-raising periods, because the
5012 * speed of the device then tends to be higher than the
5013 * reference peak rate. Conversely, a wrong
5014 * transition to the fast class tends to lengthen
5015 * weight-raising periods, for the opposite reason.
5017 device_speed_thresh[0] = (4 * R_slow[0]) / 3;
5018 device_speed_thresh[1] = (4 * R_slow[1]) / 3;
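/*
 * Note that each threshold is only one third above the corresponding
 * slow-class reference rate, so an estimated peak rate need not come
 * anywhere near the fast-class reference rate for the device to be
 * classified as fast: this is the bias towards the fast class
 * described above.
 */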
5020 ret = elv_register(&iosched_bfq_mq);
5021 if (ret)
5022 goto err_pol_unreg;
5024 return 0;
5026 err_pol_unreg:
5027 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5028 blkcg_policy_unregister(&blkcg_policy_bfq);
5029 #endif
5030 return ret;
5033 static void __exit bfq_exit(void)
5035 elv_unregister(&iosched_bfq_mq);
5036 #ifdef CONFIG_BFQ_GROUP_IOSCHED
5037 blkcg_policy_unregister(&blkcg_policy_bfq);
5038 #endif
5039 bfq_slab_kill();
5042 module_init(bfq_init);
5043 module_exit(bfq_exit);
5045 MODULE_AUTHOR("Paolo Valente");
5046 MODULE_LICENSE("GPL");
5047 MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");