kernel/sched/deadline.c
1 /*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6  * Tasks that periodically execute their instances for less than their
7  * runtime won't miss any of their deadlines.
8  * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
17 #include "sched.h"
19 #include <linux/slab.h>
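
For reference, a -deadline reservation is requested from userspace with sched_setattr(2). The following is a standalone sketch, not part of this file; it assumes __NR_sched_setattr is provided by the installed kernel headers, that the caller is privileged enough to use SCHED_DEADLINE, and that struct sched_attr mirrors the uapi layout (newer libcs may already define it).

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Mirrors the uapi struct consumed by sched_setattr(2). */
struct sched_attr {
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;
	__u64 sched_deadline;
	__u64 sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.sched_policy = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;		/* 10ms budget per instance */
	attr.sched_deadline = 30 * 1000 * 1000;		/* 30ms relative deadline */
	attr.sched_period   = 100 * 1000 * 1000;	/* 100ms period */

	if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	/* ... periodic work loop ... */
	return 0;
}

Values are in nanoseconds; admission control expects sched_runtime <= sched_deadline <= sched_period.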
21 struct dl_bandwidth def_dl_bandwidth;
23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
25 return container_of(dl_se, struct task_struct, dl);
28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
30 return container_of(dl_rq, struct rq, dl);
33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
38 return &rq->dl;
41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
48 struct sched_dl_entity *dl_se = &p->dl;
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
60 void init_dl_bw(struct dl_bw *dl_b)
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
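
to_ratio() lives in kernel/sched/core.c; it expresses runtime/period as a fixed-point fraction so bandwidths can be summed and compared without divisions. A standalone sketch of the idea, assuming the same 20-bit fractional shift:

#include <stdint.h>
#include <stdio.h>

/* Sketch of to_ratio(): runtime/period with 20 fractional bits.
 * (The kernel also special-cases RUNTIME_INF and a zero period.) */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << 20) / period;
}

int main(void)
{
	/* Default rt limits: 950ms every 1s, i.e. about 0.95 of a CPU. */
	uint64_t bw = to_ratio(1000000000ULL, 950000000ULL);

	printf("bw = %llu / %llu\n", (unsigned long long)bw,
	       (unsigned long long)(1ULL << 20));
	return 0;
}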
72 void init_dl_rq(struct dl_rq *dl_rq)
74 dl_rq->rb_root = RB_ROOT;
76 #ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83 #else
84 init_dl_bw(&dl_rq->dl_bw);
85 #endif
88 #ifdef CONFIG_SMP
90 static inline int dl_overloaded(struct rq *rq)
92 return atomic_read(&rq->rd->dlo_count);
95 static inline void dl_set_overload(struct rq *rq)
97 if (!rq->online)
98 return;
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
105 * Matched by the barrier in pull_dl_task().
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
111 static inline void dl_clear_overload(struct rq *rq)
113 if (!rq->online)
114 return;
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
120 static void update_dl_migration(struct dl_rq *dl_rq)
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
135 struct task_struct *p = dl_task_of(dl_se);
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
140 update_dl_migration(dl_rq);
143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
145 struct task_struct *p = dl_task_of(dl_se);
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
150 update_dl_migration(dl_rq);
154 * The list of pushable -deadline task is not a plist, like in
155 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
179 if (leftmost)
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
182 rb_link_node(&p->pushable_dl_tasks, parent, link);
183 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
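
Both the main dl_rq tree and this pushable tree order tasks by absolute deadline through dl_entity_preempt()/dl_time_before() from kernel/sched/sched.h. Assuming dl_time_before() is the usual wrap-safe signed comparison, a sketch of what that buys:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a is earlier than b" on u64 nanosecond clocks,
 * mirroring what dl_time_before() does (sketch). */
static int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t t = UINT64_MAX - 100;		/* just before wrap-around */

	printf("%d\n", dl_time_before(t, t + 200));	/* 1: still earlier */
	printf("%d\n", dl_time_before(t + 200, t));	/* 0 */
	return 0;
}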
186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
188 struct dl_rq *dl_rq = &rq->dl;
190 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
191 return;
193 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
194 struct rb_node *next_node;
196 next_node = rb_next(&p->pushable_dl_tasks);
197 dl_rq->pushable_dl_tasks_leftmost = next_node;
200 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
201 RB_CLEAR_NODE(&p->pushable_dl_tasks);
204 static inline int has_pushable_dl_tasks(struct rq *rq)
206 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
209 static int push_dl_task(struct rq *rq);
211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
213 return dl_task(prev);
216 static inline void set_post_schedule(struct rq *rq)
218 rq->post_schedule = has_pushable_dl_tasks(rq);
221 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
223 static void dl_task_offline_migration(struct rq *rq, struct task_struct *p)
225 struct rq *later_rq = NULL;
226 bool fallback = false;
228 later_rq = find_lock_later_rq(p, rq);
230 if (!later_rq) {
231 int cpu;
234 * If we cannot preempt any rq, fall back to pick any
235 * online cpu.
237 fallback = true;
238 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
239 if (cpu >= nr_cpu_ids) {
241                  * Failed to find any suitable cpu.
242 * The task will never come back!
244 BUG_ON(dl_bandwidth_enabled());
247 * If admission control is disabled we
248 * try a little harder to let the task
249 * run.
251 cpu = cpumask_any(cpu_active_mask);
253 later_rq = cpu_rq(cpu);
254 double_lock_balance(rq, later_rq);
257 deactivate_task(rq, p, 0);
258 set_task_cpu(p, later_rq->cpu);
259 activate_task(later_rq, p, ENQUEUE_REPLENISH);
261 if (!fallback)
262 resched_curr(later_rq);
264 double_unlock_balance(rq, later_rq);
267 #else
269 static inline
270 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
274 static inline
275 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
279 static inline
280 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
284 static inline
285 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
289 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
291 return false;
294 static inline int pull_dl_task(struct rq *rq)
296 return 0;
299 static inline void set_post_schedule(struct rq *rq)
302 #endif /* CONFIG_SMP */
304 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
305 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
306 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
307 int flags);
310 * We are being explicitly informed that a new instance is starting,
311 * and this means that:
312 * - the absolute deadline of the entity has to be placed at
313 * current time + relative deadline;
314 * - the runtime of the entity has to be set to the maximum value.
316  * The capability of specifying such an event is useful whenever a -deadline
317 * entity wants to (try to!) synchronize its behaviour with the scheduler's
318 * one, and to (try to!) reconcile itself with its own scheduling
319 * parameters.
321 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
322 struct sched_dl_entity *pi_se)
324 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
325 struct rq *rq = rq_of_dl_rq(dl_rq);
327 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
330 * We use the regular wall clock time to set deadlines in the
331 * future; in fact, we must consider execution overheads (time
332 * spent on hardirq context, etc.).
334 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
335 dl_se->runtime = pi_se->dl_runtime;
336 dl_se->dl_new = 0;
340 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
341  * possibility of an entity lasting more than what it declared, and thus
342 * exhausting its runtime.
344 * Here we are interested in making runtime overrun possible, but we do
345  * not want an entity which is misbehaving to affect the scheduling of all
346 * other entities.
347 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
348 * is used, in order to confine each entity within its own bandwidth.
350 * This function deals exactly with that, and ensures that when the runtime
351  * of an entity is replenished, its deadline is also postponed. That ensures
352  * the overrunning entity can't interfere with other entities in the system and
353  * can't make them miss their deadlines. Reasons why this kind of overrun
354  * could happen are, typically, an entity voluntarily trying to overcome its
355  * runtime, or just having underestimated it during sched_setattr().
357 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
358 struct sched_dl_entity *pi_se)
360 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
361 struct rq *rq = rq_of_dl_rq(dl_rq);
363 BUG_ON(pi_se->dl_runtime <= 0);
366 * This could be the case for a !-dl task that is boosted.
367 * Just go with full inherited parameters.
369 if (dl_se->dl_deadline == 0) {
370 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
371 dl_se->runtime = pi_se->dl_runtime;
375 * We keep moving the deadline away until we get some
376 * available runtime for the entity. This ensures correct
377 * handling of situations where the runtime overrun is
378          * arbitrarily large.
380 while (dl_se->runtime <= 0) {
381 dl_se->deadline += pi_se->dl_period;
382 dl_se->runtime += pi_se->dl_runtime;
386 * At this point, the deadline really should be "in
387 * the future" with respect to rq->clock. If it's
388 * not, we are, for some reason, lagging too much!
389          * Anyway, after having warned userspace about that,
390 * we still try to keep the things running by
391 * resetting the deadline and the budget of the
392 * entity.
394 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
395                 printk_deferred_once("sched: DL replenish lagged too much\n");
396 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
397 dl_se->runtime = pi_se->dl_runtime;
400 if (dl_se->dl_yielded)
401 dl_se->dl_yielded = 0;
402 if (dl_se->dl_throttled)
403 dl_se->dl_throttled = 0;
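
A worked sketch of the replenishment rule above (standalone, with hypothetical parameters): the deadline keeps being postponed by one period and one budget is added each time, until the runtime becomes positive again.

#include <stdint.h>
#include <stdio.h>

struct dl_se {
	int64_t  runtime;	/* remaining budget, ns (can be negative) */
	uint64_t deadline;	/* absolute deadline, ns */
	uint64_t dl_runtime;	/* budget per instance, ns */
	uint64_t dl_period;	/* period, ns */
};

/* Sketch of the while() loop in replenish_dl_entity(). */
static void replenish(struct dl_se *se)
{
	while (se->runtime <= 0) {
		se->deadline += se->dl_period;
		se->runtime  += se->dl_runtime;
	}
}

int main(void)
{
	/* 10ms every 100ms, overran by 25ms: three periods are consumed. */
	struct dl_se se = {
		.runtime = -25000000, .deadline = 100000000,
		.dl_runtime = 10000000, .dl_period = 100000000,
	};

	replenish(&se);
	printf("deadline=%llums runtime=%lldms\n",
	       (unsigned long long)(se.deadline / 1000000),
	       (long long)(se.runtime / 1000000));	/* 400ms, 5ms */
	return 0;
}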
407 * Here we check if --at time t-- an entity (which is probably being
408 * [re]activated or, in general, enqueued) can use its remaining runtime
409 * and its current deadline _without_ exceeding the bandwidth it is
410 * assigned (function returns true if it can't). We are in fact applying
411 * one of the CBS rules: when a task wakes up, if the residual runtime
412 * over residual deadline fits within the allocated bandwidth, then we
413 * can keep the current (absolute) deadline and residual budget without
414 * disrupting the schedulability of the system. Otherwise, we should
415 * refill the runtime and set the deadline a period in the future,
416 * because keeping the current (absolute) deadline of the task would
417 * result in breaking guarantees promised to other tasks (refer to
418  * Documentation/scheduler/sched-deadline.txt for more information).
420 * This function returns true if:
422 * runtime / (deadline - t) > dl_runtime / dl_period ,
424 * IOW we can't recycle current parameters.
426 * Notice that the bandwidth check is done against the period. For
427  * tasks with deadline equal to period this is the same as using
428 * dl_deadline instead of dl_period in the equation above.
430 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
431 struct sched_dl_entity *pi_se, u64 t)
433 u64 left, right;
436 * left and right are the two sides of the equation above,
437 * after a bit of shuffling to use multiplications instead
438 * of divisions.
440 * Note that none of the time values involved in the two
441 * multiplications are absolute: dl_deadline and dl_runtime
442 * are the relative deadline and the maximum runtime of each
443 * instance, runtime is the runtime left for the last instance
444 * and (deadline - t), since t is rq->clock, is the time left
445 * to the (absolute) deadline. Even if overflowing the u64 type
446 * is very unlikely to occur in both cases, here we scale down
447 * as we want to avoid that risk at all. Scaling down by 10
448 * means that we reduce granularity to 1us. We are fine with it,
449 * since this is only a true/false check and, anyway, thinking
450          * of anything below microsecond resolution is actually fiction
451 * (but still we want to give the user that illusion >;).
453 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
454 right = ((dl_se->deadline - t) >> DL_SCALE) *
455 (pi_se->dl_runtime >> DL_SCALE);
457 return dl_time_before(right, left);
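
A worked sketch of the check above, assuming DL_SCALE is the 10-bit shift defined in kernel/sched/sched.h (so both sides are compared at roughly microsecond granularity):

#include <stdint.h>
#include <stdio.h>

#define DL_SCALE 10	/* granularity reduction, assumed to match the kernel */

/*
 * True if keeping the current (deadline, runtime) pair would exceed the
 * reserved bandwidth, i.e. runtime / (deadline - t) > dl_runtime / dl_period.
 */
static int dl_entity_overflow(uint64_t dl_runtime, uint64_t dl_period,
			      uint64_t runtime, uint64_t deadline, uint64_t t)
{
	uint64_t left, right;

	left  = (dl_period >> DL_SCALE) * (runtime >> DL_SCALE);
	right = ((deadline - t) >> DL_SCALE) * (dl_runtime >> DL_SCALE);

	return (int64_t)(right - left) < 0;
}

int main(void)
{
	/* 10ms every 100ms; woken at t=0 with 8ms left and deadline at 20ms:
	 * 8/20 > 10/100, so the old pair cannot be recycled. */
	printf("%d\n", dl_entity_overflow(10000000, 100000000,
					  8000000, 20000000, 0));	/* 1 */
	/* Same task with only 1ms left: 1/20 <= 10/100, parameters are kept. */
	printf("%d\n", dl_entity_overflow(10000000, 100000000,
					  1000000, 20000000, 0));	/* 0 */
	return 0;
}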
461 * When a -deadline entity is queued back on the runqueue, its runtime and
462 * deadline might need updating.
464 * The policy here is that we update the deadline of the entity only if:
465 * - the current deadline is in the past,
466 * - using the remaining runtime with the current deadline would make
467 * the entity exceed its bandwidth.
469 static void update_dl_entity(struct sched_dl_entity *dl_se,
470 struct sched_dl_entity *pi_se)
472 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
473 struct rq *rq = rq_of_dl_rq(dl_rq);
476 * The arrival of a new instance needs special treatment, i.e.,
477 * the actual scheduling parameters have to be "renewed".
479 if (dl_se->dl_new) {
480 setup_new_dl_entity(dl_se, pi_se);
481 return;
484 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
485 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
486 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
487 dl_se->runtime = pi_se->dl_runtime;
492 * If the entity depleted all its runtime, and if we want it to sleep
493 * while waiting for some new execution time to become available, we
494 * set the bandwidth enforcement timer to the replenishment instant
495 * and try to activate it.
497 * Notice that it is important for the caller to know if the timer
498 * actually started or not (i.e., the replenishment instant is in
499 * the future or in the past).
501 static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
503 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
504 struct rq *rq = rq_of_dl_rq(dl_rq);
505 ktime_t now, act;
506 ktime_t soft, hard;
507 unsigned long range;
508 s64 delta;
510 if (boosted)
511 return 0;
513 * We want the timer to fire at the deadline, but considering
514 * that it is actually coming from rq->clock and not from
515 * hrtimer's time base reading.
517 act = ns_to_ktime(dl_se->deadline);
518 now = hrtimer_cb_get_time(&dl_se->dl_timer);
519 delta = ktime_to_ns(now) - rq_clock(rq);
520 act = ktime_add_ns(act, delta);
523 * If the expiry time already passed, e.g., because the value
524 * chosen as the deadline is too small, don't even try to
525 * start the timer in the past!
527 if (ktime_us_delta(act, now) < 0)
528 return 0;
530 hrtimer_set_expires(&dl_se->dl_timer, act);
532 soft = hrtimer_get_softexpires(&dl_se->dl_timer);
533 hard = hrtimer_get_expires(&dl_se->dl_timer);
534 range = ktime_to_ns(ktime_sub(hard, soft));
535 __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
536 range, HRTIMER_MODE_ABS, 0);
538 return hrtimer_active(&dl_se->dl_timer);
542 * This is the bandwidth enforcement timer callback. If here, we know
543 * a task is not on its dl_rq, since the fact that the timer was running
544 * means the task is throttled and needs a runtime replenishment.
546  * However, what we actually do depends on whether the task is active
547 * (it is on its rq) or has been removed from there by a call to
548 * dequeue_task_dl(). In the former case we must issue the runtime
549 * replenishment and add the task back to the dl_rq; in the latter, we just
550 * do nothing but clearing dl_throttled, so that runtime and deadline
551 * updating (and the queueing back to dl_rq) will be done by the
552 * next call to enqueue_task_dl().
554 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
556 struct sched_dl_entity *dl_se = container_of(timer,
557 struct sched_dl_entity,
558 dl_timer);
559 struct task_struct *p = dl_task_of(dl_se);
560 unsigned long flags;
561 struct rq *rq;
563 rq = task_rq_lock(p, &flags);
566 * We need to take care of several possible races here:
568 * - the task might have changed its scheduling policy
569 * to something different than SCHED_DEADLINE
570 * - the task might have changed its reservation parameters
571 * (through sched_setattr())
572 * - the task might have been boosted by someone else and
573 * might be in the boosting/deboosting path
575  * In all these cases we bail out, as the task is already
576 * in the runqueue or is going to be enqueued back anyway.
578 if (!dl_task(p) || dl_se->dl_new ||
579 dl_se->dl_boosted || !dl_se->dl_throttled)
580 goto unlock;
582 sched_clock_tick();
583 update_rq_clock(rq);
585 #ifdef CONFIG_SMP
587 * If we find that the rq the task was on is no longer
588 * available, we need to select a new rq.
590 if (unlikely(!rq->online)) {
591 dl_task_offline_migration(rq, p);
592 goto unlock;
594 #endif
597          * If the throttle happened during sched-out, like:
599 * schedule()
600 * deactivate_task()
601 * dequeue_task_dl()
602 * update_curr_dl()
603 * start_dl_timer()
604 * __dequeue_task_dl()
605 * prev->on_rq = 0;
607 * We can be both throttled and !queued. Replenish the counter
608 * but do not enqueue -- wait for our wakeup to do that.
610 if (!task_on_rq_queued(p)) {
611 replenish_dl_entity(dl_se, dl_se);
612 goto unlock;
615 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
616 if (dl_task(rq->curr))
617 check_preempt_curr_dl(rq, p, 0);
618 else
619 resched_curr(rq);
620 #ifdef CONFIG_SMP
622 * Queueing this task back might have overloaded rq,
623 * check if we need to kick someone away.
625 if (has_pushable_dl_tasks(rq))
626 push_dl_task(rq);
627 #endif
628 unlock:
629 task_rq_unlock(rq, p, &flags);
631 return HRTIMER_NORESTART;
634 void init_dl_task_timer(struct sched_dl_entity *dl_se)
636 struct hrtimer *timer = &dl_se->dl_timer;
638 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
639 timer->function = dl_task_timer;
642 static
643 int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
645 return (dl_se->runtime <= 0);
648 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
651 * Update the current task's runtime statistics (provided it is still
652 * a -deadline task and has not been removed from the dl_rq).
654 static void update_curr_dl(struct rq *rq)
656 struct task_struct *curr = rq->curr;
657 struct sched_dl_entity *dl_se = &curr->dl;
658 u64 delta_exec;
660 if (!dl_task(curr) || !on_dl_rq(dl_se))
661 return;
664 * Consumed budget is computed considering the time as
665 * observed by schedulable tasks (excluding time spent
666 * in hardirq context, etc.). Deadlines are instead
667 * computed using hard walltime. This seems to be the more
668 * natural solution, but the full ramifications of this
669 * approach need further study.
671 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
672 if (unlikely((s64)delta_exec <= 0))
673 return;
675 schedstat_set(curr->se.statistics.exec_max,
676 max(curr->se.statistics.exec_max, delta_exec));
678 curr->se.sum_exec_runtime += delta_exec;
679 account_group_exec_runtime(curr, delta_exec);
681 curr->se.exec_start = rq_clock_task(rq);
682 cpuacct_charge(curr, delta_exec);
684 sched_rt_avg_update(rq, delta_exec);
686 dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
687 if (dl_runtime_exceeded(rq, dl_se)) {
688 dl_se->dl_throttled = 1;
689 __dequeue_task_dl(rq, curr, 0);
690 if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
691 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
693 if (!is_leftmost(curr, &rq->dl))
694 resched_curr(rq);
698 * Because -- for now -- we share the rt bandwidth, we need to
699 * account our runtime there too, otherwise actual rt tasks
700 * would be able to exceed the shared quota.
702 * Account to the root rt group for now.
704 * The solution we're working towards is having the RT groups scheduled
705 * using deadline servers -- however there's a few nasties to figure
706 * out before that can happen.
708 if (rt_bandwidth_enabled()) {
709 struct rt_rq *rt_rq = &rq->rt;
711 raw_spin_lock(&rt_rq->rt_runtime_lock);
713 * We'll let actual RT tasks worry about the overflow here, we
714                  * have our own CBS to keep us in line; only account when RT
715 * bandwidth is relevant.
717 if (sched_rt_bandwidth_account(rt_rq))
718 rt_rq->rt_time += delta_exec;
719 raw_spin_unlock(&rt_rq->rt_runtime_lock);
723 #ifdef CONFIG_SMP
725 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
727 static inline u64 next_deadline(struct rq *rq)
729 struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
731 if (next && dl_prio(next->prio))
732 return next->dl.deadline;
733 else
734 return 0;
737 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
739 struct rq *rq = rq_of_dl_rq(dl_rq);
741 if (dl_rq->earliest_dl.curr == 0 ||
742 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
744 * If the dl_rq had no -deadline tasks, or if the new task
745          * has a shorter deadline than the current one on dl_rq, we
746 * know that the previous earliest becomes our next earliest,
747 * as the new task becomes the earliest itself.
749 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
750 dl_rq->earliest_dl.curr = deadline;
751 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
752 } else if (dl_rq->earliest_dl.next == 0 ||
753 dl_time_before(deadline, dl_rq->earliest_dl.next)) {
755          * On the other hand, if the new -deadline task has
756 * a later deadline than the earliest one on dl_rq, but
757 * it is earlier than the next (if any), we must
758 * recompute the next-earliest.
760 dl_rq->earliest_dl.next = next_deadline(rq);
764 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
766 struct rq *rq = rq_of_dl_rq(dl_rq);
769 * Since we may have removed our earliest (and/or next earliest)
770 * task we must recompute them.
772 if (!dl_rq->dl_nr_running) {
773 dl_rq->earliest_dl.curr = 0;
774 dl_rq->earliest_dl.next = 0;
775 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
776 } else {
777 struct rb_node *leftmost = dl_rq->rb_leftmost;
778 struct sched_dl_entity *entry;
780 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
781 dl_rq->earliest_dl.curr = entry->deadline;
782 dl_rq->earliest_dl.next = next_deadline(rq);
783 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
787 #else
789 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
790 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
792 #endif /* CONFIG_SMP */
794 static inline
795 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
797 int prio = dl_task_of(dl_se)->prio;
798 u64 deadline = dl_se->deadline;
800 WARN_ON(!dl_prio(prio));
801 dl_rq->dl_nr_running++;
802 add_nr_running(rq_of_dl_rq(dl_rq), 1);
804 inc_dl_deadline(dl_rq, deadline);
805 inc_dl_migration(dl_se, dl_rq);
808 static inline
809 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
811 int prio = dl_task_of(dl_se)->prio;
813 WARN_ON(!dl_prio(prio));
814 WARN_ON(!dl_rq->dl_nr_running);
815 dl_rq->dl_nr_running--;
816 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
818 dec_dl_deadline(dl_rq, dl_se->deadline);
819 dec_dl_migration(dl_se, dl_rq);
822 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
824 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
825 struct rb_node **link = &dl_rq->rb_root.rb_node;
826 struct rb_node *parent = NULL;
827 struct sched_dl_entity *entry;
828 int leftmost = 1;
830 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
832 while (*link) {
833 parent = *link;
834 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
835 if (dl_time_before(dl_se->deadline, entry->deadline))
836 link = &parent->rb_left;
837 else {
838 link = &parent->rb_right;
839 leftmost = 0;
843 if (leftmost)
844 dl_rq->rb_leftmost = &dl_se->rb_node;
846 rb_link_node(&dl_se->rb_node, parent, link);
847 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
849 inc_dl_tasks(dl_se, dl_rq);
852 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
854 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
856 if (RB_EMPTY_NODE(&dl_se->rb_node))
857 return;
859 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
860 struct rb_node *next_node;
862 next_node = rb_next(&dl_se->rb_node);
863 dl_rq->rb_leftmost = next_node;
866 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
867 RB_CLEAR_NODE(&dl_se->rb_node);
869 dec_dl_tasks(dl_se, dl_rq);
872 static void
873 enqueue_dl_entity(struct sched_dl_entity *dl_se,
874 struct sched_dl_entity *pi_se, int flags)
876 BUG_ON(on_dl_rq(dl_se));
879 * If this is a wakeup or a new instance, the scheduling
880 * parameters of the task might need updating. Otherwise,
881 * we want a replenishment of its runtime.
883 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
884 update_dl_entity(dl_se, pi_se);
885 else if (flags & ENQUEUE_REPLENISH)
886 replenish_dl_entity(dl_se, pi_se);
888 __enqueue_dl_entity(dl_se);
891 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
893 __dequeue_dl_entity(dl_se);
896 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
898 struct task_struct *pi_task = rt_mutex_get_top_task(p);
899 struct sched_dl_entity *pi_se = &p->dl;
902 * Use the scheduling parameters of the top pi-waiter
903 * task if we have one and its (relative) deadline is
904  * smaller than ours; otherwise we keep our runtime and
905 * deadline.
907 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
908 pi_se = &pi_task->dl;
909 } else if (!dl_prio(p->normal_prio)) {
911 * Special case in which we have a !SCHED_DEADLINE task
912          * that is going to be deboosted, but exceeds its
913          * runtime while doing so. No point in replenishing
914          * it, as it's going to return to its original
915 * scheduling class after this.
917 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
918 return;
922 * If p is throttled, we do nothing. In fact, if it exhausted
923 * its budget it needs a replenishment and, since it now is on
924 * its rq, the bandwidth timer callback (which clearly has not
925 * run yet) will take care of this.
927 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
928 return;
930 enqueue_dl_entity(&p->dl, pi_se, flags);
932 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
933 enqueue_pushable_dl_task(rq, p);
936 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
938 dequeue_dl_entity(&p->dl);
939 dequeue_pushable_dl_task(rq, p);
942 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
944 update_curr_dl(rq);
945 __dequeue_task_dl(rq, p, flags);
949 * Yield task semantic for -deadline tasks is:
951 * get off from the CPU until our next instance, with
952 * a new runtime. This is of little use now, since we
953 * don't have a bandwidth reclaiming mechanism. Anyway,
954 * bandwidth reclaiming is planned for the future, and
955 * yield_task_dl will indicate that some spare budget
956  * is available for other task instances to use.
958 static void yield_task_dl(struct rq *rq)
960 struct task_struct *p = rq->curr;
963 * We make the task go to sleep until its current deadline by
964 * forcing its runtime to zero. This way, update_curr_dl() stops
965 * it and the bandwidth timer will wake it up and will give it
966 * new scheduling parameters (thanks to dl_yielded=1).
968 if (p->dl.runtime > 0) {
969 rq->curr->dl.dl_yielded = 1;
970 p->dl.runtime = 0;
972 update_rq_clock(rq);
973 update_curr_dl(rq);
975 * Tell update_rq_clock() that we've just updated,
976 * so we don't do microscopic update in schedule()
977 * and double the fastpath cost.
979 rq_clock_skip_update(rq, true);
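
From userspace the intended pattern is: do the work for the current instance, then call sched_yield() to drop whatever budget is left and sleep until the next period. A minimal sketch (it assumes the task was already made SCHED_DEADLINE, e.g. as in the earlier sched_setattr() example; do_one_job() is a hypothetical placeholder):

#include <sched.h>
#include <stdio.h>

/* Placeholder for the actual per-instance work (hypothetical). */
static void do_one_job(void) { /* ... */ }

int main(void)
{
	for (int i = 0; i < 100; i++) {
		do_one_job();
		/*
		 * Give back the unused budget; the bandwidth timer will
		 * wake us with fresh runtime at the next period.
		 */
		sched_yield();
	}
	return 0;
}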
982 #ifdef CONFIG_SMP
984 static int find_later_rq(struct task_struct *task);
986 static int
987 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
989 struct task_struct *curr;
990 struct rq *rq;
992 if (sd_flag != SD_BALANCE_WAKE)
993 goto out;
995 rq = cpu_rq(cpu);
997 rcu_read_lock();
998 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1001 * If we are dealing with a -deadline task, we must
1002 * decide where to wake it up.
1003 * If it has a later deadline and the current task
1004 * on this rq can't move (provided the waking task
1005 * can!) we prefer to send it somewhere else. On the
1006 * other hand, if it has a shorter deadline, we
1007 * try to make it stay here, it might be important.
1009 if (unlikely(dl_task(curr)) &&
1010 (curr->nr_cpus_allowed < 2 ||
1011 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1012 (p->nr_cpus_allowed > 1)) {
1013 int target = find_later_rq(p);
1015 if (target != -1)
1016 cpu = target;
1018 rcu_read_unlock();
1020 out:
1021 return cpu;
1024 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1027 * Current can't be migrated, useless to reschedule,
1028 * let's hope p can move out.
1030 if (rq->curr->nr_cpus_allowed == 1 ||
1031 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1032 return;
1035 * p is migratable, so let's not schedule it and
1036 * see if it is pushed or pulled somewhere else.
1038 if (p->nr_cpus_allowed != 1 &&
1039 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1040 return;
1042 resched_curr(rq);
1045 static int pull_dl_task(struct rq *this_rq);
1047 #endif /* CONFIG_SMP */
1050 * Only called when both the current and waking task are -deadline
1051 * tasks.
1053 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1054 int flags)
1056 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1057 resched_curr(rq);
1058 return;
1061 #ifdef CONFIG_SMP
1063 * In the unlikely case current and p have the same deadline
1064 * let us try to decide what's the best thing to do...
1066 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1067 !test_tsk_need_resched(rq->curr))
1068 check_preempt_equal_dl(rq, p);
1069 #endif /* CONFIG_SMP */
1072 #ifdef CONFIG_SCHED_HRTICK
1073 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1075 hrtick_start(rq, p->dl.runtime);
1077 #else /* !CONFIG_SCHED_HRTICK */
1078 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1081 #endif
1083 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1084 struct dl_rq *dl_rq)
1086 struct rb_node *left = dl_rq->rb_leftmost;
1088 if (!left)
1089 return NULL;
1091 return rb_entry(left, struct sched_dl_entity, rb_node);
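
The pick itself is plain EDF: the leftmost node of the deadline-ordered rb-tree is the runnable task with the earliest absolute deadline. A conceptual sketch of the same decision over a flat array (the kernel keeps the rb-tree so the real pick is just the cached leftmost node):

#include <stdint.h>
#include <stdio.h>

/* Conceptual EDF pick: earliest absolute deadline runs next (sketch). */
static int pick_edf(const uint64_t *deadline, int nr)
{
	int best = 0;

	for (int i = 1; i < nr; i++)
		if ((int64_t)(deadline[i] - deadline[best]) < 0)
			best = i;
	return best;
}

int main(void)
{
	uint64_t deadline[] = { 300, 120, 250 };

	printf("next = task %d\n", pick_edf(deadline, 3));	/* task 1 */
	return 0;
}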
1094 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1096 struct sched_dl_entity *dl_se;
1097 struct task_struct *p;
1098 struct dl_rq *dl_rq;
1100 dl_rq = &rq->dl;
1102 if (need_pull_dl_task(rq, prev)) {
1103 pull_dl_task(rq);
1105          * pull_dl_task() can drop (and re-acquire) rq->lock; this
1106 * means a stop task can slip in, in which case we need to
1107 * re-start task selection.
1109 if (rq->stop && task_on_rq_queued(rq->stop))
1110 return RETRY_TASK;
1114 * When prev is DL, we may throttle it in put_prev_task().
1115 * So, we update time before we check for dl_nr_running.
1117 if (prev->sched_class == &dl_sched_class)
1118 update_curr_dl(rq);
1120 if (unlikely(!dl_rq->dl_nr_running))
1121 return NULL;
1123 put_prev_task(rq, prev);
1125 dl_se = pick_next_dl_entity(rq, dl_rq);
1126 BUG_ON(!dl_se);
1128 p = dl_task_of(dl_se);
1129 p->se.exec_start = rq_clock_task(rq);
1131 /* Running task will never be pushed. */
1132 dequeue_pushable_dl_task(rq, p);
1134 if (hrtick_enabled(rq))
1135 start_hrtick_dl(rq, p);
1137 set_post_schedule(rq);
1139 return p;
1142 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1144 update_curr_dl(rq);
1146 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1147 enqueue_pushable_dl_task(rq, p);
1150 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1152 update_curr_dl(rq);
1155 * Even when we have runtime, update_curr_dl() might have resulted in us
1156 * not being the leftmost task anymore. In that case NEED_RESCHED will
1157 * be set and schedule() will start a new hrtick for the next task.
1159 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1160 is_leftmost(p, &rq->dl))
1161 start_hrtick_dl(rq, p);
1164 static void task_fork_dl(struct task_struct *p)
1167 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1168 * sched_fork()
1172 static void task_dead_dl(struct task_struct *p)
1174 struct hrtimer *timer = &p->dl.dl_timer;
1175 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1178 * Since we are TASK_DEAD we won't slip out of the domain!
1180 raw_spin_lock_irq(&dl_b->lock);
1181 /* XXX we should retain the bw until 0-lag */
1182 dl_b->total_bw -= p->dl.dl_bw;
1183 raw_spin_unlock_irq(&dl_b->lock);
1185 hrtimer_cancel(timer);
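
dl_b->total_bw is the running sum of the admitted tasks' bandwidths in this root domain; admission only lets a task in if the new total stays within bw * nr_cpus (roughly what __dl_overflow() in kernel/sched/sched.h checks). A rough standalone sketch, assuming the same fixed-point representation as to_ratio():

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20	/* fixed-point shift, assumed to match to_ratio() */

struct dl_bw {
	int64_t  bw;		/* per-CPU cap, -1 means no limit */
	uint64_t total_bw;	/* sum of admitted tasks' bandwidths */
};

/* Sketch of the admission test: reject if the new total would exceed
 * bw * nr_cpus for this root domain. */
static int dl_overflow(struct dl_bw *b, int cpus, uint64_t old_bw, uint64_t new_bw)
{
	return b->bw != -1 &&
	       (uint64_t)b->bw * cpus < b->total_bw - old_bw + new_bw;
}

int main(void)
{
	/* 95% per CPU, two CPUs, 1.5 CPUs worth of bandwidth already admitted. */
	struct dl_bw b = { .bw = (95 << BW_SHIFT) / 100,
			   .total_bw = (3ULL << BW_SHIFT) / 2 };

	/* Another 0.5 would exceed 2 * 0.95 = 1.9: rejected (prints 1). */
	printf("%d\n", dl_overflow(&b, 2, 0, (1ULL << BW_SHIFT) / 2));
	/* 0.3 more still fits (1.5 + 0.3 <= 1.9): accepted (prints 0). */
	printf("%d\n", dl_overflow(&b, 2, 0, (3ULL << BW_SHIFT) / 10));
	return 0;
}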
1188 static void set_curr_task_dl(struct rq *rq)
1190 struct task_struct *p = rq->curr;
1192 p->se.exec_start = rq_clock_task(rq);
1194 /* You can't push away the running task */
1195 dequeue_pushable_dl_task(rq, p);
1198 #ifdef CONFIG_SMP
1200 /* Only try algorithms three times */
1201 #define DL_MAX_TRIES 3
1203 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1205 if (!task_running(rq, p) &&
1206 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1207 return 1;
1208 return 0;
1211 /* Returns the second earliest -deadline task, NULL otherwise */
1212 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
1214 struct rb_node *next_node = rq->dl.rb_leftmost;
1215 struct sched_dl_entity *dl_se;
1216 struct task_struct *p = NULL;
1218 next_node:
1219 next_node = rb_next(next_node);
1220 if (next_node) {
1221 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
1222 p = dl_task_of(dl_se);
1224 if (pick_dl_task(rq, p, cpu))
1225 return p;
1227 goto next_node;
1230 return NULL;
1233 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1235 static int find_later_rq(struct task_struct *task)
1237 struct sched_domain *sd;
1238 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1239 int this_cpu = smp_processor_id();
1240 int best_cpu, cpu = task_cpu(task);
1242 /* Make sure the mask is initialized first */
1243 if (unlikely(!later_mask))
1244 return -1;
1246 if (task->nr_cpus_allowed == 1)
1247 return -1;
1250 * We have to consider system topology and task affinity
1251 * first, then we can look for a suitable cpu.
1253 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1254 task, later_mask);
1255 if (best_cpu == -1)
1256 return -1;
1259 * If we are here, some target has been found,
1260 * the most suitable of which is cached in best_cpu.
1261 * This is, among the runqueues where the current tasks
1262 * have later deadlines than the task's one, the rq
1263 * with the latest possible one.
1265 * Now we check how well this matches with task's
1266 * affinity and system topology.
1268          * The last cpu where the task ran is our first
1269 * guess, since it is most likely cache-hot there.
1271 if (cpumask_test_cpu(cpu, later_mask))
1272 return cpu;
1274 * Check if this_cpu is to be skipped (i.e., it is
1275 * not in the mask) or not.
1277 if (!cpumask_test_cpu(this_cpu, later_mask))
1278 this_cpu = -1;
1280 rcu_read_lock();
1281 for_each_domain(cpu, sd) {
1282 if (sd->flags & SD_WAKE_AFFINE) {
1285 * If possible, preempting this_cpu is
1286 * cheaper than migrating.
1288 if (this_cpu != -1 &&
1289 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1290 rcu_read_unlock();
1291 return this_cpu;
1295 * Last chance: if best_cpu is valid and is
1296 * in the mask, that becomes our choice.
1298 if (best_cpu < nr_cpu_ids &&
1299 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1300 rcu_read_unlock();
1301 return best_cpu;
1305 rcu_read_unlock();
1308 * At this point, all our guesses failed, we just return
1309 * 'something', and let the caller sort the things out.
1311 if (this_cpu != -1)
1312 return this_cpu;
1314 cpu = cpumask_any(later_mask);
1315 if (cpu < nr_cpu_ids)
1316 return cpu;
1318 return -1;
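
What cpudl_find() gives find_later_rq() is, conceptually, "among the CPUs this task may run on, one whose earliest deadline is later than the task's, preferably the latest". The real implementation is a max-heap in kernel/sched/cpudeadline.c and it also prefers completely free CPUs; a naive sketch of the selection rule itself:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Conceptual sketch of the cpudl selection rule: among the allowed CPUs
 * the task would actually preempt, pick the one whose current earliest
 * deadline is the latest. (Idle-CPU preference is not modelled here.) */
static int find_latest_dl_cpu(const uint64_t *earliest_dl,
			      const int *allowed, uint64_t task_deadline)
{
	int best = -1;
	uint64_t best_dl = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!allowed[cpu])
			continue;
		/* Skip CPUs whose earliest deadline we would not preempt. */
		if ((int64_t)(task_deadline - earliest_dl[cpu]) >= 0)
			continue;
		if (best == -1 || (int64_t)(earliest_dl[cpu] - best_dl) > 0) {
			best = cpu;
			best_dl = earliest_dl[cpu];
		}
	}
	return best;
}

int main(void)
{
	uint64_t earliest_dl[NR_CPUS] = { 50, 200, 120, 300 };
	int allowed[NR_CPUS] = { 1, 1, 1, 0 };

	/* A task with deadline 100 goes to CPU 1: among the allowed CPUs it
	 * can preempt (1 and 2), CPU 1 has the latest earliest-deadline. */
	printf("cpu=%d\n", find_latest_dl_cpu(earliest_dl, allowed, 100));
	return 0;
}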
1321 /* Locks the rq it finds */
1322 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1324 struct rq *later_rq = NULL;
1325 int tries;
1326 int cpu;
1328 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1329 cpu = find_later_rq(task);
1331 if ((cpu == -1) || (cpu == rq->cpu))
1332 break;
1334 later_rq = cpu_rq(cpu);
1336 /* Retry if something changed. */
1337 if (double_lock_balance(rq, later_rq)) {
1338 if (unlikely(task_rq(task) != rq ||
1339 !cpumask_test_cpu(later_rq->cpu,
1340 &task->cpus_allowed) ||
1341 task_running(rq, task) ||
1342 !task_on_rq_queued(task))) {
1343 double_unlock_balance(rq, later_rq);
1344 later_rq = NULL;
1345 break;
1350 * If the rq we found has no -deadline task, or
1351 * its earliest one has a later deadline than our
1352 * task, the rq is a good one.
1354 if (!later_rq->dl.dl_nr_running ||
1355 dl_time_before(task->dl.deadline,
1356 later_rq->dl.earliest_dl.curr))
1357 break;
1359 /* Otherwise we try again. */
1360 double_unlock_balance(rq, later_rq);
1361 later_rq = NULL;
1364 return later_rq;
1367 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1369 struct task_struct *p;
1371 if (!has_pushable_dl_tasks(rq))
1372 return NULL;
1374 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1375 struct task_struct, pushable_dl_tasks);
1377 BUG_ON(rq->cpu != task_cpu(p));
1378 BUG_ON(task_current(rq, p));
1379 BUG_ON(p->nr_cpus_allowed <= 1);
1381 BUG_ON(!task_on_rq_queued(p));
1382 BUG_ON(!dl_task(p));
1384 return p;
1388 * See if the non running -deadline tasks on this rq
1389 * can be sent to some other CPU where they can preempt
1390 * and start executing.
1392 static int push_dl_task(struct rq *rq)
1394 struct task_struct *next_task;
1395 struct rq *later_rq;
1396 int ret = 0;
1398 if (!rq->dl.overloaded)
1399 return 0;
1401 next_task = pick_next_pushable_dl_task(rq);
1402 if (!next_task)
1403 return 0;
1405 retry:
1406 if (unlikely(next_task == rq->curr)) {
1407 WARN_ON(1);
1408 return 0;
1412 * If next_task preempts rq->curr, and rq->curr
1413 * can move away, it makes sense to just reschedule
1414 * without going further in pushing next_task.
1416 if (dl_task(rq->curr) &&
1417 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1418 rq->curr->nr_cpus_allowed > 1) {
1419 resched_curr(rq);
1420 return 0;
1423 /* We might release rq lock */
1424 get_task_struct(next_task);
1426 /* Will lock the rq it'll find */
1427 later_rq = find_lock_later_rq(next_task, rq);
1428 if (!later_rq) {
1429 struct task_struct *task;
1432 * We must check all this again, since
1433 * find_lock_later_rq releases rq->lock and it is
1434 * then possible that next_task has migrated.
1436 task = pick_next_pushable_dl_task(rq);
1437 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1439 * The task is still there. We don't try
1440 * again, some other cpu will pull it when ready.
1442 goto out;
1445 if (!task)
1446 /* No more tasks */
1447 goto out;
1449 put_task_struct(next_task);
1450 next_task = task;
1451 goto retry;
1454 deactivate_task(rq, next_task, 0);
1455 set_task_cpu(next_task, later_rq->cpu);
1456 activate_task(later_rq, next_task, 0);
1457 ret = 1;
1459 resched_curr(later_rq);
1461 double_unlock_balance(rq, later_rq);
1463 out:
1464 put_task_struct(next_task);
1466 return ret;
1469 static void push_dl_tasks(struct rq *rq)
1471 /* Terminates as it moves a -deadline task */
1472 while (push_dl_task(rq))
1476 static int pull_dl_task(struct rq *this_rq)
1478 int this_cpu = this_rq->cpu, ret = 0, cpu;
1479 struct task_struct *p;
1480 struct rq *src_rq;
1481 u64 dmin = LONG_MAX;
1483 if (likely(!dl_overloaded(this_rq)))
1484 return 0;
1487          * Match the barrier from dl_set_overload(); this guarantees that if we
1488 * see overloaded we must also see the dlo_mask bit.
1490 smp_rmb();
1492 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1493 if (this_cpu == cpu)
1494 continue;
1496 src_rq = cpu_rq(cpu);
1499          * It looks racy, and it is! However, as in sched_rt.c,
1500 * we are fine with this.
1502 if (this_rq->dl.dl_nr_running &&
1503 dl_time_before(this_rq->dl.earliest_dl.curr,
1504 src_rq->dl.earliest_dl.next))
1505 continue;
1507 /* Might drop this_rq->lock */
1508 double_lock_balance(this_rq, src_rq);
1511 * If there are no more pullable tasks on the
1512 * rq, we're done with it.
1514 if (src_rq->dl.dl_nr_running <= 1)
1515 goto skip;
1517 p = pick_next_earliest_dl_task(src_rq, this_cpu);
1520 * We found a task to be pulled if:
1521 * - it preempts our current (if there's one),
1522 * - it will preempt the last one we pulled (if any).
1524 if (p && dl_time_before(p->dl.deadline, dmin) &&
1525 (!this_rq->dl.dl_nr_running ||
1526 dl_time_before(p->dl.deadline,
1527 this_rq->dl.earliest_dl.curr))) {
1528 WARN_ON(p == src_rq->curr);
1529 WARN_ON(!task_on_rq_queued(p));
1532 * Then we pull iff p has actually an earlier
1533 * deadline than the current task of its runqueue.
1535 if (dl_time_before(p->dl.deadline,
1536 src_rq->curr->dl.deadline))
1537 goto skip;
1539 ret = 1;
1541 deactivate_task(src_rq, p, 0);
1542 set_task_cpu(p, this_cpu);
1543 activate_task(this_rq, p, 0);
1544 dmin = p->dl.deadline;
1546 /* Is there any other task even earlier? */
1548 skip:
1549 double_unlock_balance(this_rq, src_rq);
1552 return ret;
1555 static void post_schedule_dl(struct rq *rq)
1557 push_dl_tasks(rq);
1561 * Since the task is not running and a reschedule is not going to happen
1562 * anytime soon on its runqueue, we try pushing it away now.
1564 static void task_woken_dl(struct rq *rq, struct task_struct *p)
1566 if (!task_running(rq, p) &&
1567 !test_tsk_need_resched(rq->curr) &&
1568 has_pushable_dl_tasks(rq) &&
1569 p->nr_cpus_allowed > 1 &&
1570 dl_task(rq->curr) &&
1571 (rq->curr->nr_cpus_allowed < 2 ||
1572 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1573 push_dl_tasks(rq);
1577 static void set_cpus_allowed_dl(struct task_struct *p,
1578 const struct cpumask *new_mask)
1580 struct rq *rq;
1581 struct root_domain *src_rd;
1582 int weight;
1584 BUG_ON(!dl_task(p));
1586 rq = task_rq(p);
1587 src_rd = rq->rd;
1589 * Migrating a SCHED_DEADLINE task between exclusive
1590 * cpusets (different root_domains) entails a bandwidth
1591 * update. We already made space for us in the destination
1592 * domain (see cpuset_can_attach()).
1594 if (!cpumask_intersects(src_rd->span, new_mask)) {
1595 struct dl_bw *src_dl_b;
1597 src_dl_b = dl_bw_of(cpu_of(rq));
1599 * We now free resources of the root_domain we are migrating
1600          * off. In the worst case, sched_setattr() may temporarily fail
1601 * until we complete the update.
1603 raw_spin_lock(&src_dl_b->lock);
1604 __dl_clear(src_dl_b, p->dl.dl_bw);
1605 raw_spin_unlock(&src_dl_b->lock);
1609 * Update only if the task is actually running (i.e.,
1610 * it is on the rq AND it is not throttled).
1612 if (!on_dl_rq(&p->dl))
1613 return;
1615 weight = cpumask_weight(new_mask);
1618          * Only update if the process changes whether it
1619 * can migrate or not.
1621 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1622 return;
1625 * The process used to be able to migrate OR it can now migrate
1627 if (weight <= 1) {
1628 if (!task_current(rq, p))
1629 dequeue_pushable_dl_task(rq, p);
1630 BUG_ON(!rq->dl.dl_nr_migratory);
1631 rq->dl.dl_nr_migratory--;
1632 } else {
1633 if (!task_current(rq, p))
1634 enqueue_pushable_dl_task(rq, p);
1635 rq->dl.dl_nr_migratory++;
1638 update_dl_migration(&rq->dl);
1641 /* Assumes rq->lock is held */
1642 static void rq_online_dl(struct rq *rq)
1644 if (rq->dl.overloaded)
1645 dl_set_overload(rq);
1647 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1648 if (rq->dl.dl_nr_running > 0)
1649 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1652 /* Assumes rq->lock is held */
1653 static void rq_offline_dl(struct rq *rq)
1655 if (rq->dl.overloaded)
1656 dl_clear_overload(rq);
1658 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1659 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1662 void init_sched_dl_class(void)
1664 unsigned int i;
1666 for_each_possible_cpu(i)
1667 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1668 GFP_KERNEL, cpu_to_node(i));
1671 #endif /* CONFIG_SMP */
1674 * Ensure p's dl_timer is cancelled. May drop rq->lock for a while.
1676 static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
1678 struct hrtimer *dl_timer = &p->dl.dl_timer;
1680 /* Nobody will change task's class if pi_lock is held */
1681 lockdep_assert_held(&p->pi_lock);
1683 if (hrtimer_active(dl_timer)) {
1684 int ret = hrtimer_try_to_cancel(dl_timer);
1686 if (unlikely(ret == -1)) {
1688 * Note, p may migrate OR new deadline tasks
1689 * may appear in rq when we are unlocking it.
1690                          * Our caller must be fine with that.
1692 raw_spin_unlock(&rq->lock);
1693 hrtimer_cancel(dl_timer);
1694 raw_spin_lock(&rq->lock);
1699 static void switched_from_dl(struct rq *rq, struct task_struct *p)
1701 /* XXX we should retain the bw until 0-lag */
1702 cancel_dl_timer(rq, p);
1703 __dl_clear_params(p);
1706 * Since this might be the only -deadline task on the rq,
1707 * this is the right place to try to pull some other one
1708 * from an overloaded cpu, if any.
1710 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1711 return;
1713 if (pull_dl_task(rq))
1714 resched_curr(rq);
1718 * When switching to -deadline, we may overload the rq, then
1719 * we try to push someone off, if possible.
1721 static void switched_to_dl(struct rq *rq, struct task_struct *p)
1723 int check_resched = 1;
1725 if (task_on_rq_queued(p) && rq->curr != p) {
1726 #ifdef CONFIG_SMP
1727 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded &&
1728 push_dl_task(rq) && rq != task_rq(p))
1729 /* Only reschedule if pushing failed */
1730 check_resched = 0;
1731 #endif /* CONFIG_SMP */
1732 if (check_resched) {
1733 if (dl_task(rq->curr))
1734 check_preempt_curr_dl(rq, p, 0);
1735 else
1736 resched_curr(rq);
1742 * If the scheduling parameters of a -deadline task changed,
1743 * a push or pull operation might be needed.
1745 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1746 int oldprio)
1748 if (task_on_rq_queued(p) || rq->curr == p) {
1749 #ifdef CONFIG_SMP
1751 * This might be too much, but unfortunately
1752 * we don't have the old deadline value, and
1753          * we can't tell whether the task is increasing
1754 * or lowering its prio, so...
1756 if (!rq->dl.overloaded)
1757 pull_dl_task(rq);
1760          * If we now have an earlier deadline task than p,
1761 * then reschedule, provided p is still on this
1762 * runqueue.
1764 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
1765 rq->curr == p)
1766 resched_curr(rq);
1767 #else
1769          * Again, we don't know if p has an earlier
1770 * or later deadline, so let's blindly set a
1771 * (maybe not needed) rescheduling point.
1773 resched_curr(rq);
1774 #endif /* CONFIG_SMP */
1775 } else
1776 switched_to_dl(rq, p);
1779 const struct sched_class dl_sched_class = {
1780 .next = &rt_sched_class,
1781 .enqueue_task = enqueue_task_dl,
1782 .dequeue_task = dequeue_task_dl,
1783 .yield_task = yield_task_dl,
1785 .check_preempt_curr = check_preempt_curr_dl,
1787 .pick_next_task = pick_next_task_dl,
1788 .put_prev_task = put_prev_task_dl,
1790 #ifdef CONFIG_SMP
1791 .select_task_rq = select_task_rq_dl,
1792 .set_cpus_allowed = set_cpus_allowed_dl,
1793 .rq_online = rq_online_dl,
1794 .rq_offline = rq_offline_dl,
1795 .post_schedule = post_schedule_dl,
1796 .task_woken = task_woken_dl,
1797 #endif
1799 .set_curr_task = set_curr_task_dl,
1800 .task_tick = task_tick_dl,
1801 .task_fork = task_fork_dl,
1802 .task_dead = task_dead_dl,
1804 .prio_changed = prio_changed_dl,
1805 .switched_from = switched_from_dl,
1806 .switched_to = switched_to_dl,
1808 .update_curr = update_curr_dl,
1811 #ifdef CONFIG_SCHED_DEBUG
1812 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1814 void print_dl_stats(struct seq_file *m, int cpu)
1816 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1818 #endif /* CONFIG_SCHED_DEBUG */