kernel/sched/deadline.c
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"

#include <linux/slab.h>
struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}
void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
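
/*
 * Illustrative note (an assumption, not part of the original file):
 * to_ratio() is defined in kernel/sched/core.c and returns the
 * utilization runtime/period as a 20-bit fixed point fraction. With
 * the defaults global_rt_period() = 1s and global_rt_runtime() =
 * 0.95s, dl_b->bw becomes roughly 0.95 << 20 = 996147. A task's
 * dl_bw is derived the same way from its own runtime/period, so
 * admission control reduces to plain integer comparisons.
 */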
void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}
/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}
#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}

#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);
/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}
/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want a misbehaving entity to affect the scheduling of all other
 * entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setscheduler_ex().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by resetting
	 * the deadline and the budget of the entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
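
/*
 * Worked example (invented numbers, for illustration only): an entity
 * with dl_runtime = 5ms and dl_period = 20ms overruns until
 * runtime = -7ms. The loop above iterates twice, postponing the
 * deadline by 40ms in total while adding 10ms of budget, leaving
 * runtime = 3ms. The entity keeps its 5/20 bandwidth, but pays for
 * the overrun with a later deadline, so other entities are unharmed.
 */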
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * bits means that we reduce granularity to 1us. We are fine
	 * with it, since this is only a true/false check and, anyway,
	 * thinking of anything below microseconds resolution is
	 * actually fiction (but still we want to give the user that
	 * illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
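
/*
 * Worked example (invented numbers, for illustration only): a task
 * with dl_runtime = 5ms and dl_period = 20ms wakes up with 4ms of
 * runtime left and 10ms to its old absolute deadline. Ignoring the
 * DL_SCALE shifts, left = 20 * 4 = 80 and right = 10 * 5 = 50, so
 * the function returns true: 4/10 exceeds the reserved 5/20
 * bandwidth, and update_dl_entity() must refill the runtime and push
 * the deadline forward instead of recycling the old parameters.
 */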
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
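
/*
 * Illustrative note (not part of the original file): dl_se->deadline
 * lives on the rq->clock timeline, while the hrtimer expires against
 * its own CLOCK_MONOTONIC base. If, say, rq_clock() reads 100.000ms
 * when the hrtimer base reads 100.040ms, delta above is +40us and
 * the expiry is shifted by that amount, so the timer fires at the
 * instant the scheduler actually considers to be the deadline.
 */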
/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq;
again:
	rq = task_rq(p);
	raw_spin_lock(&rq->lock);

	if (rq != task_rq(p)) {
		/* Task was moved, retrying. */
		raw_spin_unlock(&rq->lock);
		goto again;
	}

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setattr()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account for that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}
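
/*
 * Worked example (invented numbers, for illustration only): a task
 * runs 2ms past its deadline with 1ms of budget still left, so
 * rorun == 0 and dmiss == 1. Its runtime is first clamped to 0 and
 * then charged the 2ms overrun, ending at -2ms. The replenishment
 * logic must pay that debt back before the task runs again, so the
 * bandwidth borrowed from the next instance is properly accounted.
 */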
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us in line; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}
#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}
static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than ours... otherwise we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}
/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use it.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}
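
/*
 * Illustrative usage note (an assumption, not part of the original
 * file): a periodic -deadline task that finishes its per-instance
 * work early can call sched_yield() to forfeit its remaining budget;
 * the bandwidth timer then wakes it with fresh parameters at its
 * current deadline, which is the SCHED_DEADLINE idiom for "sleep
 * until the next period".
 */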
#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */
/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	s64 delta = p->dl.dl_runtime - p->dl.runtime;

	if (delta > 10000)
		hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && rq->stop->on_rq)
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

	set_post_schedule(rq);

	return p;
}
static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found, the most
	 * suitable of which is cached in best_cpu. This is, among
	 * the runqueues where the current tasks have later deadlines
	 * than the task's one, the rq with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;

	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) || !task->on_rq)) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!dl_task(p));

	return p;
}
/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}
static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * We skip the pull if p actually has an earlier
			 * deadline than the current task of its runqueue:
			 * in that case p is simply waking up and about to
			 * preempt src_rq->curr locally.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}
/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the process changes whether it can
	 * migrate or not.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}
/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */
static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime gets replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}
/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}
const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};