sched: rt-bandwidth accounting fix
kernel/sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

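/*
 * A runqueue is flagged "RT overloaded" once it has more than one runnable
 * RT task and at least one of them is allowed to migrate.  The flag lives
 * in the root domain (rto_mask/rto_count) so that pull_rt_task() on other
 * CPUs can cheaply find candidate source runqueues.
 */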
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
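/*
 * Runtime borrowing: when an rt_rq has exhausted its local rt_runtime it may
 * borrow unused budget from the other rt_rqs of the same bandwidth domain.
 * At most 1/weight of each lender's spare time (rt_runtime - rt_time) is
 * taken, and the borrower's rt_runtime is never raised above a full
 * rt_period.
 */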
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpus_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu_mask_nr(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

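/*
 * Called when a runqueue goes offline: rebalance the bandwidth domain so
 * that runtime this rt_rq lent out is reclaimed from (or runtime it borrowed
 * is returned to) its siblings, then mark the rt_rq as RUNTIME_INF so
 * nothing can be throttled on a CPU that is going away.
 */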
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		spin_unlock(&rt_rq->rt_runtime_lock);

		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		for_each_cpu_mask(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		spin_lock(&rt_rq->rt_runtime_lock);
		BUG_ON(want);
balanced:
		rt_rq->rt_runtime = RUNTIME_INF;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}

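/*
 * The inverse of __disable_runtime(): when the runqueue comes back online,
 * reset every leaf rt_rq to its default bandwidth and clear any accumulated
 * rt_time, so the CPU starts over with a clean budget.
 */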
static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

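/*
 * Runs from the rt_bandwidth period timer: each rt_period, every rt_rq in
 * the period mask has up to overrun*runtime worth of accumulated rt_time
 * forgiven, and rt_rqs that have dropped back below their runtime are
 * unthrottled and re-enqueued.  Returns 1 (idle) when no rt_rq has pending
 * rt_time or runnable tasks, allowing the timer to stop.
 */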
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

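/*
 * Throttling check, called under rt_rq->rt_runtime_lock from
 * update_curr_rt(): once the accumulated rt_time exceeds the (possibly
 * rebalanced) rt_runtime, the rt_rq is throttled and dequeued until the
 * period timer replenishes it.  With the usual defaults this works out to
 * roughly 0.95s of RT execution allowed per 1s period.
 */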
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
		}
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

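/*
 * inc_rt_tasks()/dec_rt_tasks() maintain the per-rt_rq accounting whenever
 * an entity is queued or dequeued: rt_nr_running, the cached highest_prio
 * (also published through cpupri so other CPUs can see this runqueue's
 * priority), the count of migratable tasks used for overload detection, and
 * the rt_nr_boosted count of entities running above their normal priority.
 */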
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
#ifdef CONFIG_SMP
		struct rq *rq = rq_of_rt_rq(rt_rq);
#endif

		rt_rq->highest_prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_se_prio(rt_se));
#endif
	}
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
	int highest_prio = rt_rq->highest_prio;
#endif

	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->highest prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	if (rt_rq->highest_prio != highest_prio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_rq->highest_prio);
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when it is empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se);

	inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken
	 * RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks: if one gets
	 * preempted, we try to push it off to another queue. So trying
	 * to keep a preempting RT task on the same cache-hot CPU will
	 * force the running RT task onto a cold CPU. We would waste
	 * all of that task's cache in the hope of saving some for an
	 * RT task that is just waking up and probably has a cold cache
	 * anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	cpumask_t mask;

	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, &mask))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
		return;

	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * Only consider CPUs that are usable for migration.
	 * I guess we might want to change cpupri_find() to ignore those
	 * in the first place.
	 */
	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that next_task has slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

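/*
 * pull_rt_task() is the mirror image of push: when this runqueue is about
 * to drop to a lower priority, walk the root domain's rto_mask and pull over
 * the highest-priority pullable task that would preempt whatever we are
 * going to run next, re-evaluating "next" whenever double_lock_balance()
 * had to drop our lock.
 */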
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running; if its priority is
		 * higher than that of the currently running task,
		 * reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

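/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout counts how long this task has been
 * running as an RT task, in scheduler ticks.  Once it passes the soft limit
 * (converted here from microseconds to ticks), it_sched_expires is armed so
 * the per-task CPU-timer code notices the overrun on a later tick; the
 * actual limit enforcement happens outside this file.
 */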
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */