sched: prevent bound kthreads from changing cpus_allowed
[linux-2.6/mini2440.git] / kernel / sched_rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

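/*
 * Keep rq->rt.overloaded (and, through rt_set_overload(), the root-domain
 * rto_mask) in sync with the runqueue state: a runqueue advertises itself
 * as a source of pushable RT tasks only when it has more than one runnable
 * RT task and at least one of them is migratory.
 */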
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
	return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

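/*
 * Called from the rt_bandwidth period timer: for every rt_rq covered by
 * this bandwidth object, discharge the accumulated rt_time by one period's
 * worth of runtime, lift the throttle once rt_time drops below the runtime
 * budget, and re-enqueue the group.  Returns 1 when every rt_rq is idle,
 * which lets the caller stop the timer.
 */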
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	cpumask_t span;

	if (rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu_mask(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			spin_lock(&rt_rq->rt_runtime_lock);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			spin_unlock(&rt_rq->rt_runtime_lock);
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		spin_unlock(&rq->lock);
	}

	return idle;
}

#ifdef CONFIG_SMP

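/*
 * Borrow unused runtime from the other rt_rqs in this root domain: each
 * CPU may donate up to a 1/weight share of its spare (runtime - time)
 * budget, and we never grow our own runtime beyond the period.  Returns
 * whether any runtime was actually moved.
 */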
static int balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpus_weight(rd->span);

	spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu_mask(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		spin_lock(&iter->rt_runtime_lock);
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			do_div(diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		spin_unlock(&iter->rt_runtime_lock);
	}
	spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

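/*
 * Called when the runqueue leaves the root domain (rq_offline_rt()): hand
 * back any runtime we borrowed, or reclaim what we lent out, so the other
 * rt_rqs end up at their configured allocation again, then set our own
 * runtime to RUNTIME_INF so tasks left on this CPU are not throttled while
 * it is offline.
 */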
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		spin_unlock(&rt_rq->rt_runtime_lock);

		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		for_each_cpu_mask(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			if (iter == rt_rq)
				continue;

			spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		spin_lock(&rt_rq->rt_runtime_lock);
		BUG_ON(want);
balanced:
		rt_rq->rt_runtime = RUNTIME_INF;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}

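/*
 * Counterpart of __disable_runtime(): when the runqueue comes back online,
 * reset every leaf rt_rq to its configured bandwidth and clear any
 * accumulated rt_time, so accounting restarts from a clean state.
 */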
static void __enable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		spin_lock(&rt_b->rt_runtime_lock);
		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		spin_unlock(&rt_rq->rt_runtime_lock);
		spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	spin_unlock_irqrestore(&rq->lock, flags);
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

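/*
 * Decide whether this rt_rq has used up its runtime for the current
 * period.  Called with rt_rq->rt_runtime_lock held; on SMP we may drop
 * that lock to try to borrow runtime from other CPUs before throttling.
 * When the budget really is exhausted the group is throttled and dequeued,
 * and the caller is told to reschedule.
 */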
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

#ifdef CONFIG_SMP
	if (rt_rq->rt_time > runtime) {
		spin_unlock(&rt_rq->rt_runtime_lock);
		balance_runtime(rt_rq);
		spin_lock(&rt_rq->rt_runtime_lock);

		runtime = sched_rt_runtime(rt_rq);
		if (runtime == RUNTIME_INF)
			return 0;
	}
#endif

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_time += delta_exec;
		if (sched_rt_runtime_exceeded(rt_rq))
			resched_task(curr);
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

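/*
 * Bookkeeping helpers called on every enqueue/dequeue of an rt entity:
 * they maintain rt_nr_running, the cached highest_prio (propagated to
 * cpupri so other CPUs can make push/pull decisions), the count of
 * migratory tasks, and the boosted-task count used by rt group throttling.
 */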
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rt_rq->highest_prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_se_prio(rt_se));
#endif
	}
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
	start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
	int highest_prio = rt_rq->highest_prio;
#endif

	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->highest prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	if (rt_rq->highest_prio != highest_prio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu,
				   rt_rq->highest_prio);
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

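/*
 * Tasks that can only run on a single CPU are queued on the "exclusive"
 * queue (xqueue) for their priority, everything else goes on the shared
 * queue (squeue).  pick_next_rt_entity() looks at the xqueue first, the
 * idea being that CPU-bound tasks get preference at a given priority,
 * since their migratable peers can be pushed to other CPUs instead.
 */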
static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq && rt_rq_throttled(group_rq))
		return;

	if (rt_se->nr_cpus_allowed == 1)
		list_add_tail(&rt_se->run_list,
			      array->xqueue + rt_se_prio(rt_se));
	else
		list_add_tail(&rt_se->run_list,
			      array->squeue + rt_se_prio(rt_se));

	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->squeue + rt_se_prio(rt_se))
	    && list_empty(array->xqueue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
	struct sched_rt_entity *rt_se, *back = NULL;

	rt_se = &p->rt;
	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			dequeue_rt_entity(rt_se);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	dequeue_rt_stack(p);

	/*
	 * enqueue everybody, bottom - up.
	 */
	for_each_sched_rt_entity(rt_se)
		enqueue_rt_entity(rt_se);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	update_curr_rt(rq);

	dequeue_rt_stack(p);

	/*
	 * re-enqueue all non-empty rt_rq entities.
	 */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = group_rt_rq(rt_se);
		if (rt_rq && rt_rq->rt_nr_running)
			enqueue_rt_entity(rt_se);
	}
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 *
 * Note: We always enqueue the task to the shared-queue, regardless of its
 * previous position w.r.t. exclusive vs shared. This is so that exclusive RR
 * tasks fairly round-robin with all tasks on the runqueue, not just other
 * exclusive tasks.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the woken RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. We would waste all the cache of the lower-priority
	 * RT task in the hope of saving some for an RT task
	 * that is just being woken and probably has a
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq);

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if ((p->prio == rq->curr->prio)
	    && p->rt.nr_cpus_allowed == 1
	    && rq->curr->rt.nr_cpus_allowed != 1
	    && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
		cpumask_t mask;

		if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
			/*
			 * There appears to be other cpus that can accept
			 * current, so lets reschedule to try and push it away
			 */
			resched_task(rq->curr);
	}
#endif
}

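/*
 * Pick the highest-priority entity on this rt_rq.  Within a priority
 * level the exclusive queue is examined before the shared one (see the
 * note above requeue_rt_entity() for why RR requeueing always targets
 * the shared queue).
 */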
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->xqueue + idx;
	if (!list_empty(queue))
		next = list_entry(queue->next, struct sched_rt_entity,
				  run_list);
	else {
		queue = array->squeue + idx;
		next = list_entry(queue->next, struct sched_rt_entity,
				  run_list);
	}

	return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
 next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->squeue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

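/*
 * Find a CPU suitable to receive 'task': cpupri gives us the mask of CPUs
 * currently running the lowest-priority work, then we narrow the choice by
 * cache affinity - the task's previous CPU first, then CPUs sharing a
 * WAKE_AFFINE sched domain with it, and finally anything left in the mask.
 * Returns -1 when no better CPU exists.
 */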
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int       best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

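/*
 * Pull side of RT balancing: when this runqueue is about to run something
 * of lower priority, scan the CPUs marked in rd->rto_mask and steal the
 * highest-priority task that is allowed to run here and would preempt
 * whatever we are about to schedule.  Returns nonzero if the caller needs
 * to re-pick its next task.
 */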
static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 skip:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

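/*
 * sched_class hook invoked from set_cpus_allowed_ptr() with the rq lock
 * held.  Besides updating p->cpus_allowed, it keeps rt_nr_migratory and
 * the overload state consistent, and requeues the task when it crosses
 * the boundary between the exclusive (single-CPU) and shared queues.
 */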
static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);

		if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
			/*
			 * If either the new or old weight is a "1", we need
			 * to requeue to properly move between shared and
			 * exclusive queues.
			 */
			requeue_task_rt(rq, p);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching away from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the currently running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

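/*
 * RLIMIT_RTTIME watchdog: count the ticks an RT task has run without
 * sleeping and, once the soft limit is exceeded, arm it_sched_expires so
 * the posix-cpu-timers code will notice on its next check and deliver
 * the limit signal.
 */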
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};