sched: rt group scheduling
[linux-2.6.git] / kernel / sched_rt.c
blob 1178257613adef2642bf21a7f59d3366db81df06

/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */
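
/*
 * A runqueue counts as "RT overloaded" when it has more than one runnable
 * RT task and at least one of them is allowed to migrate.
 * update_rt_migration() above keeps the root domain's rto_mask and
 * rto_count in sync with that condition, so pull_rt_task() can cheaply
 * test rt_overloaded() before it bothers scanning remote runqueues.
 */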

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return SCHED_RT_FRAC;

	return rt_rq->tg->rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		enqueue_rt_entity(rt_se);
		resched_task(rq_of_rt_rq(rt_rq)->curr);
	}
}

static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

#else

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	return sysctl_sched_rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
}

#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
{
	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
	u64 period, ratio;

	if (rt_ratio == SCHED_RT_FRAC)
		return 0;

	if (rt_rq->rt_throttled)
		return 1;

	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	if (rt_rq->rt_time > ratio) {
		rt_rq->rt_throttled = 1;
		sched_rt_ratio_dequeue(rt_rq);
		return 1;
	}

	return 0;
}
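
/*
 * Worked example for sched_rt_ratio_exceeded(), using illustrative values
 * and assuming SCHED_RT_FRAC == (1 << SCHED_RT_FRAC_SHIFT): with
 * sysctl_sched_rt_period = 1000 (ms) the period is 10^9 ns; an rt_ratio of
 * SCHED_RT_FRAC/2 then yields ratio = period/2 = 500ms of allowed RT
 * runtime per period. Once the rt_time accumulated by update_curr_rt()
 * crosses that budget, the rt_rq is marked throttled and its group entity
 * is dequeued until __update_sched_rt_period() refreshes the budget.
 */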

static void __update_sched_rt_period(struct rt_rq *rt_rq, u64 period)
{
	unsigned long rt_ratio = sched_rt_ratio(rt_rq);
	u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
	if (rt_rq->rt_throttled) {
		rt_rq->rt_throttled = 0;
		sched_rt_ratio_enqueue(rt_rq);
	}
}

static void update_sched_rt_period(struct rq *rq)
{
	struct rt_rq *rt_rq;
	u64 period;

	while (rq->clock > rq->rt_period_expire) {
		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
		rq->rt_period_expire += period;

		for_each_leaf_rt_rq(rt_rq, rq)
			__update_sched_rt_period(rt_rq, period);
	}
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	rt_rq->rt_time += delta_exec;
	/*
	 * might make it a tad more accurate:
	 *
	 * update_sched_rt_period(rq);
	 */
	if (sched_rt_ratio_exceeded(rt_rq))
		resched_task(curr);
}
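
/*
 * In short: update_curr_rt() charges the time elapsed since the task last
 * started executing (se.exec_start) both to the task itself
 * (sum_exec_runtime, cpuacct) and to its rt_rq's rt_time, which is the
 * quantity that the throttling check above compares against the group's
 * ratio budget.
 */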

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
		rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rt_rq->highest_prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq && group_rq->rt_throttled)
		return;

	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 *      doesn't matter much for now, as h=2 for GROUP_SCHED.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
	struct sched_rt_entity *rt_se, *top_se;

	/*
	 * dequeue all, top - down.
	 */
	do {
		rt_se = &p->rt;
		top_se = NULL;
		for_each_sched_rt_entity(rt_se) {
			if (on_rt_rq(rt_se))
				top_se = rt_se;
		}
		if (top_se)
			dequeue_rt_entity(top_se);
	} while (top_se);
}
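
/*
 * Example: with group scheduling a task's entity chain is
 * task -> group entity. The first pass of the loop above dequeues the
 * group entity (the topmost one that is still on an rt_rq), the second
 * pass dequeues the task entity, and the third pass finds nothing queued
 * and terminates. Since the hierarchy is only two levels deep here, the
 * quadratic worst case noted in the XXX comment stays cheap.
 */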

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	dequeue_rt_stack(p);

	/*
	 * enqueue everybody, bottom - up.
	 */
	for_each_sched_rt_entity(rt_se)
		enqueue_rt_entity(rt_se);

	inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	update_curr_rt(rq);

	dequeue_rt_stack(p);

	/*
	 * re-enqueue all non-empty rt_rq entities.
	 */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = group_rt_rq(rt_se);
		if (rt_rq && rt_rq->rt_nr_running)
			enqueue_rt_entity(rt_se);
	}

	dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. Even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */
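
/*
 * The wakeup routing above therefore boils down to: if the CPU the task
 * last ran on is currently busy with another RT task and the woken task is
 * allowed to run elsewhere, probe find_lowest_rq() for a less loaded
 * target; in every other case leave the task on task_cpu(p) and rely on
 * the push/pull machinery below to sort out any resulting overload.
 */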

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	if (sched_rt_ratio_exceeded(rt_rq))
		goto out;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);
out:
	return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

retry:
	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (sched_rt_ratio_exceeded(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		if (unlikely(!rt_se))
			goto retry;
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);

			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu = -1;
	int count = 0;
	int cpu;

	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}
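
/*
 * Illustrative walk-through of find_lowest_cpus() (made-up numbers): say a
 * prio-30 task may run on CPUs 1, 2 and 3, whose runqueues report
 * rt.highest_prio of 40, 60 and 60. CPU 1 becomes the first candidate,
 * CPU 2 then replaces it as the new "lowest" (a numerically higher prio
 * means less important work), and CPU 3 ties with it, so count ends up 2.
 * The final loop clears CPU 1 from the mask because it sits below
 * lowest_cpu, leaving CPUs 2 and 3 for find_lowest_rq() to choose from.
 * A runqueue running no RT work at all short-circuits the scan and
 * returns 1.
 */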

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int count = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}
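
/*
 * Selection order in find_lowest_rq(), from most to least preferred:
 * the task's previous CPU if it is in the candidate mask (cache-hot),
 * then this_cpu or another candidate inside an SD_WAKE_AFFINE domain that
 * spans the previous CPU (topologically close), and finally any candidate
 * at all via pick_optimal_cpu() over the whole mask. -1 means no target.
 */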

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non-running
 * task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in at a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}
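
/*
 * The push protocol above, step by step: pick the highest-priority queued
 * task that is not running and may migrate, pin it with get_task_struct()
 * because rq->lock may be dropped, ask find_lock_lowest_rq() for a
 * lower-priority target runqueue (retrying a bounded number of times if
 * the candidate changes under us), then deactivate the task here,
 * set_task_cpu() it over, activate it on the target and resched the
 * target's current task so the pushed task runs promptly.
 */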

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just because p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto out;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
out:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}
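
/*
 * In other words, pull_rt_task() walks every runqueue flagged in the root
 * domain's rto_mask, re-evaluates this_rq's prospective next task whenever
 * double_lock_balance() had to drop our lock, and migrates a remote task
 * only when it would beat whatever we are about to run here and is not
 * itself about to run on its own CPU. A nonzero return tells the caller
 * that the pending next-task choice may be stale and must be recomputed.
 */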

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);
}

/*
 * When switching away from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
		if (p->prio > rq->rt.highest_prio)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * higher in priority than the current running task,
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (next > p->rt.timeout) {
			u64 next_time = p->se.sum_exec_runtime;

			next_time += next * (NSEC_PER_SEC/HZ);
			if (p->it_sched_expires > next_time)
				p->it_sched_expires = next_time;
		} else
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}
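
/*
 * Example of the RLIMIT_RTTIME arithmetic above (illustrative values):
 * with HZ = 1000 a tick is USEC_PER_SEC/HZ = 1000us, so a soft limit of
 * 500000us translates to next = DIV_ROUND_UP(500000, 1000) = 500 ticks.
 * While p->rt.timeout is still below that, it_sched_expires is clamped to
 * no later than sum_exec_runtime plus that many ticks' worth of
 * nanoseconds; once the tick count reaches the limit it is pulled back to
 * the current sum_exec_runtime, so the per-task CPU timer check treats
 * the budget as already expired.
 */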

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.join_domain		= join_domain_rt,
	.leave_domain		= leave_domain_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};