/* kernel/sched_rt.c ("sched: rt-watchdog: fix .rlim_max = RLIM_INFINITY", linux-2.6/verdex.git) */
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}
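
/*
 * rt_overloaded() above is the read side of this pairing: pull_rt_task()
 * checks rto_count first and only then walks rto_mask, so publishing the
 * mask before the count (the wmb() above) is what is intended to keep a
 * reader that sees the raised count from walking a stale mask.
 */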

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return SCHED_RT_FRAC;

	return rt_rq->tg->rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

#else

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	return sysctl_sched_rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
}

#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}
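
/*
 * The throttle budget below is a fixed-point fraction of the period:
 *
 *	ratio = (period_ns * rt_ratio) >> SCHED_RT_FRAC_SHIFT
 *
 * Rough worked example, assuming the usual defaults of a 1000ms period
 * and an rt_ratio of about 95% of SCHED_RT_FRAC: that gives roughly
 * 950ms of RT runtime per period before the rt_rq gets throttled.
 */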
static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
{
	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
	u64 period, ratio;

	if (rt_ratio == SCHED_RT_FRAC)
		return 0;

	if (rt_rq->rt_throttled)
		return 1;

	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	if (rt_rq->rt_time > ratio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rq->rt_throttled = 1;
		rt_rq->rt_throttled = 1;

		sched_rt_ratio_dequeue(rt_rq);
		return 1;
	}

	return 0;
}
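
/*
 * Per-period bookkeeping: every time rq->clock passes rt_period_expire,
 * refund up to one period's worth of budget from each leaf rt_rq's
 * rt_time and unthrottle anything that was throttled in the meantime.
 */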
static void update_sched_rt_period(struct rq *rq)
{
	struct rt_rq *rt_rq;
	u64 period;

	while (rq->clock > rq->rt_period_expire) {
		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
		rq->rt_period_expire += period;

		for_each_leaf_rt_rq(rt_rq, rq) {
			unsigned long rt_ratio = sched_rt_ratio(rt_rq);
			u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

			rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
			if (rt_rq->rt_throttled) {
				rt_rq->rt_throttled = 0;
				sched_rt_ratio_enqueue(rt_rq);
			}
		}

		rq->rt_throttled = 0;
	}
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	rt_rq->rt_time += delta_exec;
	/*
	 * might make it a tad more accurate:
	 *
	 * update_sched_rt_period(rq);
	 */
	if (sched_rt_ratio_exceeded(rt_rq))
		resched_task(curr);
}
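
/*
 * rt_nr_running/highest_prio accounting.  Remember that lower prio values
 * mean higher priority, so rt_rq->highest_prio holds the smallest prio
 * value currently queued (MAX_RT_PRIO when the queue is empty).
 */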
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
		rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory++;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
	if (rt_rq->rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
		if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
			/* recalculate */
			array = &rt_rq->active;
			rt_rq->highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->highest prio alone */
	} else
		rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
	if (rt_se->nr_cpus_allowed > 1) {
		struct rq *rq = rq_of_rt_rq(rt_rq);
		rq->rt.rt_nr_migratory--;
	}

	update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq && group_rq->rt_throttled)
		return;

	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 *
 * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
 *      doesn't matter much for now, as h=2 for GROUP_SCHED.
 */
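/*
 * For the h=2 group case that means at most two passes: the first dequeues
 * the group entity from the root rt_rq, the second dequeues the task
 * entity from the group's own rt_rq.
 */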
static void dequeue_rt_stack(struct task_struct *p)
{
	struct sched_rt_entity *rt_se, *top_se;

	/*
	 * dequeue all, top - down.
	 */
	do {
		rt_se = &p->rt;
		top_se = NULL;
		for_each_sched_rt_entity(rt_se) {
			if (on_rt_rq(rt_se))
				top_se = rt_se;
		}
		if (top_se)
			dequeue_rt_entity(top_se);
	} while (top_se);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	dequeue_rt_stack(p);

	/*
	 * enqueue everybody, bottom - up.
	 */
	for_each_sched_rt_entity(rt_se)
		enqueue_rt_entity(rt_se);

	inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	update_curr_rt(rq);

	dequeue_rt_stack(p);

	/*
	 * re-enqueue all non-empty rt_rq entities.
	 */
	for_each_sched_rt_entity(rt_se) {
		rt_rq = group_rt_rq(rt_se);
		if (rt_rq && rt_rq->rt_nr_running)
			enqueue_rt_entity(rt_se);
	}

	dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct rt_prio_array *array = &rt_rq->active;

	list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se);
	}
}
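
/*
 * sched_yield() for an RT task just requeues it at the tail of its
 * priority queue, via requeue_task_rt() above.
 */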
static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if
	 * the RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	if (sched_rt_ratio_exceeded(rt_rq))
		goto out;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);
out:
	return next;
}
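
/*
 * Pick the next task to run: walk down the group hierarchy, taking the
 * highest priority entity at each level, until we reach a leaf entity
 * that corresponds to an actual task (group_rt_rq() == NULL).
 */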
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

retry:
	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (sched_rt_ratio_exceeded(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		if (unlikely(!rt_se))
			goto retry;
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;
	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
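
/*
 * Build in *lowest_mask the set of candidate CPUs for @task and return a
 * count: 0 if there is no usable target, 1 if a CPU running no RT tasks
 * was found (callers then just take the first set bit), otherwise the
 * number of CPUs tied at the lowest RT priority that @task would still
 * preempt.
 */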
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int lowest_prio = -1;
	int lowest_cpu = -1;
	int count = 0;
	int cpu;

	cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *lowest_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			/*
			 * if we already found a low RT queue
			 * and now we found this non-rt queue
			 * clear the mask and set our bit.
			 * Otherwise just return the queue as is
			 * and the count==1 will cause the algorithm
			 * to use the first bit found.
			 */
			if (lowest_cpu != -1) {
				cpus_clear(*lowest_mask);
				cpu_set(rq->cpu, *lowest_mask);
			}
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				lowest_cpu = cpu;
				count = 0;
			}
			count++;
		} else
			cpu_clear(cpu, *lowest_mask);
	}

	/*
	 * Clear out all the set bits that represent
	 * runqueues that were of higher prio than
	 * the lowest_prio.
	 */
	if (lowest_cpu > 0) {
		/*
		 * Perhaps we could add another cpumask op to
		 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
		 * Then that could be optimized to use memset and such.
		 */
		for_each_cpu_mask(cpu, *lowest_mask) {
			if (cpu >= lowest_cpu)
				break;
			cpu_clear(cpu, *lowest_mask);
		}
	}

	return count;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int count = find_lowest_cpus(task, lowest_mask);

	if (!count)
		return -1; /* No targets found */

	/*
	 * There is no sense in performing an optimal search if only one
	 * target is found.
	 */
	if (count == 1)
		return first_cpu(*lowest_mask);

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
skip:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    (p->prio >= rq->rt.highest_prio) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);
}

/*
 * When we switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
		if (p->prio > rq->rt.highest_prio)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}
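
/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout counts scheduler ticks the task
 * has run without sleeping (it is reset on wakeup).  The rlimits are in
 * microseconds, so they are converted to ticks via
 * DIV_ROUND_UP(limit, USEC_PER_SEC/HZ); as a rough worked example, with
 * HZ=1000 a soft limit of 800000us becomes a threshold of 800 ticks.
 * Only a finite soft limit arms the watchdog; the hard limit just caps
 * the threshold through min(soft, hard).
 */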
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.join_domain		= join_domain_rt,
	.leave_domain		= leave_domain_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};