/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}

static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	/* adjust the active tasks as we might go into a long sleep */
	calc_load_account_active(rq);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	raw_spin_lock_irq(&rq->lock);
}

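/*
 * The idle task carries no per-task scheduling state, so there is
 * nothing to save when it is switched out:
 */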
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	/* the idle class never has load to pull */
	return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* there are never idle-class tasks to migrate */
	return 0;
}
#endif /* CONFIG_SMP */

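/*
 * The periodic scheduler tick needs no per-task accounting for the
 * idle class:
 */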
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hot plug CPUS */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

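/*
 * The idle class has no timeslice, so the reported round-robin
 * interval is always 0:
 */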
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,

	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};