/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */
10 select_task_rq_idle(struct task_struct
*p
, int sd_flag
, int flags
)
12 return task_cpu(p
); /* IDLE tasks as never migrated */
14 #endif /* CONFIG_SMP */
16 * Idle tasks are unconditionally rescheduled:
18 static void check_preempt_curr_idle(struct rq
*rq
, struct task_struct
*p
, int flags
)
20 resched_task(rq
->idle
);
23 static struct task_struct
*pick_next_task_idle(struct rq
*rq
)
25 schedstat_inc(rq
, sched_goidle
);
26 calc_load_account_idle(rq
);
31 * It is not legal to sleep in the idle task - print a warning
32 * message if some code attempts to do it:
35 dequeue_task_idle(struct rq
*rq
, struct task_struct
*p
, int flags
)
37 raw_spin_unlock_irq(&rq
->lock
);
38 printk(KERN_ERR
"bad: scheduling from the idle thread!\n");
40 raw_spin_lock_irq(&rq
->lock
);
/* Nothing to save when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
/* The idle task needs no per-tick bookkeeping. */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
/* No state to establish when the idle task becomes current. */
static void set_curr_task_idle(struct rq *rq)
{
}
/*
 * Class-change hook: a task switching TO the idle class.
 * NOTE(review): no body is visible in this extraction; upstream
 * kernels call BUG() here, since no task may ever be moved into the
 * idle class — confirm against the original file before relying on
 * this being a no-op.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
}
/*
 * Priority-change hook for the idle class.
 * NOTE(review): no body is visible in this extraction; upstream
 * kernels call BUG() here, since an idle task's priority must never
 * change — confirm against the original file.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
}
/*
 * Report the round-robin timeslice of an idle task.
 *
 * Fix: the function fell off the end of a non-void return type, which
 * is undefined behaviour when the caller uses the value.  Idle tasks
 * have no RR quantum, so 0 is the correct interval.
 */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
72 * Simple, special scheduling class for the per-CPU idle tasks:
74 static const struct sched_class idle_sched_class
= {
76 /* no enqueue/yield_task for idle tasks */
78 /* dequeue is not valid, we print a debug message there: */
79 .dequeue_task
= dequeue_task_idle
,
81 .check_preempt_curr
= check_preempt_curr_idle
,
83 .pick_next_task
= pick_next_task_idle
,
84 .put_prev_task
= put_prev_task_idle
,
87 .select_task_rq
= select_task_rq_idle
,
90 .set_curr_task
= set_curr_task_idle
,
91 .task_tick
= task_tick_idle
,
93 .get_rr_interval
= get_rr_interval_idle
,
95 .prio_changed
= prio_changed_idle
,
96 .switched_to
= switched_to_idle
,