/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */
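/*
 * On SMP, wake-up placement is trivial for idle tasks: they simply
 * stay on the CPU they already belong to.
 */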
#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}
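/*
 * Runs when no other scheduling class has a runnable task: account
 * the switch to idle and return this runqueue's idle thread.
 */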
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	calc_load_account_idle(rq);
	return rq->idle;
}
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	raw_spin_lock_irq(&rq->lock);
}
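/* Nothing to save when the idle task is switched out. */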
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
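/* No per-tick timeslice bookkeeping is needed for the idle task. */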
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
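/* Nothing to set up when the idle task becomes the current task. */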
static void set_curr_task_idle(struct rq *rq)
{
}
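/*
 * No task is ever allowed to switch into the idle scheduling class,
 * so reaching this callback is a bug.
 */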
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
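/* Likewise, changing the priority of an idle task is a bug. */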
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
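/* The idle task has no round-robin timeslice to report. */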
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};