kernel/sched_idletask.c
/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sync)
{
        return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
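/*
 * select_task_rq_idle(): there is exactly one idle task per CPU and it
 * is pinned there for its whole lifetime, so the only answer the
 * balancer can ever get is the CPU the task is already on.
 */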
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
{
        resched_task(rq->idle);
}
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
        schedstat_inc(rq, sched_goidle);
        /* adjust the active tasks as we might go into a long sleep */
        calc_load_account_active(rq);
        return rq->idle;
}
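/*
 * Note on calc_load_account_active(): with NO_HZ the tick can stop for
 * a long stretch once this CPU goes idle, so its active-task count is
 * folded into the global load-average bookkeeping here, on the way
 * into the idle task, rather than relying on a tick that may not fire.
 */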
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
        spin_unlock_irq(&rq->lock);
        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
        dump_stack();
        spin_lock_irq(&rq->lock);
}
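/*
 * Note: rq->lock is dropped around the printk()/dump_stack() pair
 * above; printk() may end up waking klogd, and that wakeup path could
 * deadlock if we were still holding this runqueue's lock.
 */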
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
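/*
 * put_prev_task_idle() is deliberately empty: the idle task carries no
 * runqueue state that would need saving when it is switched out.
 */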
#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
                   struct sched_domain *sd, enum cpu_idle_type idle)
{
        return 0;
}
#endif
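/*
 * Both balancing hooks report "nothing moved": idle tasks never sit on
 * a runqueue, so there is never anything for the load balancer to pull
 * from this class.
 */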
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}
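/*
 * The tick and set_curr_task hooks are no-ops as well: there is no
 * timeslice or accounting to maintain for the idle task.
 */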
static void switched_to_idle(struct rq *rq, struct task_struct *p,
                             int running)
{
        /* Can this actually happen?? */
        if (running)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
}
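/*
 * switched_to_idle() runs when a task's scheduling class is changed to
 * this class; as the in-line question above suggests, that is not
 * expected for ordinary tasks, but the hook stays defensive and forces
 * a reschedule just in case.
 */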
static void prio_changed_idle(struct rq *rq, struct task_struct *p,
                              int oldprio, int running)
{
        /* This can happen for hotplug CPUs */

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's
         */
        if (running) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
}
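/*
 * Reminder: a lower numeric prio value means a higher priority, so
 * "p->prio > oldprio" detects a priority *decrease*.
 */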
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
        /* .next is NULL */
        /* no enqueue/yield_task for idle tasks */

        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,

        .check_preempt_curr     = check_preempt_curr_idle,

        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,

        .load_balance           = load_balance_idle,
        .move_one_task          = move_one_task_idle,
#endif

        .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,

        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,

        /* no .task_new for idle tasks */
};
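/*
 * idle_sched_class sits at the end of the scheduling-class chain
 * (rt -> fair -> idle, linked through .next), so pick_next_task() only
 * falls through to it when no other class has a runnable task - which
 * is why pick_next_task_idle() can unconditionally return rq->idle.
 */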