/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>

#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif
static int __cpuidle_register_device(struct cpuidle_device *dev);
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;

        /* check if the device is ready */
        if (!dev || !dev->enabled) {
                if (pm_idle_old)
                        pm_idle_old();
                else
                        local_irq_enable();
                return;
        }

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched())
                return;
        target_state = &dev->states[next_state];

        /* enter the state and update stats */
        dev->last_state = target_state;
        dev->last_residency = target_state->enter(dev, target_state);
        if (dev->last_state)
                target_state = dev->last_state;

        target_state->time += (unsigned long long)dev->last_residency;
        target_state->usage++;

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
}
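
/*
 * For reference: the ->enter() callback invoked above must return the time
 * actually spent idle, in microseconds, and may point dev->last_state at a
 * shallower state if the hardware demoted the request. A minimal sketch of
 * such a callback (illustrative only; mach_do_idle() stands in for a
 * hypothetical platform wait-for-interrupt hook, not a real kernel API):
 *
 *      static int my_enter(struct cpuidle_device *dev,
 *                          struct cpuidle_state *state)
 *      {
 *              ktime_t t1 = ktime_get(), t2;
 *
 *              mach_do_idle();
 *              t2 = ktime_get();
 *              return (int) ktime_to_us(ktime_sub(t2, t1));
 *      }
 */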
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                pm_idle = cpuidle_idle_call;
        }
}
/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
                pm_idle = pm_idle_old;
                cpuidle_kick_cpus();
        }
}
/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
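
/*
 * External users bracket device reconfiguration with the pair above. A
 * sketch of the expected calling pattern (assuming a valid, registered
 * *dev*; this mirrors how the ACPI processor driver reacts to C-state
 * table changes):
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      ... update dev->states[] / dev->state_count ...
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */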
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;

        if (dev->enabled)
                return 0;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                return -EINVAL;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states[i].usage = 0;
                dev->states[i].time = 0;
        }
        dev->last_residency = 0;
        dev->last_state = NULL;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
        ktime_t t1, t2;
        s64 diff;
        int ret;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        ret = (int) diff;
        return ret;
}

static void poll_idle_init(struct cpuidle_device *dev)
{
        struct cpuidle_state *state = &dev->states[0];

        cpuidle_set_statedata(state, NULL);

        snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = CPUIDLE_FLAG_POLL;
        state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
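
/*
 * Slot 0 of dev->states[] is claimed by poll_idle_init() above when the
 * arch has cpu_relax(); a driver fills the deeper states before calling
 * cpuidle_register_device(). A hedged sketch of one such state (the
 * latency/residency values and my_enter() are made up for illustration):
 *
 *      struct cpuidle_state *s = &dev->states[1];
 *
 *      snprintf(s->name, CPUIDLE_NAME_LEN, "C1");
 *      snprintf(s->desc, CPUIDLE_DESC_LEN, "CPU halt");
 *      s->exit_latency = 1;            (worst-case exit latency, us)
 *      s->target_residency = 2;        (break-even residency, us)
 *      s->flags = CPUIDLE_FLAG_TIME_VALID;
 *      s->enter = my_enter;
 *      dev->state_count = 2;
 */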
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (!sys_dev)
                return -EINVAL;
        if (!try_module_get(cpuidle_curr_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        poll_idle_init(dev);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(sys_dev))) {
                module_put(cpuidle_curr_driver->owner);
                return ret;
        }

        dev->registered = 1;
        return 0;
}
/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
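
/*
 * Typical per-CPU bring-up in a driver, once cpuidle_register_driver()
 * has succeeded (sketch; my_idle_devices is a hypothetical per-CPU array
 * owned by the driver, and error handling is elided):
 *
 *      struct cpuidle_device *dev = &per_cpu(my_idle_devices, cpu);
 *
 *      dev->cpu = cpu;
 *      ... fill dev->states[] and dev->state_count ...
 *      if (cpuidle_register_device(dev))
 *              printk(KERN_ERR "cpuidle: CPU%d registration failed\n", cpu);
 */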
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(sys_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_resume_and_unlock();

        module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
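
/*
 * The notifier above is what makes a new PM QoS constraint take effect
 * immediately on all CPUs. A requester elsewhere in the kernel would do
 * something like the following through the pm_qos_params interface of
 * this era (sketch; "mydriver" is a placeholder requirement name):
 *
 *      pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver", 50);
 *      ... latency-critical section: governors now avoid C-states
 *          whose exit latency exceeds 50 us ...
 *      pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver");
 */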
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        pm_idle_old = pm_idle;

        ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

core_initcall(cpuidle_init);