/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (e.g. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as the top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
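
/*
 * Illustrative sketch (not part of the original file): the read-side
 * pattern the locking rules above describe. A reader takes the per-policy
 * rwsem, bails out if the CPU went away while it slept on the semaphore,
 * and always unlocks the same CPU it locked. The function name
 * example_read_cur() is hypothetical.
 */
#if 0	/* example only */
static unsigned int example_read_cur(unsigned int cpu)
{
	unsigned int cur = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return 0;
	if (lock_policy_rwsem_read(cpu) < 0) {
		/* CPU went offline while we waited for the semaphore */
		cpufreq_cpu_put(policy);
		return 0;
	}
	cur = policy->cur;		/* safe: readers hold the sem */
	unlock_policy_rwsem_read(cpu);
	cpufreq_cpu_put(policy);
	return cur;
}
#endif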

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
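
/*
 * Illustrative sketch (not in the original file): every successful
 * cpufreq_cpu_get() pins both the driver module and the policy kobject,
 * so each call must be paired with exactly one cpufreq_cpu_put().
 * example_touch_policy() is a hypothetical caller.
 */
#if 0	/* example only */
static void example_touch_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;			/* no driver, or CPU not managed */
	dprintk("cpu %u currently at %u kHz\n", cpu, policy->cur);
	cpufreq_cpu_put(policy);	/* drop kobject + module refs */
}
#endif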

/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		printk(s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
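
/*
 * Worked example (illustrative, not in the original file): with a
 * reference of l_p_j_ref = 2000000 saved at l_p_j_ref_freq = 1000000 kHz,
 * a transition to 500000 kHz computes
 *	cpufreq_scale(2000000, 1000000, 500000)
 *		= 2000000 * 500000 / 1000000 = 1000000,
 * i.e. loops_per_jiffy is halved when the clock is halved, keeping
 * udelay() roughly calibrated on UP systems.
 */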

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
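
/*
 * Illustrative sketch (not in the original file): how a scaling driver's
 * ->target() callback is expected to bracket the hardware switch with the
 * two notifications above. example_write_hw() is a hypothetical stand-in
 * for the driver's register/MSR programming.
 */
#if 0	/* example only */
static int example_driver_target(struct cpufreq_policy *policy,
				 unsigned int target_freq,
				 unsigned int relation)
{
	struct cpufreq_freqs freqs;

	freqs.cpu = policy->cpu;
	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	example_write_hw(target_freq);		/* hypothetical */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return 0;
}
#endif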


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
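
/*
 * Illustrative note (not in the original file): each show_one() above
 * expands to a tiny accessor. show_one(scaling_max_freq, max), for
 * instance, becomes:
 *
 *	static ssize_t show_scaling_max_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * which backs /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq.
 */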

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(cpuinfo_transition_latency);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
define_one_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}


/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose.
 * Tried to clean this mess up, but more thorough testing is needed.
 * - Mathieu
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_cpu_governor, j),
				data->governor->name, CPUFREQ_NAME_LEN);
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	unlock_policy_rwsem_write(cpu);

	cpufreq_debug_enable_ratelimit();

#ifdef CONFIG_HOTPLUG_CPU
	/* when the CPU which is the parent of the kobj is hotplugged
	 * offline, check for siblings, and create cpufreq sysfs interface
	 * and symlinks
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		/* first sibling now owns the new sysfs dir */
		cpumask_clear_cpu(cpu, data->cpus);
		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));

		/* finally remove our own symlink */
		lock_policy_rwsem_write(cpu);
		__cpufreq_remove_dev(sys_dev);
	}
#endif

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);

	return 0;
}


static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
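
/*
 * Illustrative sketch (not in the original file): cpufreq_quick_get()
 * versus cpufreq_get(). The quick variant only reads the cached
 * policy->cur, so it is cheap and safe where querying the hardware via
 * the driver's ->get() is unwanted. example_report_freq() is hypothetical.
 */
#if 0	/* example only */
static void example_report_freq(unsigned int cpu)
{
	unsigned int cached = cpufreq_quick_get(cpu);	/* policy->cur */
	unsigned int actual = cpufreq_get(cpu);	/* may query the driver */

	if (cached != actual)
		dprintk("cpu %u: cached %u kHz, hardware %u kHz\n",
			cpu, cached, actual);
}
#endif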

static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the CPU, as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);


/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int ret = 0;

	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonetheless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int ret = 0;

	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
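
/*
 * Illustrative sketch (not in the original file): a minimal transition
 * notifier. The callback and block names are hypothetical; the
 * registration call and the PRE/POSTCHANGE events are the ones
 * implemented above.
 */
#if 0	/* example only */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		dprintk("cpu %u switched from %u to %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition_cb,
};

/* somewhere in module init:
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif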


/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);


/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* 'gov' must only be defined when the default governor is known to
	   have latency restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
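
/*
 * Illustrative sketch (not in the original file): the skeleton a governor
 * module registers through the two calls above. All names are
 * hypothetical; real governors (ondemand, conservative, ...) follow this
 * shape and react to the GOV_START/STOP/LIMITS events dispatched by
 * __cpufreq_governor().
 */
#if 0	/* example only */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:		/* begin managing this policy */
	case CPUFREQ_GOV_STOP:		/* stop managing it */
	case CPUFREQ_GOV_LIMITS:	/* policy->min/max changed */
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_example = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* module init/exit would call:
 *	cpufreq_register_governor(&cpufreq_gov_example);
 *	cpufreq_unregister_governor(&cpufreq_gov_example);
 */
#endif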

void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier =
{
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						&cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
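
/*
 * Illustrative sketch (not in the original file): the minimum a scaling
 * driver must provide to pass the checks in cpufreq_register_driver():
 * ->init, ->verify, and one of ->setpolicy or ->target. All example_*
 * names are hypothetical.
 */
#if 0	/* example only */
static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.init	= example_cpu_init,	/* fill in policy->cpuinfo/min/max */
	.verify	= example_verify,	/* clamp policy to supported range */
	.target	= example_driver_target, /* see the sketch further above */
};

static int __init example_init(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
#endif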

/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_sysdev_class.kset.kobj);
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
core_initcall(cpufreq_core_init);