/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#include <trace/events/power.h>
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is online after they get the lock.
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode					\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}
lock_policy_rwsem(read, cpu);

lock_policy_rwsem(write, cpu);
static void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
static void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
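
/*
 * Illustrative only (not part of the build): the canonical pattern for
 * readers of a policy is to pair the helpers generated above, e.g.:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	...read from the policy of 'cpu'...
 *	unlock_policy_rwsem_read(cpu);
 *
 * Writers use the _write variants; see show()/store() below for real users.
 */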
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);
static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}
void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		printk(s);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
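
/*
 * Illustrative arithmetic only: cpufreq_scale() rescales the reference
 * value linearly with frequency. If l_p_j_ref was saved as 4000000 at
 * l_p_j_ref_freq = 800000 kHz and the CPU moves to 1600000 kHz, the new
 * loops_per_jiffy becomes roughly 4000000 * 1600000 / 800000 = 8000000.
 */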
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
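
/*
 * Illustrative only: a scaling driver is expected to bracket the actual
 * hardware transition with the two notifications handled above, e.g.:
 *
 *	freqs.cpu = policy->cpu;
 *	freqs.old = policy->cur;
 *	freqs.new = target;
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	...program the hardware...
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */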
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
							str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct sys_device *sys_dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct sys_device *cpu_sys_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct sys_device *sys_dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0, found = 0;
	struct cpufreq_policy *policy;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	ret = -ENOMEM;
	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
	ret = (lock_policy_rwsem_write(cpu) < 0);
	WARN_ON(ret);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
#ifdef CONFIG_HOTPLUG_CPU
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cp->governor &&
		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
			policy->governor = cp->governor;
			found = 1;
			break;
		}
	}
#endif
	if (!found)
		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_unlock_policy;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
	if (ret) {
		if (ret > 0)
			/* This is a managed cpu, symlink created,
			   exit with 0 */
			ret = 0;
		goto err_unlock_policy;
	}

	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
	if (ret)
		goto err_out_unregister;

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;

err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_unlock_policy:
	unlock_policy_rwsem_write(cpu);
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		kobj = &sys_dev->kobj;
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		sysfs_remove_link(kobj, "cpufreq");
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			strncpy(per_cpu(cpufreq_cpu_governor, j),
				data->governor->name, CPUFREQ_NAME_LEN);
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			kobj = &cpu_sys_dev->kobj;
			unlock_policy_rwsem_write(cpu);
			sysfs_remove_link(kobj, "cpufreq");
			lock_policy_rwsem_write(cpu);
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	kobj = &data->kobj;
	cmp = &data->kobj_unregister;
	unlock_policy_rwsem_write(cpu);
	kobject_put(kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	dprintk("wait complete\n");

	lock_policy_rwsem_write(cpu);
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);
	unlock_policy_rwsem_write(cpu);

	cpufreq_debug_enable_ratelimit();

#ifdef CONFIG_HOTPLUG_CPU
	/* when the CPU which is the parent of the kobj is hotplugged
	 * offline, check for siblings, and create cpufreq sysfs interface
	 * and symlinks
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		/* first sibling now owns the new sysfs dir */
		cpumask_clear_cpu(cpu, data->cpus);
		cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));

		/* finally remove our own symlink */
		lock_policy_rwsem_write(cpu);
		__cpufreq_remove_dev(sys_dev);
	}
#endif

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);

	return 0;
}
static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current frequency of the given CPU.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
/**
 * cpufreq_suspend - let the low level driver prepare for suspend
 */
static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int ret = 0;
	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
/**
 * cpufreq_resume - restore proper CPU frequency handling after resume
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	restored. It will verify that the current freq is in sync with
 *	what we believe it to be. This is a bit later than when it
 *	should be, but nonetheless it's better than calling
 *	cpufreq_driver->get() here which might re-enable interrupts...
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int ret = 0;
	int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* 'gov' only needs to be defined when the default governor is known
	   to have latency restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to query
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different needs
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
						driver_data->name);
			goto err_sysdev_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	dprintk("driver %s up and running\n", driver_data->name);
	cpufreq_debug_enable_ratelimit();

	return 0;
err_sysdev_unreg:
	sysdev_driver_unregister(&cpu_sysdev_class,
			&cpufreq_sysdev_driver);
err_null_driver:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_sysdev_class.kset.kobj);
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
core_initcall(cpufreq_core_init);