1 /*
2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/syscore_ops.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
33 /**
34 * The "cpufreq driver" - the arch- or hardware-dependent low
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 static DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 #ifdef CONFIG_HOTPLUG_CPU
46 /* This one keeps track of the previously set governor of a removed CPU */
47 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 #endif
50 static inline bool has_target(void)
52 return cpufreq_driver->target_index || cpufreq_driver->target;
56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
57 * sections
59 static DECLARE_RWSEM(cpufreq_rwsem);
61 /* internal prototypes */
62 static int __cpufreq_governor(struct cpufreq_policy *policy,
63 unsigned int event);
64 static unsigned int __cpufreq_get(unsigned int cpu);
65 static void handle_update(struct work_struct *work);
67 /**
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
74 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
75 static struct srcu_notifier_head cpufreq_transition_notifier_list;
77 static bool init_cpufreq_transition_notifier_list_called;
78 static int __init init_cpufreq_transition_notifier_list(void)
80 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
81 init_cpufreq_transition_notifier_list_called = true;
82 return 0;
84 pure_initcall(init_cpufreq_transition_notifier_list);
86 static int off __read_mostly;
87 static int cpufreq_disabled(void)
89 return off;
91 void disable_cpufreq(void)
93 off = 1;
95 static LIST_HEAD(cpufreq_governor_list);
96 static DEFINE_MUTEX(cpufreq_governor_mutex);
98 bool have_governor_per_policy(void)
100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
102 EXPORT_SYMBOL_GPL(have_governor_per_policy);
104 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
106 if (have_governor_per_policy())
107 return &policy->kobj;
108 else
109 return cpufreq_global_kobject;
111 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
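/*
 * Example usage (a minimal sketch; the example_* name and the attribute
 * group are hypothetical): a governor places its tunables under the kobject
 * returned above, so they show up per-policy or globally depending on
 * CPUFREQ_HAVE_GOVERNOR_PER_POLICY.
 */
static int example_governor_add_tunables(struct cpufreq_policy *policy,
				const struct attribute_group *attr_group)
{
	return sysfs_create_group(get_governor_parent_kobj(policy), attr_group);
}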
113 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
115 u64 idle_time;
116 u64 cur_wall_time;
117 u64 busy_time;
119 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
121 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
122 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
128 idle_time = cur_wall_time - busy_time;
129 if (wall)
130 *wall = cputime_to_usecs(cur_wall_time);
132 return cputime_to_usecs(idle_time);
135 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
141 else if (!io_busy)
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
144 return idle_time;
146 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
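/*
 * Example usage (a minimal sketch; example_cpu_load() is hypothetical):
 * governors sample this helper twice and work with the deltas, i.e.
 * load = (wall_delta - idle_delta) * 100 / wall_delta.
 */
static unsigned int example_cpu_load(unsigned int cpu,
				     u64 *prev_idle, u64 *prev_wall)
{
	u64 wall, idle, wall_delta, idle_delta;

	idle = get_cpu_idle_time(cpu, &wall, 0);	/* iowait counts as idle */
	wall_delta = wall - *prev_wall;
	idle_delta = idle - *prev_idle;
	*prev_wall = wall;
	*prev_idle = idle;

	if (!wall_delta)
		return 0;
	return div64_u64(100 * (wall_delta - idle_delta), wall_delta);
}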
149 * This is a generic cpufreq init() routine which can be used by cpufreq
150 * drivers of SMP systems. It will do the following:
151 * - validate & show the frequency table passed in
152 * - set the policy's transition latency
153 * - fill policy->cpus with all possible CPUs
155 int cpufreq_generic_init(struct cpufreq_policy *policy,
156 struct cpufreq_frequency_table *table,
157 unsigned int transition_latency)
159 int ret;
161 ret = cpufreq_table_validate_and_show(policy, table);
162 if (ret) {
163 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
164 return ret;
167 policy->cpuinfo.transition_latency = transition_latency;
170 * The driver only supports the SMP configuration where all processors
171 * share the clock and voltage.
173 cpumask_setall(policy->cpus);
175 return 0;
177 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
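/*
 * Example usage (a minimal sketch; the table contents and the 300 us
 * latency are made-up values): a driver ->init() built on
 * cpufreq_generic_init().
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .driver_data = 0, .frequency = 396000 },	/* kHz */
	{ .driver_data = 1, .frequency = 792000 },
	{ .driver_data = 2, .frequency = 996000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* transition_latency is in nanoseconds */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}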
179 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
181 struct cpufreq_policy *policy = NULL;
182 unsigned long flags;
184 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
185 return NULL;
187 if (!down_read_trylock(&cpufreq_rwsem))
188 return NULL;
190 /* get the cpufreq driver */
191 read_lock_irqsave(&cpufreq_driver_lock, flags);
193 if (cpufreq_driver) {
194 /* get the CPU */
195 policy = per_cpu(cpufreq_cpu_data, cpu);
196 if (policy)
197 kobject_get(&policy->kobj);
200 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
202 if (!policy)
203 up_read(&cpufreq_rwsem);
205 return policy;
207 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
209 void cpufreq_cpu_put(struct cpufreq_policy *policy)
211 if (cpufreq_disabled())
212 return;
214 kobject_put(&policy->kobj);
215 up_read(&cpufreq_rwsem);
217 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
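/*
 * Example usage (a minimal sketch; example_read_cur_freq() is
 * hypothetical): every successful cpufreq_cpu_get() must be paired with a
 * cpufreq_cpu_put(), since the get takes a reference on the policy kobject
 * and a read lock on cpufreq_rwsem.
 */
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;
		cpufreq_cpu_put(policy);
	}
	return cur;
}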
219 /*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
228 * systems as each CPU might be scaled differently. So, use the arch
229 * per-CPU loops_per_jiffy value wherever possible.
231 #ifndef CONFIG_SMP
232 static unsigned long l_p_j_ref;
233 static unsigned int l_p_j_ref_freq;
235 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
238 return;
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
249 ci->new);
250 pr_debug("scaling loops_per_jiffy to %lu "
251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
254 #else
255 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
257 return;
259 #endif
261 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
262 struct cpufreq_freqs *freqs, unsigned int state)
264 BUG_ON(irqs_disabled());
266 if (cpufreq_disabled())
267 return;
269 freqs->flags = cpufreq_driver->flags;
270 pr_debug("notification %u of frequency transition to %u kHz\n",
271 state, freqs->new);
273 switch (state) {
275 case CPUFREQ_PRECHANGE:
276 /* detect if the driver reported a value as "old frequency"
277 * which is not equal to what the cpufreq core thinks is
278 * "old frequency".
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
283 pr_debug("Warning: CPU frequency is"
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
290 CPUFREQ_PRECHANGE, freqs);
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
292 break;
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
297 (unsigned long)freqs->cpu);
298 trace_cpu_frequency(freqs->new, freqs->cpu);
299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
300 CPUFREQ_POSTCHANGE, freqs);
301 if (likely(policy) && likely(policy->cpu == freqs->cpu))
302 policy->cur = freqs->new;
303 break;
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have
313 * external effects.
315 void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
321 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
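/*
 * Example usage (a minimal sketch; example_target() is hypothetical):
 * a driver implementing ->target() itself brackets the hardware switch
 * with the two notifications, just as the core does for ->target_index()
 * drivers in __cpufreq_driver_target() below.
 */
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = target_freq,
	};

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... program the PLL/dividers to target_freq here ... */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	return 0;
}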
324 /*********************************************************************
325 * SYSFS INTERFACE *
326 *********************************************************************/
328 static struct cpufreq_governor *__find_governor(const char *str_governor)
330 struct cpufreq_governor *t;
332 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
333 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
334 return t;
336 return NULL;
340 * cpufreq_parse_governor - parse a governor string
342 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
343 struct cpufreq_governor **governor)
345 int err = -EINVAL;
347 if (!cpufreq_driver)
348 goto out;
350 if (cpufreq_driver->setpolicy) {
351 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
352 *policy = CPUFREQ_POLICY_PERFORMANCE;
353 err = 0;
354 } else if (!strnicmp(str_governor, "powersave",
355 CPUFREQ_NAME_LEN)) {
356 *policy = CPUFREQ_POLICY_POWERSAVE;
357 err = 0;
359 } else if (has_target()) {
360 struct cpufreq_governor *t;
362 mutex_lock(&cpufreq_governor_mutex);
364 t = __find_governor(str_governor);
366 if (t == NULL) {
367 int ret;
369 mutex_unlock(&cpufreq_governor_mutex);
370 ret = request_module("cpufreq_%s", str_governor);
371 mutex_lock(&cpufreq_governor_mutex);
373 if (ret == 0)
374 t = __find_governor(str_governor);
377 if (t != NULL) {
378 *governor = t;
379 err = 0;
382 mutex_unlock(&cpufreq_governor_mutex);
384 out:
385 return err;
389 * cpufreq_per_cpu_attr_read() / show_##file_name() -
390 * print out cpufreq information
392 * Write out information from cpufreq_driver->policy[cpu]; object must be
393 * "unsigned int".
396 #define show_one(file_name, object) \
397 static ssize_t show_##file_name \
398 (struct cpufreq_policy *policy, char *buf) \
400 return sprintf(buf, "%u\n", policy->object); \
403 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
404 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
405 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
406 show_one(scaling_min_freq, min);
407 show_one(scaling_max_freq, max);
408 show_one(scaling_cur_freq, cur);
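/*
 * For illustration, show_one(scaling_max_freq, max) above expands to the
 * equivalent of:
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 *
 * which backs /sys/devices/system/cpu/cpuN/cpufreq/scaling_max_freq.
 */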
410 static int cpufreq_set_policy(struct cpufreq_policy *policy,
411 struct cpufreq_policy *new_policy);
414 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
416 #define store_one(file_name, object) \
417 static ssize_t store_##file_name \
418 (struct cpufreq_policy *policy, const char *buf, size_t count) \
420 int ret; \
421 struct cpufreq_policy new_policy; \
423 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
424 if (ret) \
425 return -EINVAL; \
427 ret = sscanf(buf, "%u", &new_policy.object); \
428 if (ret != 1) \
429 return -EINVAL; \
431 ret = cpufreq_set_policy(policy, &new_policy); \
432 policy->user_policy.object = policy->object; \
434 return ret ? ret : count; \
437 store_one(scaling_min_freq, min);
438 store_one(scaling_max_freq, max);
441 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
443 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
444 char *buf)
446 unsigned int cur_freq = __cpufreq_get(policy->cpu);
447 if (!cur_freq)
448 return sprintf(buf, "<unknown>\n");
449 return sprintf(buf, "%u\n", cur_freq);
453 * show_scaling_governor - show the current policy for the specified CPU
455 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
457 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
458 return sprintf(buf, "powersave\n");
459 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
460 return sprintf(buf, "performance\n");
461 else if (policy->governor)
462 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
463 policy->governor->name);
464 return -EINVAL;
468 * store_scaling_governor - store policy for the specified CPU
470 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
471 const char *buf, size_t count)
473 int ret;
474 char str_governor[16];
475 struct cpufreq_policy new_policy;
477 ret = cpufreq_get_policy(&new_policy, policy->cpu);
478 if (ret)
479 return ret;
481 ret = sscanf(buf, "%15s", str_governor);
482 if (ret != 1)
483 return -EINVAL;
485 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
486 &new_policy.governor))
487 return -EINVAL;
489 ret = cpufreq_set_policy(policy, &new_policy);
491 policy->user_policy.policy = policy->policy;
492 policy->user_policy.governor = policy->governor;
494 if (ret)
495 return ret;
496 else
497 return count;
501 * show_scaling_driver - show the cpufreq driver currently loaded
503 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
505 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
509 * show_scaling_available_governors - show the available CPUfreq governors
511 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
512 char *buf)
514 ssize_t i = 0;
515 struct cpufreq_governor *t;
517 if (!has_target()) {
518 i += sprintf(buf, "performance powersave");
519 goto out;
522 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
523 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
524 - (CPUFREQ_NAME_LEN + 2)))
525 goto out;
526 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
528 out:
529 i += sprintf(&buf[i], "\n");
530 return i;
533 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
535 ssize_t i = 0;
536 unsigned int cpu;
538 for_each_cpu(cpu, mask) {
539 if (i)
540 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
541 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
542 if (i >= (PAGE_SIZE - 5))
543 break;
545 i += sprintf(&buf[i], "\n");
546 return i;
548 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
551 * show_related_cpus - show the CPUs affected by each transition even if
552 * hw coordination is in use
554 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
556 return cpufreq_show_cpus(policy->related_cpus, buf);
560 * show_affected_cpus - show the CPUs affected by each transition
562 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
564 return cpufreq_show_cpus(policy->cpus, buf);
567 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
568 const char *buf, size_t count)
570 unsigned int freq = 0;
571 unsigned int ret;
573 if (!policy->governor || !policy->governor->store_setspeed)
574 return -EINVAL;
576 ret = sscanf(buf, "%u", &freq);
577 if (ret != 1)
578 return -EINVAL;
580 policy->governor->store_setspeed(policy, freq);
582 return count;
585 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
587 if (!policy->governor || !policy->governor->show_setspeed)
588 return sprintf(buf, "<unsupported>\n");
590 return policy->governor->show_setspeed(policy, buf);
594 * show_bios_limit - show the current cpufreq HW/BIOS limitation
596 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
598 unsigned int limit;
599 int ret;
600 if (cpufreq_driver->bios_limit) {
601 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
602 if (!ret)
603 return sprintf(buf, "%u\n", limit);
605 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
608 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
609 cpufreq_freq_attr_ro(cpuinfo_min_freq);
610 cpufreq_freq_attr_ro(cpuinfo_max_freq);
611 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
612 cpufreq_freq_attr_ro(scaling_available_governors);
613 cpufreq_freq_attr_ro(scaling_driver);
614 cpufreq_freq_attr_ro(scaling_cur_freq);
615 cpufreq_freq_attr_ro(bios_limit);
616 cpufreq_freq_attr_ro(related_cpus);
617 cpufreq_freq_attr_ro(affected_cpus);
618 cpufreq_freq_attr_rw(scaling_min_freq);
619 cpufreq_freq_attr_rw(scaling_max_freq);
620 cpufreq_freq_attr_rw(scaling_governor);
621 cpufreq_freq_attr_rw(scaling_setspeed);
623 static struct attribute *default_attrs[] = {
624 &cpuinfo_min_freq.attr,
625 &cpuinfo_max_freq.attr,
626 &cpuinfo_transition_latency.attr,
627 &scaling_min_freq.attr,
628 &scaling_max_freq.attr,
629 &affected_cpus.attr,
630 &related_cpus.attr,
631 &scaling_governor.attr,
632 &scaling_driver.attr,
633 &scaling_available_governors.attr,
634 &scaling_setspeed.attr,
635 NULL
638 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
639 #define to_attr(a) container_of(a, struct freq_attr, attr)
641 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
643 struct cpufreq_policy *policy = to_policy(kobj);
644 struct freq_attr *fattr = to_attr(attr);
645 ssize_t ret;
647 if (!down_read_trylock(&cpufreq_rwsem))
648 return -EINVAL;
650 down_read(&policy->rwsem);
652 if (fattr->show)
653 ret = fattr->show(policy, buf);
654 else
655 ret = -EIO;
657 up_read(&policy->rwsem);
658 up_read(&cpufreq_rwsem);
660 return ret;
663 static ssize_t store(struct kobject *kobj, struct attribute *attr,
664 const char *buf, size_t count)
666 struct cpufreq_policy *policy = to_policy(kobj);
667 struct freq_attr *fattr = to_attr(attr);
668 ssize_t ret = -EINVAL;
670 get_online_cpus();
672 if (!cpu_online(policy->cpu))
673 goto unlock;
675 if (!down_read_trylock(&cpufreq_rwsem))
676 goto unlock;
678 down_write(&policy->rwsem);
680 if (fattr->store)
681 ret = fattr->store(policy, buf, count);
682 else
683 ret = -EIO;
685 up_write(&policy->rwsem);
687 up_read(&cpufreq_rwsem);
688 unlock:
689 put_online_cpus();
691 return ret;
694 static void cpufreq_sysfs_release(struct kobject *kobj)
696 struct cpufreq_policy *policy = to_policy(kobj);
697 pr_debug("last reference is dropped\n");
698 complete(&policy->kobj_unregister);
701 static const struct sysfs_ops sysfs_ops = {
702 .show = show,
703 .store = store,
706 static struct kobj_type ktype_cpufreq = {
707 .sysfs_ops = &sysfs_ops,
708 .default_attrs = default_attrs,
709 .release = cpufreq_sysfs_release,
712 struct kobject *cpufreq_global_kobject;
713 EXPORT_SYMBOL(cpufreq_global_kobject);
715 static int cpufreq_global_kobject_usage;
717 int cpufreq_get_global_kobject(void)
719 if (!cpufreq_global_kobject_usage++)
720 return kobject_add(cpufreq_global_kobject,
721 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
723 return 0;
725 EXPORT_SYMBOL(cpufreq_get_global_kobject);
727 void cpufreq_put_global_kobject(void)
729 if (!--cpufreq_global_kobject_usage)
730 kobject_del(cpufreq_global_kobject);
732 EXPORT_SYMBOL(cpufreq_put_global_kobject);
734 int cpufreq_sysfs_create_file(const struct attribute *attr)
736 int ret = cpufreq_get_global_kobject();
738 if (!ret) {
739 ret = sysfs_create_file(cpufreq_global_kobject, attr);
740 if (ret)
741 cpufreq_put_global_kobject();
744 return ret;
746 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
748 void cpufreq_sysfs_remove_file(const struct attribute *attr)
750 sysfs_remove_file(cpufreq_global_kobject, attr);
751 cpufreq_put_global_kobject();
753 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
755 /* symlink affected CPUs */
756 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
758 unsigned int j;
759 int ret = 0;
761 for_each_cpu(j, policy->cpus) {
762 struct device *cpu_dev;
764 if (j == policy->cpu)
765 continue;
767 pr_debug("Adding link for CPU: %u\n", j);
768 cpu_dev = get_cpu_device(j);
769 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
770 "cpufreq");
771 if (ret)
772 break;
774 return ret;
777 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
778 struct device *dev)
780 struct freq_attr **drv_attr;
781 int ret = 0;
783 /* prepare interface data */
784 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
785 &dev->kobj, "cpufreq");
786 if (ret)
787 return ret;
789 /* set up files for this cpu device */
790 drv_attr = cpufreq_driver->attr;
791 while ((drv_attr) && (*drv_attr)) {
792 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
793 if (ret)
794 goto err_out_kobj_put;
795 drv_attr++;
797 if (cpufreq_driver->get) {
798 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
799 if (ret)
800 goto err_out_kobj_put;
802 if (has_target()) {
803 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
804 if (ret)
805 goto err_out_kobj_put;
807 if (cpufreq_driver->bios_limit) {
808 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
809 if (ret)
810 goto err_out_kobj_put;
813 ret = cpufreq_add_dev_symlink(policy);
814 if (ret)
815 goto err_out_kobj_put;
817 return ret;
819 err_out_kobj_put:
820 kobject_put(&policy->kobj);
821 wait_for_completion(&policy->kobj_unregister);
822 return ret;
825 static void cpufreq_init_policy(struct cpufreq_policy *policy)
827 struct cpufreq_policy new_policy;
828 int ret = 0;
830 memcpy(&new_policy, policy, sizeof(*policy));
832 /* Use the default policy if it's valid. */
833 if (cpufreq_driver->setpolicy)
834 cpufreq_parse_governor(policy->governor->name,
835 &new_policy.policy, NULL);
837 /* ensure that the starting sequence is run in cpufreq_set_policy */
838 policy->governor = NULL;
840 /* set default policy */
841 ret = cpufreq_set_policy(policy, &new_policy);
842 policy->user_policy.policy = policy->policy;
843 policy->user_policy.governor = policy->governor;
845 if (ret) {
846 pr_debug("setting policy failed\n");
847 if (cpufreq_driver->exit)
848 cpufreq_driver->exit(policy);
852 #ifdef CONFIG_HOTPLUG_CPU
853 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
854 unsigned int cpu, struct device *dev)
856 int ret = 0;
857 unsigned long flags;
859 if (has_target()) {
860 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
861 if (ret) {
862 pr_err("%s: Failed to stop governor\n", __func__);
863 return ret;
867 down_write(&policy->rwsem);
869 write_lock_irqsave(&cpufreq_driver_lock, flags);
871 cpumask_set_cpu(cpu, policy->cpus);
872 per_cpu(cpufreq_cpu_data, cpu) = policy;
873 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
875 up_write(&policy->rwsem);
877 if (has_target()) {
878 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
879 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
880 pr_err("%s: Failed to start governor\n", __func__);
881 return ret;
885 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
887 #endif
889 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
891 struct cpufreq_policy *policy;
892 unsigned long flags;
894 read_lock_irqsave(&cpufreq_driver_lock, flags);
896 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
898 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
900 return policy;
903 static struct cpufreq_policy *cpufreq_policy_alloc(void)
905 struct cpufreq_policy *policy;
907 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
908 if (!policy)
909 return NULL;
911 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
912 goto err_free_policy;
914 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
915 goto err_free_cpumask;
917 INIT_LIST_HEAD(&policy->policy_list);
918 init_rwsem(&policy->rwsem);
920 return policy;
922 err_free_cpumask:
923 free_cpumask_var(policy->cpus);
924 err_free_policy:
925 kfree(policy);
927 return NULL;
930 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
932 struct kobject *kobj;
933 struct completion *cmp;
935 down_read(&policy->rwsem);
936 kobj = &policy->kobj;
937 cmp = &policy->kobj_unregister;
938 up_read(&policy->rwsem);
939 kobject_put(kobj);
942 * We need to make sure that the underlying kobj is
943 * actually not referenced anymore by anybody before we
944 * proceed with unloading.
946 pr_debug("waiting for dropping of refcount\n");
947 wait_for_completion(cmp);
948 pr_debug("wait complete\n");
951 static void cpufreq_policy_free(struct cpufreq_policy *policy)
953 free_cpumask_var(policy->related_cpus);
954 free_cpumask_var(policy->cpus);
955 kfree(policy);
958 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
960 if (WARN_ON(cpu == policy->cpu))
961 return;
963 down_write(&policy->rwsem);
965 policy->last_cpu = policy->cpu;
966 policy->cpu = cpu;
968 up_write(&policy->rwsem);
970 cpufreq_frequency_table_update_policy_cpu(policy);
971 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
972 CPUFREQ_UPDATE_POLICY_CPU, policy);
975 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
976 bool frozen)
978 unsigned int j, cpu = dev->id;
979 int ret = -ENOMEM;
980 struct cpufreq_policy *policy;
981 unsigned long flags;
982 #ifdef CONFIG_HOTPLUG_CPU
983 struct cpufreq_policy *tpolicy;
984 struct cpufreq_governor *gov;
985 #endif
987 if (cpu_is_offline(cpu))
988 return 0;
990 pr_debug("adding CPU %u\n", cpu);
992 #ifdef CONFIG_SMP
993 /* check whether a different CPU already registered this
994 * CPU because it is in the same boat. */
995 policy = cpufreq_cpu_get(cpu);
996 if (unlikely(policy)) {
997 cpufreq_cpu_put(policy);
998 return 0;
1000 #endif
1002 if (!down_read_trylock(&cpufreq_rwsem))
1003 return 0;
1005 #ifdef CONFIG_HOTPLUG_CPU
1006 /* Check if this cpu was hot-unplugged earlier and has siblings */
1007 read_lock_irqsave(&cpufreq_driver_lock, flags);
1008 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1009 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1010 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1011 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1012 up_read(&cpufreq_rwsem);
1013 return ret;
1016 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1017 #endif
1019 if (frozen)
1020 /* Restore the saved policy when doing light-weight init */
1021 policy = cpufreq_policy_restore(cpu);
1022 else
1023 policy = cpufreq_policy_alloc();
1025 if (!policy)
1026 goto nomem_out;
1030 * In the resume path, since we restore a saved policy, the assignment
1031 * to policy->cpu is like an update of the existing policy, rather than
1032 * the creation of a brand new one. So we need to perform this update
1033 * by invoking update_policy_cpu().
1035 if (frozen && cpu != policy->cpu)
1036 update_policy_cpu(policy, cpu);
1037 else
1038 policy->cpu = cpu;
1040 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1041 cpumask_copy(policy->cpus, cpumask_of(cpu));
1043 init_completion(&policy->kobj_unregister);
1044 INIT_WORK(&policy->update, handle_update);
1046 /* call driver. From then on the cpufreq driver must be able
1047 * to accept all calls to ->verify and ->setpolicy for this CPU
1049 ret = cpufreq_driver->init(policy);
1050 if (ret) {
1051 pr_debug("initialization failed\n");
1052 goto err_set_policy_cpu;
1055 if (cpufreq_driver->get) {
1056 policy->cur = cpufreq_driver->get(policy->cpu);
1057 if (!policy->cur) {
1058 pr_err("%s: ->get() failed\n", __func__);
1059 goto err_get_freq;
1063 /* related cpus should at least have policy->cpus */
1064 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1067 * affected cpus must always be the ones that are online. We aren't
1068 * managing offline cpus here.
1070 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1072 policy->user_policy.min = policy->min;
1073 policy->user_policy.max = policy->max;
1075 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1076 CPUFREQ_START, policy);
1078 #ifdef CONFIG_HOTPLUG_CPU
1079 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1080 if (gov) {
1081 policy->governor = gov;
1082 pr_debug("Restoring governor %s for cpu %d\n",
1083 policy->governor->name, cpu);
1085 #endif
1087 write_lock_irqsave(&cpufreq_driver_lock, flags);
1088 for_each_cpu(j, policy->cpus)
1089 per_cpu(cpufreq_cpu_data, j) = policy;
1090 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1092 if (!frozen) {
1093 ret = cpufreq_add_dev_interface(policy, dev);
1094 if (ret)
1095 goto err_out_unregister;
1098 write_lock_irqsave(&cpufreq_driver_lock, flags);
1099 list_add(&policy->policy_list, &cpufreq_policy_list);
1100 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1102 cpufreq_init_policy(policy);
1104 kobject_uevent(&policy->kobj, KOBJ_ADD);
1105 up_read(&cpufreq_rwsem);
1107 pr_debug("initialization complete\n");
1109 return 0;
1111 err_out_unregister:
1112 write_lock_irqsave(&cpufreq_driver_lock, flags);
1113 for_each_cpu(j, policy->cpus)
1114 per_cpu(cpufreq_cpu_data, j) = NULL;
1115 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1117 err_get_freq:
1118 if (cpufreq_driver->exit)
1119 cpufreq_driver->exit(policy);
1120 err_set_policy_cpu:
1121 if (frozen)
1122 cpufreq_policy_put_kobj(policy);
1123 cpufreq_policy_free(policy);
1125 nomem_out:
1126 up_read(&cpufreq_rwsem);
1128 return ret;
1132 * cpufreq_add_dev - add a CPU device
1134 * Adds the cpufreq interface for a CPU device.
1136 * The Oracle says: try running cpufreq registration/unregistration concurrently
1137 * with cpu hotplugging and all hell will break loose. Tried to clean this
1138 * mess up, but more thorough testing is needed. - Mathieu
1140 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1142 return __cpufreq_add_dev(dev, sif, false);
1145 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1146 unsigned int old_cpu)
1148 struct device *cpu_dev;
1149 int ret;
1151 /* first sibling now owns the new sysfs dir */
1152 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1154 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1155 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1156 if (ret) {
1157 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1159 down_write(&policy->rwsem);
1160 cpumask_set_cpu(old_cpu, policy->cpus);
1161 up_write(&policy->rwsem);
1163 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1164 "cpufreq");
1166 return -EINVAL;
1169 return cpu_dev->id;
1172 static int __cpufreq_remove_dev_prepare(struct device *dev,
1173 struct subsys_interface *sif,
1174 bool frozen)
1176 unsigned int cpu = dev->id, cpus;
1177 int new_cpu, ret;
1178 unsigned long flags;
1179 struct cpufreq_policy *policy;
1181 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1183 write_lock_irqsave(&cpufreq_driver_lock, flags);
1185 policy = per_cpu(cpufreq_cpu_data, cpu);
1187 /* Save the policy somewhere when doing a light-weight tear-down */
1188 if (frozen)
1189 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1191 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1193 if (!policy) {
1194 pr_debug("%s: No cpu_data found\n", __func__);
1195 return -EINVAL;
1198 if (has_target()) {
1199 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1200 if (ret) {
1201 pr_err("%s: Failed to stop governor\n", __func__);
1202 return ret;
1206 #ifdef CONFIG_HOTPLUG_CPU
1207 if (!cpufreq_driver->setpolicy)
1208 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1209 policy->governor->name, CPUFREQ_NAME_LEN);
1210 #endif
1212 down_read(&policy->rwsem);
1213 cpus = cpumask_weight(policy->cpus);
1214 up_read(&policy->rwsem);
1216 if (cpu != policy->cpu) {
1217 if (!frozen)
1218 sysfs_remove_link(&dev->kobj, "cpufreq");
1219 } else if (cpus > 1) {
1220 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1221 if (new_cpu >= 0) {
1222 update_policy_cpu(policy, new_cpu);
1224 if (!frozen) {
1225 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1226 __func__, new_cpu, cpu);
1231 return 0;
1234 static int __cpufreq_remove_dev_finish(struct device *dev,
1235 struct subsys_interface *sif,
1236 bool frozen)
1238 unsigned int cpu = dev->id, cpus;
1239 int ret;
1240 unsigned long flags;
1241 struct cpufreq_policy *policy;
1243 read_lock_irqsave(&cpufreq_driver_lock, flags);
1244 policy = per_cpu(cpufreq_cpu_data, cpu);
1245 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1247 if (!policy) {
1248 pr_debug("%s: No cpu_data found\n", __func__);
1249 return -EINVAL;
1252 down_write(&policy->rwsem);
1253 cpus = cpumask_weight(policy->cpus);
1255 if (cpus > 1)
1256 cpumask_clear_cpu(cpu, policy->cpus);
1257 up_write(&policy->rwsem);
1259 /* If cpu is last user of policy, free policy */
1260 if (cpus == 1) {
1261 if (has_target()) {
1262 ret = __cpufreq_governor(policy,
1263 CPUFREQ_GOV_POLICY_EXIT);
1264 if (ret) {
1265 pr_err("%s: Failed to exit governor\n",
1266 __func__);
1267 return ret;
1271 if (!frozen)
1272 cpufreq_policy_put_kobj(policy);
1275 * Perform the ->exit() even during light-weight tear-down,
1276 * since this is a core component, and is essential for the
1277 * subsequent light-weight ->init() to succeed.
1279 if (cpufreq_driver->exit)
1280 cpufreq_driver->exit(policy);
1282 /* Remove policy from list of active policies */
1283 write_lock_irqsave(&cpufreq_driver_lock, flags);
1284 list_del(&policy->policy_list);
1285 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1287 if (!frozen)
1288 cpufreq_policy_free(policy);
1289 } else {
1290 if (has_target()) {
1291 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1292 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1293 pr_err("%s: Failed to start governor\n",
1294 __func__);
1295 return ret;
1300 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1301 return 0;
1305 * cpufreq_remove_dev - remove a CPU device
1307 * Removes the cpufreq interface for a CPU device.
1309 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1311 unsigned int cpu = dev->id;
1312 int ret;
1314 if (cpu_is_offline(cpu))
1315 return 0;
1317 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1319 if (!ret)
1320 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1322 return ret;
1325 static void handle_update(struct work_struct *work)
1327 struct cpufreq_policy *policy =
1328 container_of(work, struct cpufreq_policy, update);
1329 unsigned int cpu = policy->cpu;
1330 pr_debug("handle_update for cpu %u called\n", cpu);
1331 cpufreq_update_policy(cpu);
1335 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
1336 * in deep trouble.
1337 * @cpu: cpu number
1338 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1339 * @new_freq: CPU frequency the CPU actually runs at
1341 * We adjust to the current frequency first, and need to clean up later.
1342 * So either call cpufreq_update_policy() or schedule handle_update().
1344 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1345 unsigned int new_freq)
1347 struct cpufreq_policy *policy;
1348 struct cpufreq_freqs freqs;
1349 unsigned long flags;
1351 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1352 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1354 freqs.old = old_freq;
1355 freqs.new = new_freq;
1357 read_lock_irqsave(&cpufreq_driver_lock, flags);
1358 policy = per_cpu(cpufreq_cpu_data, cpu);
1359 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1361 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1362 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1366 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1367 * @cpu: CPU number
1369 * This is the last known freq, without actually getting it from the driver.
1370 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1372 unsigned int cpufreq_quick_get(unsigned int cpu)
1374 struct cpufreq_policy *policy;
1375 unsigned int ret_freq = 0;
1377 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1378 return cpufreq_driver->get(cpu);
1380 policy = cpufreq_cpu_get(cpu);
1381 if (policy) {
1382 ret_freq = policy->cur;
1383 cpufreq_cpu_put(policy);
1386 return ret_freq;
1388 EXPORT_SYMBOL(cpufreq_quick_get);
1391 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1392 * @cpu: CPU number
1394 * Just return the max possible frequency for a given CPU.
1396 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1398 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1399 unsigned int ret_freq = 0;
1401 if (policy) {
1402 ret_freq = policy->max;
1403 cpufreq_cpu_put(policy);
1406 return ret_freq;
1408 EXPORT_SYMBOL(cpufreq_quick_get_max);
1410 static unsigned int __cpufreq_get(unsigned int cpu)
1412 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1413 unsigned int ret_freq = 0;
1415 if (!cpufreq_driver->get)
1416 return ret_freq;
1418 ret_freq = cpufreq_driver->get(cpu);
1420 if (ret_freq && policy->cur &&
1421 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1422 /* verify no discrepancy between actual and
1423 saved value exists */
1424 if (unlikely(ret_freq != policy->cur)) {
1425 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1426 schedule_work(&policy->update);
1430 return ret_freq;
1434 * cpufreq_get - get the current CPU frequency (in kHz)
1435 * @cpu: CPU number
1437 * Get the current (static) CPU frequency
1439 unsigned int cpufreq_get(unsigned int cpu)
1441 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1442 unsigned int ret_freq = 0;
1444 if (cpufreq_disabled() || !cpufreq_driver)
1445 return -ENOENT;
1447 BUG_ON(!policy);
1449 if (!down_read_trylock(&cpufreq_rwsem))
1450 return 0;
1452 down_read(&policy->rwsem);
1454 ret_freq = __cpufreq_get(cpu);
1456 up_read(&policy->rwsem);
1457 up_read(&cpufreq_rwsem);
1459 return ret_freq;
1461 EXPORT_SYMBOL(cpufreq_get);
1463 static struct subsys_interface cpufreq_interface = {
1464 .name = "cpufreq",
1465 .subsys = &cpu_subsys,
1466 .add_dev = cpufreq_add_dev,
1467 .remove_dev = cpufreq_remove_dev,
1471 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1473 * This function is only executed for the boot processor. The other CPUs
1474 * have been put offline by means of CPU hotplug.
1476 static int cpufreq_bp_suspend(void)
1478 int ret = 0;
1480 int cpu = smp_processor_id();
1481 struct cpufreq_policy *policy;
1483 pr_debug("suspending cpu %u\n", cpu);
1485 /* If there's no policy for the boot CPU, we have nothing to do. */
1486 policy = cpufreq_cpu_get(cpu);
1487 if (!policy)
1488 return 0;
1490 if (cpufreq_driver->suspend) {
1491 ret = cpufreq_driver->suspend(policy);
1492 if (ret)
1493 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1494 "step on CPU %u\n", policy->cpu);
1497 cpufreq_cpu_put(policy);
1498 return ret;
1502 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1504 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1505 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1506 * restored. It will verify that the current freq is in sync with
1507 * what we believe it to be. This is a bit later than when it
1508 * should be, but nonetheless it's better than calling
1509 * cpufreq_driver->get() here which might re-enable interrupts...
1511 * This function is only executed for the boot CPU. The other CPUs have not
1512 * been turned on yet.
1514 static void cpufreq_bp_resume(void)
1516 int ret = 0;
1518 int cpu = smp_processor_id();
1519 struct cpufreq_policy *policy;
1521 pr_debug("resuming cpu %u\n", cpu);
1523 /* If there's no policy for the boot CPU, we have nothing to do. */
1524 policy = cpufreq_cpu_get(cpu);
1525 if (!policy)
1526 return;
1528 if (cpufreq_driver->resume) {
1529 ret = cpufreq_driver->resume(policy);
1530 if (ret) {
1531 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1532 "step on CPU %u\n", policy->cpu);
1533 goto fail;
1537 schedule_work(&policy->update);
1539 fail:
1540 cpufreq_cpu_put(policy);
1543 static struct syscore_ops cpufreq_syscore_ops = {
1544 .suspend = cpufreq_bp_suspend,
1545 .resume = cpufreq_bp_resume,
1549 * cpufreq_get_current_driver - return current driver's name
1551 * Return the name string of the currently loaded cpufreq driver
1552 * or NULL, if none.
1554 const char *cpufreq_get_current_driver(void)
1556 if (cpufreq_driver)
1557 return cpufreq_driver->name;
1559 return NULL;
1561 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1563 /*********************************************************************
1564 * NOTIFIER LISTS INTERFACE *
1565 *********************************************************************/
1568 * cpufreq_register_notifier - register a driver with cpufreq
1569 * @nb: notifier function to register
1570 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1572 * Add a driver to one of two lists: either a list of drivers that
1573 * are notified about clock rate changes (once before and once after
1574 * the transition), or a list of drivers that are notified about
1575 * changes in cpufreq policy.
1577 * This function may sleep, and has the same return conditions as
1578 * blocking_notifier_chain_register.
1580 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1582 int ret;
1584 if (cpufreq_disabled())
1585 return -EINVAL;
1587 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1589 switch (list) {
1590 case CPUFREQ_TRANSITION_NOTIFIER:
1591 ret = srcu_notifier_chain_register(
1592 &cpufreq_transition_notifier_list, nb);
1593 break;
1594 case CPUFREQ_POLICY_NOTIFIER:
1595 ret = blocking_notifier_chain_register(
1596 &cpufreq_policy_notifier_list, nb);
1597 break;
1598 default:
1599 ret = -EINVAL;
1602 return ret;
1604 EXPORT_SYMBOL(cpufreq_register_notifier);
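/*
 * Example usage (a minimal sketch; the example_* names are hypothetical):
 * a transition notifier is called once with CPUFREQ_PRECHANGE and once
 * with CPUFREQ_POSTCHANGE for every frequency change, with a
 * struct cpufreq_freqs * as data.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u kHz -> %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* in module init:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */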
1607 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1608 * @nb: notifier block to be unregistered
1609 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1611 * Remove a driver from the CPU frequency notifier list.
1613 * This function may sleep, and has the same return conditions as
1614 * blocking_notifier_chain_unregister.
1616 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1618 int ret;
1620 if (cpufreq_disabled())
1621 return -EINVAL;
1623 switch (list) {
1624 case CPUFREQ_TRANSITION_NOTIFIER:
1625 ret = srcu_notifier_chain_unregister(
1626 &cpufreq_transition_notifier_list, nb);
1627 break;
1628 case CPUFREQ_POLICY_NOTIFIER:
1629 ret = blocking_notifier_chain_unregister(
1630 &cpufreq_policy_notifier_list, nb);
1631 break;
1632 default:
1633 ret = -EINVAL;
1636 return ret;
1638 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1641 /*********************************************************************
1642 * GOVERNORS *
1643 *********************************************************************/
1645 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1646 unsigned int target_freq,
1647 unsigned int relation)
1649 int retval = -EINVAL;
1650 unsigned int old_target_freq = target_freq;
1652 if (cpufreq_disabled())
1653 return -ENODEV;
1655 /* Make sure that target_freq is within supported range */
1656 if (target_freq > policy->max)
1657 target_freq = policy->max;
1658 if (target_freq < policy->min)
1659 target_freq = policy->min;
1661 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1662 policy->cpu, target_freq, relation, old_target_freq);
1665 * This might look like a redundant call as we are checking it again
1666 * after finding the index. But it is left intentionally for cases where
1667 * exactly the same freq is requested again, so we can save a few function
1668 * calls.
1670 if (target_freq == policy->cur)
1671 return 0;
1673 if (cpufreq_driver->target)
1674 retval = cpufreq_driver->target(policy, target_freq, relation);
1675 else if (cpufreq_driver->target_index) {
1676 struct cpufreq_frequency_table *freq_table;
1677 struct cpufreq_freqs freqs;
1678 bool notify;
1679 int index;
1681 freq_table = cpufreq_frequency_get_table(policy->cpu);
1682 if (unlikely(!freq_table)) {
1683 pr_err("%s: Unable to find freq_table\n", __func__);
1684 goto out;
1687 retval = cpufreq_frequency_table_target(policy, freq_table,
1688 target_freq, relation, &index);
1689 if (unlikely(retval)) {
1690 pr_err("%s: Unable to find matching freq\n", __func__);
1691 goto out;
1694 if (freq_table[index].frequency == policy->cur) {
1695 retval = 0;
1696 goto out;
1699 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1701 if (notify) {
1702 freqs.old = policy->cur;
1703 freqs.new = freq_table[index].frequency;
1704 freqs.flags = 0;
1706 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1707 __func__, policy->cpu, freqs.old,
1708 freqs.new);
1710 cpufreq_notify_transition(policy, &freqs,
1711 CPUFREQ_PRECHANGE);
1714 retval = cpufreq_driver->target_index(policy, index);
1715 if (retval)
1716 pr_err("%s: Failed to change cpu frequency: %d\n",
1717 __func__, retval);
1719 if (notify) {
1721 * Notify with old freq in case we failed to change
1722 * frequency
1724 if (retval)
1725 freqs.new = freqs.old;
1727 cpufreq_notify_transition(policy, &freqs,
1728 CPUFREQ_POSTCHANGE);
1732 out:
1733 return retval;
1735 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1737 int cpufreq_driver_target(struct cpufreq_policy *policy,
1738 unsigned int target_freq,
1739 unsigned int relation)
1741 int ret = -EINVAL;
1743 down_write(&policy->rwsem);
1745 ret = __cpufreq_driver_target(policy, target_freq, relation);
1747 up_write(&policy->rwsem);
1749 return ret;
1751 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
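/*
 * Example usage (a minimal sketch; example_request_min_freq() is
 * hypothetical): cpufreq_driver_target() takes policy->rwsem around
 * __cpufreq_driver_target(); callers already holding the lock use the
 * __ variant directly. CPUFREQ_RELATION_L selects the lowest frequency
 * at or above the target, CPUFREQ_RELATION_H the highest at or below it.
 */
static int example_request_min_freq(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}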
1754 * __cpufreq_governor - forward a CPUFREQ_GOV_* event ("event": START,
1755 * STOP, LIMITS, POLICY_INIT or POLICY_EXIT) to the policy's governor
1757 static int __cpufreq_governor(struct cpufreq_policy *policy,
1758 unsigned int event)
1760 int ret;
1762 /* This must only be defined when the default governor is known to have
1763 latency restrictions, e.g. conservative or ondemand.
1764 That this is the case is already ensured in Kconfig.
1766 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1767 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1768 #else
1769 struct cpufreq_governor *gov = NULL;
1770 #endif
1772 if (policy->governor->max_transition_latency &&
1773 policy->cpuinfo.transition_latency >
1774 policy->governor->max_transition_latency) {
1775 if (!gov)
1776 return -EINVAL;
1777 else {
1778 printk(KERN_WARNING "%s governor failed: HW transition"
1779 " latency too long, falling back"
1780 " to %s governor\n",
1781 policy->governor->name,
1782 gov->name);
1783 policy->governor = gov;
1787 if (event == CPUFREQ_GOV_POLICY_INIT)
1788 if (!try_module_get(policy->governor->owner))
1789 return -EINVAL;
1791 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1792 policy->cpu, event);
1794 mutex_lock(&cpufreq_governor_lock);
1795 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1796 || (!policy->governor_enabled
1797 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1798 mutex_unlock(&cpufreq_governor_lock);
1799 return -EBUSY;
1802 if (event == CPUFREQ_GOV_STOP)
1803 policy->governor_enabled = false;
1804 else if (event == CPUFREQ_GOV_START)
1805 policy->governor_enabled = true;
1807 mutex_unlock(&cpufreq_governor_lock);
1809 ret = policy->governor->governor(policy, event);
1811 if (!ret) {
1812 if (event == CPUFREQ_GOV_POLICY_INIT)
1813 policy->governor->initialized++;
1814 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1815 policy->governor->initialized--;
1816 } else {
1817 /* Restore original values */
1818 mutex_lock(&cpufreq_governor_lock);
1819 if (event == CPUFREQ_GOV_STOP)
1820 policy->governor_enabled = true;
1821 else if (event == CPUFREQ_GOV_START)
1822 policy->governor_enabled = false;
1823 mutex_unlock(&cpufreq_governor_lock);
1826 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1827 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1828 module_put(policy->governor->owner);
1830 return ret;
1833 int cpufreq_register_governor(struct cpufreq_governor *governor)
1835 int err;
1837 if (!governor)
1838 return -EINVAL;
1840 if (cpufreq_disabled())
1841 return -ENODEV;
1843 mutex_lock(&cpufreq_governor_mutex);
1845 governor->initialized = 0;
1846 err = -EBUSY;
1847 if (__find_governor(governor->name) == NULL) {
1848 err = 0;
1849 list_add(&governor->governor_list, &cpufreq_governor_list);
1852 mutex_unlock(&cpufreq_governor_mutex);
1853 return err;
1855 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
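/*
 * Example usage (a minimal sketch; "example" is a hypothetical governor):
 * a governor's single ->governor() callback multiplexes the CPUFREQ_GOV_*
 * events. This one pins the policy maximum, like cpufreq_gov_performance.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* in module init: cpufreq_register_governor(&example_governor); */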
1857 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1859 #ifdef CONFIG_HOTPLUG_CPU
1860 int cpu;
1861 #endif
1863 if (!governor)
1864 return;
1866 if (cpufreq_disabled())
1867 return;
1869 #ifdef CONFIG_HOTPLUG_CPU
1870 for_each_present_cpu(cpu) {
1871 if (cpu_online(cpu))
1872 continue;
1873 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1874 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1876 #endif
1878 mutex_lock(&cpufreq_governor_mutex);
1879 list_del(&governor->governor_list);
1880 mutex_unlock(&cpufreq_governor_mutex);
1881 return;
1883 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1886 /*********************************************************************
1887 * POLICY INTERFACE *
1888 *********************************************************************/
1891 * cpufreq_get_policy - get the current cpufreq_policy
1892 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1893 * is written
1895 * Reads the current cpufreq policy.
1897 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1899 struct cpufreq_policy *cpu_policy;
1900 if (!policy)
1901 return -EINVAL;
1903 cpu_policy = cpufreq_cpu_get(cpu);
1904 if (!cpu_policy)
1905 return -EINVAL;
1907 memcpy(policy, cpu_policy, sizeof(*policy));
1909 cpufreq_cpu_put(cpu_policy);
1910 return 0;
1912 EXPORT_SYMBOL(cpufreq_get_policy);
1915 * cpufreq_set_policy - apply new_policy limits/governor to the CPU
1916 * @policy: current policy
1917 * @new_policy: policy to be set
1918 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1919 struct cpufreq_policy *new_policy)
1921 int ret = 0, failed = 1;
1923 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1924 new_policy->min, new_policy->max);
1926 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1928 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1929 ret = -EINVAL;
1930 goto error_out;
1933 /* verify the cpu speed can be set within this limit */
1934 ret = cpufreq_driver->verify(new_policy);
1935 if (ret)
1936 goto error_out;
1938 /* adjust if necessary - all reasons */
1939 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1940 CPUFREQ_ADJUST, new_policy);
1942 /* adjust if necessary - hardware incompatibility*/
1943 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1944 CPUFREQ_INCOMPATIBLE, new_policy);
1947 * verify the cpu speed can be set within this limit, which might be
1948 * different to the first one
1950 ret = cpufreq_driver->verify(new_policy);
1951 if (ret)
1952 goto error_out;
1954 /* notification of the new policy */
1955 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1956 CPUFREQ_NOTIFY, new_policy);
1958 policy->min = new_policy->min;
1959 policy->max = new_policy->max;
1961 pr_debug("new min and max freqs are %u - %u kHz\n",
1962 policy->min, policy->max);
1964 if (cpufreq_driver->setpolicy) {
1965 policy->policy = new_policy->policy;
1966 pr_debug("setting range\n");
1967 ret = cpufreq_driver->setpolicy(new_policy);
1968 } else {
1969 if (new_policy->governor != policy->governor) {
1970 /* save old, working values */
1971 struct cpufreq_governor *old_gov = policy->governor;
1973 pr_debug("governor switch\n");
1975 /* end old governor */
1976 if (policy->governor) {
1977 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1978 up_write(&policy->rwsem);
1979 __cpufreq_governor(policy,
1980 CPUFREQ_GOV_POLICY_EXIT);
1981 down_write(&policy->rwsem);
1984 /* start new governor */
1985 policy->governor = new_policy->governor;
1986 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1987 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1988 failed = 0;
1989 } else {
1990 up_write(&policy->rwsem);
1991 __cpufreq_governor(policy,
1992 CPUFREQ_GOV_POLICY_EXIT);
1993 down_write(&policy->rwsem);
1997 if (failed) {
1998 /* new governor failed, so re-start old one */
1999 pr_debug("starting governor %s failed\n",
2000 policy->governor->name);
2001 if (old_gov) {
2002 policy->governor = old_gov;
2003 __cpufreq_governor(policy,
2004 CPUFREQ_GOV_POLICY_INIT);
2005 __cpufreq_governor(policy,
2006 CPUFREQ_GOV_START);
2008 ret = -EINVAL;
2009 goto error_out;
2011 /* might be a policy change, too, so fall through */
2013 pr_debug("governor: change or update limits\n");
2014 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2017 error_out:
2018 return ret;
2022 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2023 * @cpu: CPU which shall be re-evaluated
2025 * Useful for policy notifiers which have different needs
2026 * at different times.
2028 int cpufreq_update_policy(unsigned int cpu)
2030 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2031 struct cpufreq_policy new_policy;
2032 int ret;
2034 if (!policy) {
2035 ret = -ENODEV;
2036 goto no_policy;
2039 down_write(&policy->rwsem);
2041 pr_debug("updating policy for CPU %u\n", cpu);
2042 memcpy(&new_policy, policy, sizeof(*policy));
2043 new_policy.min = policy->user_policy.min;
2044 new_policy.max = policy->user_policy.max;
2045 new_policy.policy = policy->user_policy.policy;
2046 new_policy.governor = policy->user_policy.governor;
2049 * BIOS might change freq behind our back
2050 * -> ask driver for current freq and notify governors about a change
2052 if (cpufreq_driver->get) {
2053 new_policy.cur = cpufreq_driver->get(cpu);
2054 if (!policy->cur) {
2055 pr_debug("Driver did not initialize current freq\n");
2056 policy->cur = new_policy.cur;
2057 } else {
2058 if (policy->cur != new_policy.cur && has_target())
2059 cpufreq_out_of_sync(cpu, policy->cur,
2060 new_policy.cur);
2064 ret = cpufreq_set_policy(policy, &new_policy);
2066 up_write(&policy->rwsem);
2068 cpufreq_cpu_put(policy);
2069 no_policy:
2070 return ret;
2072 EXPORT_SYMBOL(cpufreq_update_policy);
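/*
 * Example usage (a minimal sketch; the thermal hook is hypothetical):
 * code that adjusts limits from a CPUFREQ_ADJUST policy notifier (e.g. a
 * thermal driver) calls cpufreq_update_policy() to force re-evaluation.
 */
static void example_thermal_limit_changed(unsigned int cpu)
{
	cpufreq_update_policy(cpu);
}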
2074 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2075 unsigned long action, void *hcpu)
2077 unsigned int cpu = (unsigned long)hcpu;
2078 struct device *dev;
2079 bool frozen = false;
2081 dev = get_cpu_device(cpu);
2082 if (dev) {
2084 if (action & CPU_TASKS_FROZEN)
2085 frozen = true;
2087 switch (action & ~CPU_TASKS_FROZEN) {
2088 case CPU_ONLINE:
2089 __cpufreq_add_dev(dev, NULL, frozen);
2090 cpufreq_update_policy(cpu);
2091 break;
2093 case CPU_DOWN_PREPARE:
2094 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2095 break;
2097 case CPU_POST_DEAD:
2098 __cpufreq_remove_dev_finish(dev, NULL, frozen);
2099 break;
2101 case CPU_DOWN_FAILED:
2102 __cpufreq_add_dev(dev, NULL, frozen);
2103 break;
2106 return NOTIFY_OK;
2109 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2110 .notifier_call = cpufreq_cpu_callback,
2113 /*********************************************************************
2114 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2115 *********************************************************************/
2118 * cpufreq_register_driver - register a CPU Frequency driver
2119 * @driver_data: A struct cpufreq_driver containing the values
2120 * submitted by the CPU Frequency driver.
2122 * Registers a CPU Frequency driver to this core code. This code
2123 * returns zero on success, -EBUSY when another driver got here first
2124 * (and isn't unregistered in the meantime).
2127 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2129 unsigned long flags;
2130 int ret;
2132 if (cpufreq_disabled())
2133 return -ENODEV;
2135 if (!driver_data || !driver_data->verify || !driver_data->init ||
2136 !(driver_data->setpolicy || driver_data->target_index ||
2137 driver_data->target))
2138 return -EINVAL;
2140 pr_debug("trying to register driver %s\n", driver_data->name);
2142 if (driver_data->setpolicy)
2143 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2145 write_lock_irqsave(&cpufreq_driver_lock, flags);
2146 if (cpufreq_driver) {
2147 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2148 return -EEXIST;
2150 cpufreq_driver = driver_data;
2151 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2153 ret = subsys_interface_register(&cpufreq_interface);
2154 if (ret)
2155 goto err_null_driver;
2157 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2158 int i;
2159 ret = -ENODEV;
2161 /* check for at least one working CPU */
2162 for (i = 0; i < nr_cpu_ids; i++)
2163 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2164 ret = 0;
2165 break;
2168 /* if all ->init() calls failed, unregister */
2169 if (ret) {
2170 pr_debug("no CPU initialized for driver %s\n",
2171 driver_data->name);
2172 goto err_if_unreg;
2176 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2177 pr_debug("driver %s up and running\n", driver_data->name);
2179 return 0;
2180 err_if_unreg:
2181 subsys_interface_unregister(&cpufreq_interface);
2182 err_null_driver:
2183 write_lock_irqsave(&cpufreq_driver_lock, flags);
2184 cpufreq_driver = NULL;
2185 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2186 return ret;
2188 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
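/*
 * Example usage (a minimal sketch tying the pieces together;
 * example_freq_table and example_cpufreq_init() refer to the hypothetical
 * sketch after cpufreq_generic_init() above):
 */
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the hardware to example_freq_table[index].frequency */
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_cpufreq_init,
};

/* in module init: cpufreq_register_driver(&example_cpufreq_driver); */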
2191 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2193 * Unregister the current CPUFreq driver. Only call this if you have
2194 * the right to do so, i.e. if you have succeeded in initialising before!
2195 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2196 * currently not initialised.
2198 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2200 unsigned long flags;
2202 if (!cpufreq_driver || (driver != cpufreq_driver))
2203 return -EINVAL;
2205 pr_debug("unregistering driver %s\n", driver->name);
2207 subsys_interface_unregister(&cpufreq_interface);
2208 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2210 down_write(&cpufreq_rwsem);
2211 write_lock_irqsave(&cpufreq_driver_lock, flags);
2213 cpufreq_driver = NULL;
2215 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2216 up_write(&cpufreq_rwsem);
2218 return 0;
2220 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2222 static int __init cpufreq_core_init(void)
2224 if (cpufreq_disabled())
2225 return -ENODEV;
2227 cpufreq_global_kobject = kobject_create();
2228 BUG_ON(!cpufreq_global_kobject);
2229 register_syscore_ops(&cpufreq_syscore_ops);
2231 return 0;
2233 core_initcall(cpufreq_core_init);