/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names short and simple.
 */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(0)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
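
/*
 * Worked example (hypothetical numbers, for illustration only): a CPU
 * reporting a transition latency of 10,000 nS gets
 *	def_sampling_rate = (10000 / 1000) * 1000 = 10,000 uS,
 * i.e. the load is re-evaluated every 10 mS, and the rate then stays
 * tunable between MIN_SAMPLING_RATE = 5,000 uS and
 * MAX_SAMPLING_RATE = 5,000,000 uS.
 */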
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;
	unsigned int		prev_cpu_idle_down;
	unsigned int		enable;		/* non-zero while this CPU is governed */
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DECLARE_MUTEX(dbs_sem);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);
struct dbs_tuners {
	unsigned int		sampling_rate;
	unsigned int		sampling_down_factor;
	unsigned int		up_threshold;
	unsigned int		down_threshold;
	unsigned int		ignore_nice;
	unsigned int		freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	/* nice time counts as idle unless ignore_nice is set */
	return kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		(!dbs_tuners_ins.ignore_nice ?
		 kstat_cpu(cpu).cpustat.nice :
		 0);
}
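
/*
 * Illustrative note: with the default ignore_nice == 0, a CPU that has
 * accumulated 100 idle, 20 iowait and 30 nice ticks is reported as 150
 * "idle" ticks; with ignore_nice set, the same CPU reports 120, since
 * nice time is then treated as busy time.
 */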
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
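
/*
 * For reference, define_one_ro(sampling_rate_max) above expands to
 * roughly:
 *
 *	static struct freq_attr sampling_rate_max =
 *	__ATTR(sampling_rate_max, 0444, show_sampling_rate_max, NULL);
 *
 * i.e. a read-only sysfs attribute wired to its show_* handler.
 */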
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
show_one(freq_step, freq_step);
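
/*
 * As an example, show_one(up_threshold, up_threshold) above expands to
 * roughly:
 *
 *	static ssize_t show_up_threshold
 *	(struct cpufreq_policy *unused, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", dbs_tuners_ins.up_threshold);
 *	}
 */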
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	down(&dbs_sem);
	dbs_tuners_ins.sampling_down_factor = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD ||
			input <= dbs_tuners_ins.down_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf (buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
			input >= dbs_tuners_ins.up_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	down(&dbs_sem);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		up(&dbs_sem);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	up(&dbs_sem);

	return count;
}
static ssize_t store_freq_step(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf (buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/* no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :) */
	down(&dbs_sem);
	dbs_tuners_ins.freq_step = input;
	up(&dbs_sem);

	return count;
}
#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
define_one_rw(freq_step);
static struct attribute * dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/************************** sysfs end ************************/
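
/*
 * Usage sketch (assuming the usual cpufreq sysfs layout; the exact path
 * can differ by kernel configuration): once this governor is active on
 * cpu0, the tunables above can be read and written from userspace, e.g.
 *
 *	cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *	echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *	echo 20000 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate
 */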
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int freq_down_step;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase frequency
	 * Every sampling_rate*sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease frequency
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of max_frequency
	 */
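
	/*
	 * Worked example (hypothetical values): with HZ=1000 a default
	 * 10,000 uS sampling_rate is 10 jiffies per sample. Idle ticks are
	 * scaled by 100 below, so with up_threshold = 80:
	 *	up_idle_ticks = (100 - 80) * 10 = 200
	 * and a raise to policy->max is attempted whenever the least-idle
	 * CPU was idle for fewer than 2 of the last 10 ticks (load > 80%).
	 */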
	/* Check for frequency increase */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		down_skip[cpu] = 0;
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;

			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->prev_cpu_idle_down =
				j_dbs_info->prev_cpu_idle_up;
		}
		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		__cpufreq_driver_target(policy, policy->max,
			CPUFREQ_RELATION_H);
		return;
	}

	/* Check for frequency decrease */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency decrease */
		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	down_skip[cpu] = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);
	if (idle_ticks > down_idle_ticks) {
		/* if we are already at the lowest speed then break out early
		 * or if we 'cannot' reduce the speed as the user might want
		 * freq_step to be zero */
		if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
			return;

		freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_down_step == 0))
			freq_down_step = 5;
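
		/*
		 * Illustration (hypothetical policy values): with the
		 * default freq_step of 5 and policy->max = 2,000,000 kHz,
		 *	freq_down_step = (5 * 2000000) / 100 = 100,000 kHz,
		 * so each decrease steps the target down by 100 MHz.
		 */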
		__cpufreq_driver_target(policy,
				policy->cur - freq_down_step,
				CPUFREQ_RELATION_H);
		return;
	}
}
static void do_dbs_timer(void *data)
{
	int i;
	down(&dbs_sem);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	up(&dbs_sem);
}
static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	return;
}
static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
	return;
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timer-scheduled work when this governor is
		 * used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */

			latency = policy->cpuinfo.transition_latency;
			/* avoid a zero sampling rate on CPUs that report
			 * a sub-microsecond latency */
			if (latency < 1000)
				latency = 1000;

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;
			dbs_tuners_ins.freq_step = 5;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;
	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer-scheduled work when this governor is
		 * stopped on the last CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);
		break;
	case CPUFREQ_GOV_LIMITS:
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}
static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR ("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION ("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE ("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);