/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps keep the variable names shorter and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(0)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
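
/*
 * Worked example of the defaults above (illustrative numbers, not from
 * any particular CPU): a driver reporting a transition latency of
 * 10,000 nS gives def_sampling_rate = (10000 / 1000) * 1000 = 10,000 uS,
 * i.e. the load is re-evaluated every 10 mS. The writable range then
 * spans MIN_SAMPLING_RATE = 5,000 uS to MAX_SAMPLING_RATE = 5,000,000 uS.
 */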

static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;
	unsigned int		prev_cpu_idle_down;
	unsigned int		enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DECLARE_MUTEX(dbs_sem);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);

struct dbs_tuners {
	unsigned int		sampling_rate;
	unsigned int		sampling_down_factor;
	unsigned int		up_threshold;
	unsigned int		down_threshold;
	unsigned int		ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold		= DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)
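
/*
 * For reference, define_one_ro(sampling_rate_max) expands to a read-only
 * sysfs attribute:
 *
 *	static struct freq_attr sampling_rate_max =
 *		__ATTR(sampling_rate_max, 0444, show_sampling_rate_max, NULL);
 */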

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)				\
static ssize_t show_##file_name					\
(struct cpufreq_policy *unused, char *buf)			\
{								\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	down(&dbs_sem);
	dbs_tuners_ins.sampling_down_factor = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD ||
			input <= dbs_tuners_ins.down_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
			input >= dbs_tuners_ins.up_threshold) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.down_threshold = input;
	up(&dbs_sem);

	return count;
}

static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	down(&dbs_sem);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		up(&dbs_sem);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_cpu_mask(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->cur_policy = policy;

		j_dbs_info->prev_cpu_idle_up =
			kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait +
			(!dbs_tuners_ins.ignore_nice
			 ? kstat_cpu(j).cpustat.nice : 0);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	up(&dbs_sem);

	return count;
}

#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
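
/*
 * Typical tuning session from userspace (illustrative; assumes the usual
 * sysfs mount point and that cpu0 is running this governor):
 *
 *	# cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate_min
 *	# echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *	# echo 1 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice
 *
 * Values outside the ranges enforced by the store_* handlers above are
 * rejected with -EINVAL.
 */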

static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
	unsigned int total_idle_ticks;
	unsigned int freq_down_step;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * The default safe range is 20% to 80%
	 * Every sampling_rate, we check
	 *	- If current idle time is less than 20%, then we try to
	 *	  increase frequency
	 * Every sampling_rate*sampling_down_factor, we check
	 *	- If current idle time is more than 80%, then we try to
	 *	  decrease frequency
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% of max_frequency
	 */
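
	/*
	 * Worked example with the default tunables (up_threshold = 80,
	 * down_threshold = 20): a CPU idle for less than 20% of the last
	 * sampling interval is treated as busy and jumps straight to
	 * policy->max; one idle for more than 80% of the longer
	 * sampling_rate * sampling_down_factor window is stepped down
	 * by 5% of policy->max.
	 */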

	/* Check for frequency increase */
	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	/* consider 'nice' tasks as 'idle' time too if required */
	if (dbs_tuners_ins.ignore_nice == 0)
		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_up;
	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency increase */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		/* consider 'nice' too? */
		if (dbs_tuners_ins.ignore_nice == 0)
			total_idle_ticks += kstat_cpu(j).cpustat.nice;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (idle_ticks < up_idle_ticks) {
		__cpufreq_driver_target(policy, policy->max,
			CPUFREQ_RELATION_H);
		down_skip[cpu] = 0;
		this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
		return;
	}
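
	/*
	 * Example of the comparison above, assuming HZ=1000 and the
	 * 10,000 uS sampling rate worked out earlier: sampling_rate is
	 * 10 jiffies, so up_idle_ticks = (100 - 80) * 10 = 200. Since
	 * idle_ticks was scaled by 100, fewer than 2 idle jiffies out
	 * of 10 (i.e. more than 80% busy) triggers the jump to max.
	 */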

	/* Check for frequency decrease */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;

	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait;
	/* consider 'nice' too? */
	if (dbs_tuners_ins.ignore_nice == 0)
		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
	idle_ticks = total_idle_ticks -
		this_dbs_info->prev_cpu_idle_down;
	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		if (j == cpu)
			continue;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		/* Check for frequency decrease */
		total_idle_ticks = kstat_cpu(j).cpustat.idle +
			kstat_cpu(j).cpustat.iowait;
		/* consider 'nice' too? */
		if (dbs_tuners_ins.ignore_nice == 0)
			total_idle_ticks += kstat_cpu(j).cpustat.nice;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}

	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	down_skip[cpu] = 0;

	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
		usecs_to_jiffies(freq_down_sampling_rate);

	if (idle_ticks > down_idle_ticks) {
		freq_down_step = (5 * policy->max) / 100;

		/* max freq cannot be less than 100. But who knows.... */
		if (unlikely(freq_down_step == 0))
			freq_down_step = 5;

		__cpufreq_driver_target(policy,
			policy->cur - freq_down_step,
			CPUFREQ_RELATION_H);
		return;
	}
}
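
/*
 * Example step size for the 5% reduction above, assuming a policy->max
 * of 2,000,000 kHz (2 GHz): freq_down_step = (5 * 2000000) / 100 =
 * 100,000 kHz, so each decrease targets 100 MHz below policy->cur.
 */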

static void do_dbs_timer(void *data)
{
	int i;
	down(&dbs_sem);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	up(&dbs_sem);
}

static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}

static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up =
				kstat_cpu(j).cpustat.idle +
				kstat_cpu(j).cpustat.iowait +
				(!dbs_tuners_ins.ignore_nice
				 ? kstat_cpu(j).cpustat.nice : 0);
			j_dbs_info->prev_cpu_idle_down
				= j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timer-scheduled work when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency;
			if (latency == 0)
				latency = 1;	/* guard against a zero latency report */

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;

	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timer-scheduled work when this governor
		 * is no longer used on any CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);
		break;

	case CPUFREQ_GOV_LIMITS:
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);