/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2004 Alexander Clouter <alex-kernel@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>

/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define MIN_FREQUENCY_UP_THRESHOLD              (0)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)

#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)
#define MIN_FREQUENCY_DOWN_THRESHOLD            (0)
#define MAX_FREQUENCY_DOWN_THRESHOLD            (100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling interval is the transition latency
 * of the processor scaled by DEF_SAMPLING_RATE_LATENCY_MULTIPLIER
 * (see below). The governor will work on any processor with transition
 * latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE                       (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE                       (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER    (100000)
#define DEF_SAMPLING_DOWN_FACTOR                (5)
#define TRANSITION_LATENCY_LIMIT                (10 * 1000)

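/*
 * Worked example (editor's illustration, using the formula applied in
 * CPUFREQ_GOV_START below): a CPU reporting a transition latency of
 * 1000 nS gets
 *
 *      def_sampling_rate = (1000 / 1000) * 100000 = 100000 uS
 *
 * i.e. the governor samples every 100 mS by default, and sysfs writes
 * to sampling_rate are then bounded to [50000, 50000000] uS by the
 * MIN_SAMPLING_RATE and MAX_SAMPLING_RATE macros above.
 */
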
static void do_dbs_timer(void *data);

struct cpu_dbs_info_s {
        struct cpufreq_policy   *cur_policy;
        unsigned int            prev_cpu_idle_up;
        unsigned int            prev_cpu_idle_down;
        unsigned int            enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

static unsigned int dbs_enable; /* number of CPUs using this policy */

static DECLARE_MUTEX(dbs_sem);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);

struct dbs_tuners {
        unsigned int            sampling_rate;
        unsigned int            sampling_down_factor;
        unsigned int            up_threshold;
        unsigned int            down_threshold;
        unsigned int            ignore_nice;
        unsigned int            freq_step;
};

static struct dbs_tuners dbs_tuners_ins = {
        .up_threshold           = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold         = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor   = DEF_SAMPLING_DOWN_FACTOR,
};

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}

#define define_one_ro(_name)                            \
static struct freq_attr _name =                         \
__ATTR(_name, 0444, show_##_name, NULL)

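/*
 * For reference, define_one_ro(sampling_rate_max) expands to:
 *
 *      static struct freq_attr sampling_rate_max =
 *              __ATTR(sampling_rate_max, 0444, show_sampling_rate_max, NULL);
 *
 * i.e. a read-only (0444) sysfs attribute wired to the show helper above.
 */
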
define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);

/* cpufreq_conservative Governor Tunables */
#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *unused, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
}

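/*
 * For reference, show_one(up_threshold, up_threshold) below expands to
 * a getter that prints the current value of that tunable:
 *
 *      static ssize_t show_up_threshold
 *      (struct cpufreq_policy *unused, char *buf)
 *      {
 *              return sprintf(buf, "%u\n", dbs_tuners_ins.up_threshold);
 *      }
 */
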
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice, ignore_nice);
show_one(freq_step, freq_step);

static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        down(&dbs_sem);
        dbs_tuners_ins.sampling_down_factor = input;
        up(&dbs_sem);

        return count;
}

static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);

        down(&dbs_sem);
        if (ret != 1 || input > MAX_SAMPLING_RATE ||
                        input < MIN_SAMPLING_RATE) {
                up(&dbs_sem);
                return -EINVAL;
        }

        dbs_tuners_ins.sampling_rate = input;
        up(&dbs_sem);

        return count;
}

static ssize_t store_up_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);

        down(&dbs_sem);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD ||
                        input <= dbs_tuners_ins.down_threshold) {
                up(&dbs_sem);
                return -EINVAL;
        }

        dbs_tuners_ins.up_threshold = input;
        up(&dbs_sem);

        return count;
}

static ssize_t store_down_threshold(struct cpufreq_policy *unused,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);

        down(&dbs_sem);
        if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
                        input < MIN_FREQUENCY_DOWN_THRESHOLD ||
                        input >= dbs_tuners_ins.up_threshold) {
                up(&dbs_sem);
                return -EINVAL;
        }

        dbs_tuners_ins.down_threshold = input;
        up(&dbs_sem);

        return count;
}

static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        unsigned int j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        down(&dbs_sem);
        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
                up(&dbs_sem);
                return count;
        }
        dbs_tuners_ins.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
        for_each_cpu_mask(j, policy->cpus) {
                struct cpu_dbs_info_s *j_dbs_info;
                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                j_dbs_info->cur_policy = policy;

                j_dbs_info->prev_cpu_idle_up =
                        kstat_cpu(j).cpustat.idle +
                        kstat_cpu(j).cpustat.iowait +
                        ( !dbs_tuners_ins.ignore_nice
                          ? kstat_cpu(j).cpustat.nice : 0 );
                j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
        }
        up(&dbs_sem);

        return count;
}

static ssize_t store_freq_step(struct cpufreq_policy *policy,
                const char *buf, size_t count)
{
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /* no need to test here if freq_step is zero as the user might actually
         * want this, they would be crazy though :) */
        down(&dbs_sem);
        dbs_tuners_ins.freq_step = input;
        up(&dbs_sem);

        return count;
}

#define define_one_rw(_name)                            \
static struct freq_attr _name =                         \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(down_threshold);
define_one_rw(ignore_nice);
define_one_rw(freq_step);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_max.attr,
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group dbs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/************************** sysfs end ************************/

static void dbs_check_cpu(int cpu)
{
        unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
        unsigned int total_idle_ticks;
        unsigned int freq_step;
        unsigned int freq_down_sampling_rate;
        static int down_skip[NR_CPUS];
        static int requested_freq[NR_CPUS];
        static unsigned short init_flag = 0;
        struct cpu_dbs_info_s *this_dbs_info;
        struct cpu_dbs_info_s *dbs_info;

        struct cpufreq_policy *policy;
        unsigned int j;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
        if (!this_dbs_info->enable)
                return;

        policy = this_dbs_info->cur_policy;

        if (init_flag == 0) {
                for ( /* NULL */; init_flag < NR_CPUS; init_flag++) {
                        dbs_info = &per_cpu(cpu_dbs_info, init_flag);
                        /* seed every slot with that CPU's current speed
                         * (index by init_flag, not cpu, so each entry is
                         * initialised exactly once) */
                        requested_freq[init_flag] = dbs_info->cur_policy->cur;
                }
                init_flag = 1;
        }

        /*
         * The default safe range is 20% to 80%
         * Every sampling_rate, we check
         *      - If current idle time is less than 20%, then we try to
         *        increase frequency
         * Every sampling_rate*sampling_down_factor, we check
         *      - If current idle time is more than 80%, then we try to
         *        decrease frequency
         *
         * Any frequency increase takes it to the maximum frequency.
         * Frequency reduction happens at minimum steps of
         * 5% (default) of max_frequency
         */

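        /*
         * Worked example (editor's illustration): with the default
         * up_threshold of 80, a sampling_rate of 100000 uS and HZ=1000,
         *
         *      up_idle_ticks = (100 - 80) * usecs_to_jiffies(100000)
         *                    = 20 * 100 = 2000
         *
         * Since idle_ticks is scaled by 100 before the comparison, a
         * frequency raise is attempted when the least-idle CPU in the
         * policy was idle for fewer than 20 of the ~100 jiffies in the
         * sampling window, i.e. under 20% idle.
         */
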
        /* Check for frequency increase */
        total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
                kstat_cpu(cpu).cpustat.iowait;
        /* consider 'nice' tasks as 'idle' time too if required */
        if (dbs_tuners_ins.ignore_nice == 0)
                total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
        idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_up;
        this_dbs_info->prev_cpu_idle_up = total_idle_ticks;

        for_each_cpu_mask(j, policy->cpus) {
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;

                if (j == cpu)
                        continue;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                /* Check for frequency increase */
                total_idle_ticks = kstat_cpu(j).cpustat.idle +
                        kstat_cpu(j).cpustat.iowait;
                /* consider 'nice' too? */
                if (dbs_tuners_ins.ignore_nice == 0)
                        total_idle_ticks += kstat_cpu(j).cpustat.nice;
                tmp_idle_ticks = total_idle_ticks -
                        j_dbs_info->prev_cpu_idle_up;
                j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
                usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        if (idle_ticks < up_idle_ticks) {
                /* if we are already at full speed then break out early */
                if (requested_freq[cpu] == policy->max)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                requested_freq[cpu] += freq_step;
                if (requested_freq[cpu] > policy->max)
                        requested_freq[cpu] = policy->max;

                __cpufreq_driver_target(policy, requested_freq[cpu],
                        CPUFREQ_RELATION_H);
                down_skip[cpu] = 0;
                this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
                return;
        }

        /* Check for frequency decrease */
        down_skip[cpu]++;
        if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
                return;

        total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
                kstat_cpu(cpu).cpustat.iowait;
        /* consider 'nice' too? */
        if (dbs_tuners_ins.ignore_nice == 0)
                total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
        idle_ticks = total_idle_ticks -
                this_dbs_info->prev_cpu_idle_down;
        this_dbs_info->prev_cpu_idle_down = total_idle_ticks;

        for_each_cpu_mask(j, policy->cpus) {
                unsigned int tmp_idle_ticks;
                struct cpu_dbs_info_s *j_dbs_info;

                if (j == cpu)
                        continue;

                j_dbs_info = &per_cpu(cpu_dbs_info, j);
                /* Check for frequency decrease */
                total_idle_ticks = kstat_cpu(j).cpustat.idle +
                        kstat_cpu(j).cpustat.iowait;
                /* consider 'nice' too? */
                if (dbs_tuners_ins.ignore_nice == 0)
                        total_idle_ticks += kstat_cpu(j).cpustat.nice;
                tmp_idle_ticks = total_idle_ticks -
                        j_dbs_info->prev_cpu_idle_down;
                j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

                if (tmp_idle_ticks < idle_ticks)
                        idle_ticks = tmp_idle_ticks;
        }

        /* Scale idle ticks by 100 and compare with up and down ticks */
        idle_ticks *= 100;
        down_skip[cpu] = 0;

        freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
                dbs_tuners_ins.sampling_down_factor;
        down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
                usecs_to_jiffies(freq_down_sampling_rate);

        if (idle_ticks > down_idle_ticks) {
                /* if we are already at the lowest speed then break out early
                 * or if we 'cannot' reduce the speed as the user might want
                 * freq_step to be zero */
                if (requested_freq[cpu] == policy->min
                                || dbs_tuners_ins.freq_step == 0)
                        return;

                freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_step == 0))
                        freq_step = 5;

                requested_freq[cpu] -= freq_step;
                if (requested_freq[cpu] < policy->min)
                        requested_freq[cpu] = policy->min;

                __cpufreq_driver_target(policy,
                        requested_freq[cpu], CPUFREQ_RELATION_H);
                return;
        }
}

static void do_dbs_timer(void *data)
{
        int i;

        down(&dbs_sem);
        for_each_online_cpu(i)
                dbs_check_cpu(i);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
        up(&dbs_sem);
}

static inline void dbs_timer_init(void)
{
        INIT_WORK(&dbs_work, do_dbs_timer, NULL);
        schedule_delayed_work(&dbs_work,
                        usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}

static inline void dbs_timer_exit(void)
{
        cancel_delayed_work(&dbs_work);
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info_s *this_dbs_info;
        unsigned int j;

        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

        switch (event) {
        case CPUFREQ_GOV_START:
                if ((!cpu_online(cpu)) ||
                    (!policy->cur))
                        return -EINVAL;

                if (policy->cpuinfo.transition_latency >
                                (TRANSITION_LATENCY_LIMIT * 1000))
                        return -EINVAL;

                if (this_dbs_info->enable) /* Already enabled */
                        break;

                down(&dbs_sem);
                for_each_cpu_mask(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
                        j_dbs_info->cur_policy = policy;

                        j_dbs_info->prev_cpu_idle_up =
                                kstat_cpu(j).cpustat.idle +
                                kstat_cpu(j).cpustat.iowait +
                                ( !dbs_tuners_ins.ignore_nice
                                  ? kstat_cpu(j).cpustat.nice : 0 );
                        j_dbs_info->prev_cpu_idle_down
                                = j_dbs_info->prev_cpu_idle_up;
                }
                this_dbs_info->enable = 1;
                sysfs_create_group(&policy->kobj, &dbs_attr_group);
                dbs_enable++;
                /*
                 * Start the timerschedule work, when this governor
                 * is used for first time
                 */
                if (dbs_enable == 1) {
                        unsigned int latency;
                        /* policy latency is in nS. Convert it to uS first */
                        latency = policy->cpuinfo.transition_latency;
                        if (latency < 1000)
                                latency = 1000;

                        def_sampling_rate = (latency / 1000) *
                                        DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
                        dbs_tuners_ins.ignore_nice = 0;
                        dbs_tuners_ins.freq_step = 5;

                        dbs_timer_init();
                }

                up(&dbs_sem);
                break;

        case CPUFREQ_GOV_STOP:
                down(&dbs_sem);
                this_dbs_info->enable = 0;
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
                /*
                 * Stop the timerschedule work, when this governor
                 * is used for the last time
                 */
                if (dbs_enable == 0)
                        dbs_timer_exit();

                up(&dbs_sem);
                break;

        case CPUFREQ_GOV_LIMITS:
                down(&dbs_sem);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                up(&dbs_sem);
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_dbs = {
        .name           = "conservative",
        .governor       = cpufreq_governor_dbs,
        .owner          = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_dbs);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        /* Make sure that the scheduled work is indeed not running */
        flush_scheduled_work();

        cpufreq_unregister_governor(&cpufreq_gov_dbs);
}

MODULE_AUTHOR("Alexander Clouter <alex-kernel@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);