/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/sysctl.h>
#include <linux/types.h>
#include <linux/sysfs.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
/*
 * dbs is used in this file as shorthand for demand-based switching;
 * it keeps variable names short and simple.
 */

#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using an appropriate sampling rate.
 * For CPUs with transition latency > 10mS (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in uS.
 */
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE			(def_sampling_rate / 2)
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER	(1000)
#define DEF_SAMPLING_DOWN_FACTOR		(10)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000)
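
/*
 * Example (illustrative numbers): a driver reporting a 10,000 nS
 * transition latency yields def_sampling_rate = (10000 / 1000) *
 * DEF_SAMPLING_RATE_LATENCY_MULTIPLIER = 10,000 uS, so the load is
 * re-evaluated every 10 ms (see CPUFREQ_GOV_START below).
 */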
static void do_dbs_timer(void *data);
struct cpu_dbs_info_s {
	struct cpufreq_policy	*cur_policy;
	unsigned int		prev_cpu_idle_up;
	unsigned int		prev_cpu_idle_down;
	unsigned int		enable;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */

static DECLARE_MUTEX(dbs_sem);
static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);
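
/*
 * dbs_sem serializes governor start/stop, sysfs tunable updates and the
 * periodic worker; dbs_work is the delayed work item that drives the
 * sampling loop.
 */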
struct dbs_tuners {
	unsigned int		sampling_rate;
	unsigned int		sampling_down_factor;
	unsigned int		up_threshold;
	unsigned int		ignore_nice;
};

static struct dbs_tuners dbs_tuners_ins = {
	.up_threshold		= DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor	= DEF_SAMPLING_DOWN_FACTOR,
};
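
/*
 * Idle time as seen by the governor: the idle and iowait tick counters,
 * plus the nice counter while ignore_nice is 0, so that by default time
 * spent in niced tasks is treated as idle rather than as load.
 */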
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
{
	return kstat_cpu(cpu).cpustat.idle +
		kstat_cpu(cpu).cpustat.iowait +
		(!dbs_tuners_ins.ignore_nice ?
		 kstat_cpu(cpu).cpustat.nice : 0);
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}

static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)				\
static ssize_t show_##file_name					\
(struct cpufreq_policy *unused, char *buf)			\
{								\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);	\
}
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
show_one(ignore_nice, ignore_nice);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	down(&dbs_sem);
	dbs_tuners_ins.sampling_down_factor = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_SAMPLING_RATE ||
			input < MIN_SAMPLING_RATE) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.sampling_rate = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	down(&dbs_sem);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		up(&dbs_sem);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	up(&dbs_sem);

	return count;
}
static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	down(&dbs_sem);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		up(&dbs_sem);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *j_dbs_info;
		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
	}
	up(&dbs_sem);

	return count;
}
#define define_one_rw(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
define_one_rw(ignore_nice);
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&ignore_nice.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
static void dbs_check_cpu(int cpu)
{
	unsigned int idle_ticks, up_idle_ticks, total_ticks;
	unsigned int freq_next;
	unsigned int freq_down_sampling_rate;
	static int down_skip[NR_CPUS];
	struct cpu_dbs_info_s *this_dbs_info;
	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
	if (!this_dbs_info->enable)
		return;

	policy = this_dbs_info->cur_policy;
	/*
	 * Every sampling_rate we check whether the current idle time is
	 * less than 20% (default); if so, we try to increase the frequency.
	 * Every sampling_rate*sampling_down_factor we look for the lowest
	 * frequency that can sustain the load while keeping idle time over
	 * 30%. If such a frequency exists, we try to decrease to it.
	 *
	 * Any frequency increase takes the CPU to the maximum frequency.
	 * Frequency reduction happens in minimum steps of 5% (default)
	 * of the current frequency.
	 */
	/* Check for frequency increase */
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = get_cpu_idle_time(j);
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_up;
		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	/* Scale idle ticks by 100 and compare with up and down ticks */
	idle_ticks *= 100;
	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
		usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
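
	/*
	 * Example (illustrative numbers): with up_threshold = 80 and a
	 * sampling_rate whose jiffy equivalent is 100 ticks, up_idle_ticks
	 * is (100 - 80) * 100 = 2000. Since idle_ticks was scaled by 100
	 * above, a CPU that was idle for fewer than 20 of those 100 ticks
	 * (less than 20% idle) triggers a jump to the maximum frequency.
	 */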
	if (idle_ticks < up_idle_ticks) {
		down_skip[cpu] = 0;
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;

			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->prev_cpu_idle_down =
					j_dbs_info->prev_cpu_idle_up;
		}
		/* if we are already at full speed then break out early */
		if (policy->cur == policy->max)
			return;

		__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		return;
	}
	/* Check for frequency decrease */
	down_skip[cpu]++;
	if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
		return;
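
	/*
	 * With the default sampling_down_factor of 10, a frequency
	 * decrease is evaluated only once every 10 samples, while an
	 * increase is considered at every sample.
	 */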
	idle_ticks = UINT_MAX;
	for_each_cpu_mask(j, policy->cpus) {
		unsigned int tmp_idle_ticks, total_idle_ticks;
		struct cpu_dbs_info_s *j_dbs_info;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);
		total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
		tmp_idle_ticks = total_idle_ticks -
			j_dbs_info->prev_cpu_idle_down;
		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;

		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	down_skip[cpu] = 0;
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/* Compute how many ticks there are between two measurements */
	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
		dbs_tuners_ins.sampling_down_factor;
	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
	freq_next = (freq_next * policy->cur) /
			(dbs_tuners_ins.up_threshold - 10);
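
	/*
	 * Example (illustrative numbers): at 40% busy with the default
	 * up_threshold of 80, freq_next = (40 * cur) / 70, about 57% of
	 * the current frequency; the same load would then run at roughly
	 * 70% busy, 10 points under the up threshold.
	 */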
	if (freq_next <= ((policy->cur * 95) / 100))
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}
static void do_dbs_timer(void *data)
{
	int i;

	down(&dbs_sem);
	for_each_online_cpu(i)
		dbs_check_cpu(i);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
	up(&dbs_sem);
}
static inline void dbs_timer_init(void)
{
	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
	schedule_delayed_work(&dbs_work,
			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}
static inline void dbs_timer_exit(void)
{
	cancel_delayed_work(&dbs_work);
}
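
/*
 * Note: cancel_delayed_work() only cancels a pending work item; a handler
 * that is already running is not waited for here, which is why module
 * exit also calls flush_scheduled_work().
 */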
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) ||
		    (!policy->cur))
			return -EINVAL;

		if (policy->cpuinfo.transition_latency >
				(TRANSITION_LATENCY_LIMIT * 1000))
			return -EINVAL;
		if (this_dbs_info->enable) /* Already enabled */
			break;

		down(&dbs_sem);
		for_each_cpu_mask(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
			j_dbs_info->prev_cpu_idle_down =
					j_dbs_info->prev_cpu_idle_up;
		}
		this_dbs_info->enable = 1;
		sysfs_create_group(&policy->kobj, &dbs_attr_group);
		dbs_enable++;
		/*
		 * Start the timerschedule work, when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency;
			if (latency < 1000)
				latency = 1000;

			def_sampling_rate = (latency / 1000) *
					DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
			dbs_tuners_ins.sampling_rate = def_sampling_rate;
			dbs_tuners_ins.ignore_nice = 0;

			dbs_timer_init();
		}

		up(&dbs_sem);
		break;
	case CPUFREQ_GOV_STOP:
		down(&dbs_sem);
		this_dbs_info->enable = 0;
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		/*
		 * Stop the timerschedule work, when this governor
		 * is no longer used by any CPU
		 */
		if (dbs_enable == 0)
			dbs_timer_exit();

		up(&dbs_sem);
		break;
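
	/*
	 * GOV_LIMITS: the policy's min/max limits changed; clamp the
	 * current frequency back into the new range if it now falls
	 * outside it.
	 */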
	case CPUFREQ_GOV_LIMITS:
		down(&dbs_sem);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		up(&dbs_sem);
		break;
	}
	return 0;
}
static struct cpufreq_governor cpufreq_gov_dbs = {
	.name		= "ondemand",
	.governor	= cpufreq_governor_dbs,
	.owner		= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_dbs);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	/* Make sure that the scheduled work is indeed not running */
	flush_scheduled_work();

	cpufreq_unregister_governor(&cpufreq_gov_dbs);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

module_init(cpufreq_gov_dbs_init);
module_exit(cpufreq_gov_dbs_exit);