/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                     Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
#define DEF_FREQUENCY_STEP			(5)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
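
/*
 * Note on units (derived from the code below): up_threshold and
 * down_threshold are load percentages, freq_step is a percentage of
 * policy->max, and sampling_down_factor multiplies the sampling interval
 * used for the frequency-decrease check.
 */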

static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
					   struct cpufreq_policy *policy)
{
	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;

	/* max freq cannot be less than 100. But who knows... */
	if (unlikely(freq_target == 0))
		freq_target = DEF_FREQUENCY_STEP;

	return freq_target;
}
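
/*
 * Worked example (illustrative numbers, not from the code): with
 * freq_step = 5 and policy->max = 2000000 kHz, each step is
 * (5 * 2000000) / 100 = 100000 kHz. The DEF_FREQUENCY_STEP fallback only
 * matters when freq_step * policy->max < 100, where the integer division
 * truncates to zero.
 */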

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default); if it is, we try to increase the frequency. Every
 * sampling_rate * sampling_down_factor we check whether the current idle
 * time is more than 80% (default); if it is, we try to decrease the
 * frequency.
 *
 * Both frequency increases and decreases happen in steps of 5% (default)
 * of the maximum frequency.
 */
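
/*
 * Worked example with the defaults and an (illustrative) 2 GHz policy->max:
 * a sample with load 90% raises requested_freq by 100 MHz; a sample with
 * load 10% lowers it by 100 MHz, but only once every sampling_down_factor
 * samples (1 by default). requested_freq is always clamped to
 * [policy->min, policy->max].
 */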
static void cs_check_cpu(int cpu, unsigned int load)
{
	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	/*
	 * break out if we 'cannot' reduce the speed as the user might
	 * want freq_step to be zero
	 */
	if (cs_tuners->freq_step == 0)
		return;

	/* Check for frequency increase */
	if (load > cs_tuners->up_threshold) {
		dbs_info->down_skip = 0;

		/* if we are already at full speed then break out early */
		if (dbs_info->requested_freq == policy->max)
			return;

		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq > policy->max)
			dbs_info->requested_freq = policy->max;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_H);
		return;
	}

	/* if sampling_down_factor is active break out early */
	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
		return;

	dbs_info->down_skip = 0;

	/* Check for frequency decrease */
	if (load < cs_tuners->down_threshold) {
		/*
		 * if we cannot reduce the frequency anymore, break out early
		 */
		if (policy->cur == policy->min)
			return;

		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
		if (dbs_info->requested_freq < policy->min)
			dbs_info->requested_freq = policy->min;

		__cpufreq_driver_target(policy, dbs_info->requested_freq,
				CPUFREQ_RELATION_L);
	}
}
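
/*
 * Note that the governor steps its own requested_freq rather than reading
 * policy->cur back each sample, so repeated small steps accumulate even if
 * the driver rounds each target to a nearby table entry. The cpufreq
 * transition notifier further down re-seeds requested_freq if it drifts
 * outside [policy->min, policy->max].
 */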
static void cs_dbs_timer(struct work_struct *work)
{
	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
			struct cs_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
		modify_all = false;
	else
		dbs_check_cpu(dbs_data, cpu);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
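
/*
 * The timer work runs on one CPU of the policy: it takes the shared
 * timer_mutex, skips the load evaluation if a full sampling period has not
 * yet elapsed (need_load_eval()), and then re-queues itself via
 * gov_queue_work() after 'delay', which is derived from sampling_rate.
 */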
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cs_cpu_dbs_info_s *dbs_info =
					&per_cpu(cs_cpu_dbs_info, freq->cpu);
	struct cpufreq_policy *policy;

	if (!dbs_info->enable)
		return 0;

	policy = dbs_info->cdbs.cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside the 'valid'
	 * ranges of frequency available to us, otherwise we do not change it
	 */
	if (dbs_info->requested_freq > policy->max
			|| dbs_info->requested_freq < policy->min)
		dbs_info->requested_freq = freq->new;

	return 0;
}

/************************** sysfs interface ************************/
static struct common_dbs_data cs_dbs_cdata;

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	cs_tuners->sampling_down_factor = input;
	return count;
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
	return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
		return -EINVAL;

	cs_tuners->up_threshold = input;
	return count;
}

static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);

	/* cannot be lower than 11 otherwise freq will not fall */
	if (ret != 1 || input < 11 || input > 100 ||
			input >= cs_tuners->up_threshold)
		return -EINVAL;

	cs_tuners->down_threshold = input;
	return count;
}

static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners->ignore_nice) /* nothing to do */
		return count;

	cs_tuners->ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
					&dbs_info->cdbs.prev_cpu_wall, 0);
		if (cs_tuners->ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}

static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * no need to test here if freq_step is zero as the user might actually
	 * want this, they would be crazy though :)
	 */
	cs_tuners->freq_step = input;
	return count;
}
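
/*
 * The show_store_one()/gov_sys_pol_attr_rw() helpers from
 * cpufreq_governor.h wrap the store_*() routines above into sysfs
 * show/store attribute pairs, once for the global (gov_sys) interface and
 * once for the per-policy (gov_pol) one; the attribute groups below pick
 * up the generated attributes.
 */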
show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};

static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};

/************************** sysfs end ************************/

static int cs_init(struct dbs_data *dbs_data)
{
	struct cs_dbs_tuners *tuners;

	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice = 0;
	tuners->freq_step = DEF_FREQUENCY_STEP;

	dbs_data->tuners = tuners;
	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
		jiffies_to_usecs(10);
	mutex_init(&dbs_data->mutex);
	return 0;
}
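
/*
 * min_sampling_rate is in microseconds. Assuming MIN_SAMPLING_RATE_RATIO
 * is 2 (as defined in cpufreq_governor.h) and HZ=250, jiffies_to_usecs(10)
 * is 40000, so the minimum sampling rate works out to roughly 80 ms.
 */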

static void cs_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
	.notifier_block = &cs_cpufreq_notifier_block,
};

static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.gov_ops = &cs_ops,
	.init = cs_init,
	.exit = cs_exit,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");
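
/*
 * When this governor is the default (CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE),
 * it is registered early via fs_initcall() so it is already available when
 * cpufreq drivers probe; otherwise a regular module_init() is sufficient.
 */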
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);