/*
 * drivers/cpufreq/cpufreq_ondemand.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                    Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in uS.
 */
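/*
 * Worked example (illustrative numbers, not from the source): a CPU with
 * a 100 uS transition latency gets a default sampling rate of
 * 100 * LATENCY_MULTIPLIER = 100000 uS, i.e. the load is re-evaluated
 * every 100 ms, a thousand times the cost of one frequency switch.
 */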
static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO			(2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE			\
		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
#define MIN_SAMPLING_RATE			\
		(def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
 * Define the minimal settable sampling rate to the greater of:
 *  - "HW transition latency" * 100 (same as default sampling / 10)
 *  - MIN_STAT_SAMPLING_RATE
 * To avoid that userspace shoots itself.
 */
static unsigned int minimum_sampling_rate(void)
{
	return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
}
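/*
 * Worked example (illustrative, assuming HZ=250): jiffies_to_usecs(10)
 * is 40000 uS, so MIN_STAT_SAMPLING_RATE is 80000 uS. With a default
 * sampling rate of 100000 uS, def_sampling_rate / 10 is only 10000 uS,
 * so the statistics floor of 80000 uS wins.
 */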
/* This will also vanish soon with removing sampling_rate_max */
#define MAX_SAMPLING_RATE			(500 * def_sampling_rate)
#define LATENCY_MULTIPLIER			(1000)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
static void do_dbs_timer(struct work_struct *work);

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	/*
	 * enable is set while the governor runs on this CPU; sample_type
	 * selects between a normal load evaluation and the powersave_bias
	 * sub-sample spent at the low frequency.
	 */
	unsigned int enable:1,
		sample_type:1;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */
/*
 * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
 * lock and dbs_mutex. The cpu_hotplug lock should always be held before
 * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
 * the cpu_hotplug lock should be taken before that. Note that the cpu_hotplug
 * lock is recursive for the same process. -Venki
 * DEADLOCK ALERT! (2): do_dbs_timer() must not take dbs_mutex, because it
 * would deadlock with cancel_delayed_work_sync(), which is needed for
 * raceless workqueue teardown.
 */
static DEFINE_MUTEX(dbs_mutex);
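/*
 * A minimal sketch of the ordering rule above (not code from this file):
 * any path that ends up in __cpufreq_driver_target() while holding
 * dbs_mutex must nest the locks as
 *
 *	get_online_cpus();		(cpu_hotplug lock first)
 *	mutex_lock(&dbs_mutex);
 *	__cpufreq_driver_target(...);
 *	mutex_unlock(&dbs_mutex);
 *	put_online_cpus();
 *
 * Taking dbs_mutex first and the cpu_hotplug lock second can deadlock.
 */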
static struct workqueue_struct *kondemand_wq;
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = cur_wall_time;

	return idle_time;
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	/* -1ULL means NO_HZ micro-accounting is unavailable on this CPU */
	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
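/*
 * Worked example (illustrative numbers): with powersave_bias = 100
 * (10%), a request for 2000 MHz is reduced to freq_avg = 1800 MHz.
 * If the table only has 1600 and 2000 MHz, freq_lo = 1600 and
 * freq_hi = 2000, and jiffies_hi = (1800 - 1600) / (2000 - 1600) of
 * the sampling window: half the window at 2000 MHz and half at
 * 1600 MHz, averaging out to the requested 1800 MHz.
 */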
static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
		dbs_info->freq_table = cpufreq_frequency_get_table(i);
		dbs_info->freq_lo = 0;
	}
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
}
static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
{
	static int print_once;

	if (!print_once) {
		printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
		       "sysfs file is deprecated - used by: %s\n",
		       current->comm);
		print_once = 1;
	}
	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
}
#define define_one_ro(_name)		\
static struct freq_attr _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(sampling_rate_max);
define_one_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}
	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_up_threshold(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	mutex_lock(&dbs_mutex);
	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		mutex_unlock(&dbs_mutex);
		return -EINVAL;
	}

	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}
#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_rw(sampling_rate);
define_one_rw(up_threshold);
define_one_rw(ignore_nice_load);
define_one_rw(powersave_bias);
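/*
 * Usage sketch (illustrative; the exact path depends on the sysfs layout
 * of the running kernel): with the group registered under a policy's
 * kobject, the tunables appear as files such as
 *
 *	/sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
 *
 * and can be read or written from a shell, e.g.
 *	echo 90 > .../ondemand/up_threshold
 */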
static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	if (!this_dbs_info->enable)
		return;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;
	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */
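	/*
	 * Worked example (illustrative numbers): with up_threshold = 80 and
	 * the CPU at policy->cur = 1000 MHz, a measured load of 85% gives
	 * load_freq = 85 * 1000 = 85000, which exceeds 80 * 1000 = 80000,
	 * so the governor jumps to policy->max.
	 */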
	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}
	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;
	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we keep 10 points (down_differential) under the threshold.
	 */
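	/*
	 * Worked example (illustrative numbers): with up_threshold = 80,
	 * down_differential = 10 and the CPU at 1000 MHz, a load of 40%
	 * gives max_load_freq = 40000 < (80 - 10) * 1000 = 70000, so
	 * freq_next = 40000 / 70 ~= 571 MHz, the lowest frequency that
	 * would still keep the load under the threshold.
	 */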
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
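	/*
	 * Worked example (illustrative numbers): if delay is 100 jiffies
	 * and jiffies % 100 == 37, the timer is queued 63 jiffies out, so
	 * every CPU's work lands near a multiple of 100 jiffies and the
	 * per-CPU samples stay roughly aligned.
	 */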
	if (lock_policy_rwsem_write(cpu) < 0)
		return;

	if (!dbs_info->enable) {
		unlock_policy_rwsem_write(cpu);
		return;
	}

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	unlock_policy_rwsem_write(cpu);
}
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	ondemand_powersave_bias_init();
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work_sync(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		if (this_dbs_info->enable) /* Already enabled */
			break;

		mutex_lock(&dbs_mutex);
		dbs_enable++;

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
		if (rc) {
			dbs_enable--;
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		/*
		 * Start the timerschedule work, when this governor
		 * is used for the first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			def_sampling_rate =
				max(latency * LATENCY_MULTIPLIER,
				    MIN_STAT_SAMPLING_RATE);

			dbs_tuners_ins.sampling_rate = def_sampling_rate;
		}
		dbs_timer_init(this_dbs_info);

		mutex_unlock(&dbs_mutex);
		break;
	case CPUFREQ_GOV_STOP:
		mutex_lock(&dbs_mutex);
		dbs_timer_exit(this_dbs_info);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);

		break;
	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->max,
						CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
						policy->min,
						CPUFREQ_RELATION_L);
		mutex_unlock(&dbs_mutex);
		break;
	}
	return 0;
}
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
	}

	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
		   "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);