/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <trace/power.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/delay.h>
#include <asm/uaccess.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
		"acpi-cpufreq", msg)

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)
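
/*
 * The P-state selection lives in the low 16 bits of IA32_PERF_CTL;
 * INTEL_MSR_RANGE masks that field so the MSR write path below can
 * preserve the upper bits of the register.
 */
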
struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int max_freq;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);

DEFINE_TRACE(power_mark);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
	    !cpu_has(cpu, X86_FEATURE_EST))
		return 0;

	return 1;
}

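/*
 * Translate a raw value read from the P-state status port back into a
 * frequency by matching it against the per-state status values from the
 * ACPI tables; returns 0 if no state matches.
 */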
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

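/*
 * As above, but for the MSR path: only the INTEL_MSR_RANGE bits of
 * PERF_STATUS identify the P-state, and freq_table entries carry an
 * index into the ACPI state array because duplicate-frequency states
 * are skipped when the table is built. Falls back to the first table
 * entry when nothing matches.
 */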
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	msr &= INTEL_MSR_RANGE;
	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

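/*
 * A drv_cmd bundles everything one P-state access needs: the access
 * type (MSR vs. I/O port), the register address, the value, and the
 * mask of CPUs the access has to be performed on.
 */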
struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct {
			u32 reg;
		} msr;
		struct {
			u16 port;
			u8 bit_width;
		} io;
	} addr;
	u32 val;
};

static long do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
	return 0;
}

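/*
 * The MSR write is a read-modify-write: only the INTEL_MSR_RANGE bits
 * carry the requested P-state; everything above them is preserved.
 */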
static long do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
	return 0;
}

static void drv_read(struct drv_cmd *cmd)
{
	cmd->val = 0;

	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
}

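/*
 * Reads can sample any one CPU in the mask, but writes must reach every
 * CPU that shares the P-state domain, hence the for_each_cpu() loop.
 */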
static void drv_write(struct drv_cmd *cmd)
{
	unsigned int i;

	for_each_cpu(i, cmd->mask) {
		work_on_cpu(i, do_drv_write, cmd);
	}
}

static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	dprintk("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

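/*
 * APERF/MPERF samples are kept as a union of split 32-bit halves and
 * the whole 64-bit value, so 32-bit kernels can work on the halves and
 * avoid a 64-bit divide while 64-bit kernels use the full counters.
 */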
struct perf_cur {
	union {
		struct {
			u32 lo;
			u32 hi;
		} split;
		u64 whole;
	} aperf_cur, mperf_cur;
};

static long read_measured_perf_ctrs(void *_cur)
{
	struct perf_cur *cur = _cur;

	rdmsr(MSR_IA32_APERF, cur->aperf_cur.split.lo, cur->aperf_cur.split.hi);
	rdmsr(MSR_IA32_MPERF, cur->mperf_cur.split.lo, cur->mperf_cur.split.hi);

	wrmsr(MSR_IA32_APERF, 0, 0);
	wrmsr(MSR_IA32_MPERF, 0, 0);

	return 0;
}

/*
 * Return the measured active (C0) frequency on this CPU since last call
 * to this function.
 * Input: cpu number
 * Return: Average CPU frequency in terms of max frequency (zero on error)
 *
 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
 * over a period of time, while CPU is in C0 state.
 * IA32_MPERF counts at the rate of max advertised frequency
 * IA32_APERF counts at the rate of actual CPU frequency
 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
 * no meaning should be associated with absolute values of these MSRs.
 */
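/*
 * Illustrative numbers: if APERF advanced by 75,000 ticks and MPERF by
 * 100,000 ticks since the counters were last zeroed, the CPU averaged
 * 75% of its maximum frequency while in C0, and this function returns
 * max_freq * 75 / 100.
 */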
static unsigned int get_measured_perf(struct cpufreq_policy *policy,
				      unsigned int cpu)
{
	struct perf_cur cur;
	unsigned int perf_percent;
	unsigned int retval;

	if (!work_on_cpu(cpu, read_measured_perf_ctrs, &cur))
		return 0;

#ifdef __i386__
	/*
	 * We don't want to do 64 bit divide with 32 bit kernel
	 * Get an approximate value. Return failure in case we cannot get
	 * an approximate value.
	 */
	if (unlikely(cur.aperf_cur.split.hi || cur.mperf_cur.split.hi)) {
		int shift_count;
		u32 h;

		h = max_t(u32, cur.aperf_cur.split.hi, cur.mperf_cur.split.hi);
		shift_count = fls(h);

		cur.aperf_cur.whole >>= shift_count;
		cur.mperf_cur.whole >>= shift_count;
	}

	if (((unsigned long)(-1) / 100) < cur.aperf_cur.split.lo) {
		int shift_count = 7;
		cur.aperf_cur.split.lo >>= shift_count;
		cur.mperf_cur.split.lo >>= shift_count;
	}

	if (cur.aperf_cur.split.lo && cur.mperf_cur.split.lo)
		perf_percent = (cur.aperf_cur.split.lo * 100) /
				cur.mperf_cur.split.lo;
	else
		perf_percent = 0;
#else
	if (unlikely(((unsigned long)(-1) / 100) < cur.aperf_cur.whole)) {
		int shift_count = 7;
		cur.aperf_cur.whole >>= shift_count;
		cur.mperf_cur.whole >>= shift_count;
	}

	if (cur.aperf_cur.whole && cur.mperf_cur.whole)
		perf_percent = (cur.aperf_cur.whole * 100) /
				cur.mperf_cur.whole;
	else
		perf_percent = 0;
#endif

	retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;

	return retval;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	dprintk("cur freq = %u\n", freq);

	return freq;
}

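/*
 * In strict mode, poll the status register until it reports the
 * requested frequency; the bounded loop keeps a non-responsive platform
 * from stalling the transition forever.
 */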
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

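/*
 * Transition sequence: map the request onto a freq_table entry, bail
 * out early if the target P-state is already set (unless a resume
 * forces a rewrite), post CPUFREQ_PRECHANGE on every affected CPU,
 * perform the MSR or I/O write, optionally verify the result under
 * acpi_pstate_strict, then post CPUFREQ_POSTCHANGE.
 */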
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;
	struct power_trace it;

	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			dprintk("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			dprintk("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	trace_power_mark(&it, POWER_PSTATE, next_perf_state);

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, cmd.mask) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			dprintk("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, cmd.mask) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);

	dprintk("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}

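/*
 * Pick the ACPI P-state closest to the measured cpu_khz. The states are
 * walked in descending frequency order; (2 * cpu_khz) > (freqn + freq)
 * is a midpoint test that stops as soon as cpu_khz lies closer to the
 * higher of the two adjacent state frequencies.
 */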
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	dprintk("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		dprintk("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!alloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in
 * hardware or do it in firmware, without informing the OS. If not
 * detected, this has a side effect of making the CPU run at a different
 * speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;

	dprintk("acpi_cpufreq_cpu_init\n");

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(drv_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	/* capability check */
	if (perf->state_count <= 1) {
		dprintk("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		dprintk("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		dprintk("HARDWARE addr space\n");
		if (!check_est_cpu(cpu)) {
			result = -ENODEV;
			goto err_unreg;
		}
		data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
		break;
	default:
		dprintk("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		static int print_once;
		policy->cpuinfo.transition_latency = 20 * 1000;
		if (!print_once) {
			print_once = 1;
			printk(KERN_INFO "Capping off P-state transition"
			       " latency at 20 uS\n");
		}
	}

	data->max_freq = perf->states[0].core_frequency * 1000;
	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
		unsigned int ecx;

		ecx = cpuid_ecx(6);
		if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
			acpi_cpufreq_driver.getavg = get_measured_perf;
	}

	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(drv_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);

	dprintk("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(drv_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);

	dprintk("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify = acpi_cpufreq_verify,
	.target = acpi_cpufreq_target,
	.init = acpi_cpufreq_cpu_init,
	.exit = acpi_cpufreq_cpu_exit,
	.resume = acpi_cpufreq_resume,
	.name = "acpi-cpufreq",
	.owner = THIS_MODULE,
	.attr = acpi_cpufreq_attr,
};

static int __init acpi_cpufreq_init(void)
{
	int ret;

	dprintk("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	dprintk("acpi_cpufreq_exit\n");

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_percpu(acpi_perf_data);
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");