/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

#include <asm/io.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/delay.h>
#include <asm/uaccess.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
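
/* How a CPU's P-state control is exposed: via Intel MSRs or ACPI system I/O. */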
enum {
        UNDEFINED_CAPABLE = 0,
        SYSTEM_INTEL_MSR_CAPABLE,
        SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)

struct acpi_cpufreq_data {
        struct acpi_processor_performance *acpi_data;
        struct cpufreq_frequency_table *freq_table;
        unsigned int max_freq;
        unsigned int resume;
        unsigned int cpu_feature;
};

static struct acpi_cpufreq_data *drv_data[NR_CPUS];
static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
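
/* Return 1 if this CPU is an Intel processor with Enhanced SpeedStep (EST). */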
static int check_est_cpu(unsigned int cpuid)
{
        struct cpuinfo_x86 *cpu = &cpu_data[cpuid];

        if (cpu->x86_vendor != X86_VENDOR_INTEL ||
            !cpu_has(cpu, X86_FEATURE_EST))
                return 0;

        return 1;
}
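
/*
 * Translate a raw value read from the ACPI P-state status register into a
 * frequency (kHz) from the driver's frequency table; 0 if no state matches.
 */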
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
        struct acpi_processor_performance *perf;
        int i;

        perf = data->acpi_data;

        for (i = 0; i < perf->state_count; i++) {
                if (value == perf->states[i].status)
                        return data->freq_table[i].frequency;
        }
        return 0;
}
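
/*
 * Translate a PERF_STATUS MSR value into a frequency from the table; unknown
 * values fall back to the first table entry.
 */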
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
        int i;
        struct acpi_processor_performance *perf;

        msr &= INTEL_MSR_RANGE;
        perf = data->acpi_data;

        for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
                if (msr == perf->states[data->freq_table[i].index].status)
                        return data->freq_table[i].frequency;
        }
        return data->freq_table[0].frequency;
}
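
/* Decode a raw status value using whichever access method this CPU uses. */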
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                return extract_msr(val, data);
        case SYSTEM_IO_CAPABLE:
                return extract_io(val, data);
        default:
                return 0;
        }
}
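
/* Width-aware port I/O helpers for ACPI P-state control/status registers. */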
static void wrport(u16 port, u8 bit_width, u32 value)
{
        if (bit_width <= 8) {
                outb(value, port);
        } else if (bit_width <= 16) {
                outw(value, port);
        } else if (bit_width <= 32) {
                outl(value, port);
        }
}

static void rdport(u16 port, u8 bit_width, u32 *ret)
{
        *ret = 0;
        if (bit_width <= 8) {
                *ret = inb(port);
        } else if (bit_width <= 16) {
                *ret = inw(port);
        } else if (bit_width <= 32) {
                *ret = inl(port);
        }
}
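
/*
 * Command block handed to do_drv_read()/do_drv_write(). Its definition is
 * not shown above; the layout below is a sketch reconstructed from how those
 * helpers dereference it, so treat the exact field types as an assumption.
 */
struct msr_addr {
        u32 reg;
};

struct io_addr {
        u16 port;
        u8 bit_width;
};

typedef union {
        struct msr_addr msr;
        struct io_addr io;
} drv_addr_union;

struct drv_cmd {
        unsigned int type;      /* SYSTEM_INTEL_MSR_CAPABLE or SYSTEM_IO_CAPABLE */
        cpumask_t mask;         /* CPUs the access must be performed on */
        drv_addr_union addr;
        u32 val;
};

/* Read the P-state status register (MSR or I/O port) on the local CPU. */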
static void do_drv_read(struct drv_cmd *cmd)
{
        u32 h;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                rdmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
                rdport(cmd->addr.io.port, cmd->addr.io.bit_width, &cmd->val);
                break;
        default:
                break;
        }
}

static void do_drv_write(struct drv_cmd *cmd)
{
        u32 h = 0;

        switch (cmd->type) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                wrmsr(cmd->addr.msr.reg, cmd->val, h);
                break;
        case SYSTEM_IO_CAPABLE:
                wrport(cmd->addr.io.port, cmd->addr.io.bit_width, cmd->val);
                break;
        default:
                break;
        }
}
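
/*
 * The helpers below temporarily migrate the current task onto the CPU(s)
 * named in cmd->mask so that the MSR/port access happens on the right
 * processor, then restore the caller's original affinity.
 */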
static inline void drv_read(struct drv_cmd *cmd)
{
        cpumask_t saved_mask = current->cpus_allowed;
        cmd->val = 0;

        set_cpus_allowed(current, cmd->mask);
        do_drv_read(cmd);
        set_cpus_allowed(current, saved_mask);
}

static void drv_write(struct drv_cmd *cmd)
{
        cpumask_t saved_mask = current->cpus_allowed;
        unsigned int i;

        for_each_cpu_mask(i, cmd->mask) {
                set_cpus_allowed(current, cpumask_of_cpu(i));
                do_drv_write(cmd);
        }

        set_cpus_allowed(current, saved_mask);
        return;
}
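
/*
 * Read the raw P-state status value for the CPUs in @mask; returns 0 for an
 * empty mask or an unknown access method.
 */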
static u32 get_cur_val(cpumask_t mask)
{
        struct acpi_processor_performance *perf;
        struct drv_cmd cmd;

        if (unlikely(cpus_empty(mask)))
                return 0;

        switch (drv_data[first_cpu(mask)]->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                perf = drv_data[first_cpu(mask)]->acpi_data;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                break;
        default:
                return 0;
        }

        cmd.mask = mask;

        drv_read(&cmd);

        dprintk("get_cur_val = %u\n", cmd.val);

        return cmd.val;
}

/*
 * Return the measured active (C0) frequency on this CPU since last call
 *
 * Return: Average CPU frequency in terms of max frequency (zero on error)
 *
 * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
 * over a period of time, while CPU is in C0 state.
 * IA32_MPERF counts at the rate of max advertised frequency
 * IA32_APERF counts at the rate of actual CPU frequency
 * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
 * no meaning should be associated with absolute values of these MSRs.
 */
static unsigned int get_measured_perf(unsigned int cpu)
{
        union {
                struct {
                        u32 lo;
                        u32 hi;
                } split;
                u64 whole;
        } aperf_cur, mperf_cur;

        cpumask_t saved_mask;
        unsigned int perf_percent;
        unsigned int retval;

        saved_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (get_cpu() != cpu) {
                /* We were not able to run on requested processor */
                put_cpu();
                return 0;
        }

        rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
        rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);

        wrmsr(MSR_IA32_APERF, 0, 0);
        wrmsr(MSR_IA32_MPERF, 0, 0);

#ifdef __i386__
        /*
         * We don't want to do a 64 bit divide with a 32 bit kernel.
         * Get an approximate value. Return failure in case we cannot get
         * an approximate value.
         */
        if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
                int shift_count;
                u32 h;

                h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
                shift_count = fls(h);

                aperf_cur.whole >>= shift_count;
                mperf_cur.whole >>= shift_count;
        }

        if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
                int shift_count = 7;
                aperf_cur.split.lo >>= shift_count;
                mperf_cur.split.lo >>= shift_count;
        }

        if (aperf_cur.split.lo && mperf_cur.split.lo) {
                perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
        } else {
                perf_percent = 0;
        }
#else
        if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
                int shift_count = 7;
                aperf_cur.whole >>= shift_count;
                mperf_cur.whole >>= shift_count;
        }

        if (aperf_cur.whole && mperf_cur.whole) {
                perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
        } else {
                perf_percent = 0;
        }
#endif

        retval = drv_data[cpu]->max_freq * perf_percent / 100;

        put_cpu();
        set_cpus_allowed(current, saved_mask);

        dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
        return retval;
}
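
/* ->get() callback: read and decode the current frequency of @cpu. */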
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
        struct acpi_cpufreq_data *data = drv_data[cpu];
        unsigned int freq;

        dprintk("get_cur_freq_on_cpu (%d)\n", cpu);

        if (unlikely(data == NULL ||
                     data->acpi_data == NULL || data->freq_table == NULL)) {
                return 0;
        }

        freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
        dprintk("cur freq = %u\n", freq);

        return freq;
}
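
/*
 * Used in acpi_pstate_strict mode: poll the hardware until it reports the
 * requested frequency; returns 1 on success, 0 if it never showed up.
 */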
static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
                                struct acpi_cpufreq_data *data)
{
        unsigned int cur_freq;
        unsigned int i;

        for (i = 0; i < 100; i++) {
                cur_freq = extract_freq(get_cur_val(mask), data);
                if (cur_freq == freq)
                        return 1;
                udelay(10);
        }
        return 0;
}
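
/*
 * ->target() callback: look up the table entry matching @target_freq,
 * program it on all affected CPUs and send the PRE/POSTCHANGE notifications.
 */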
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
                               unsigned int target_freq, unsigned int relation)
{
        struct acpi_cpufreq_data *data = drv_data[policy->cpu];
        struct acpi_processor_performance *perf;
        struct cpufreq_freqs freqs;
        cpumask_t online_policy_cpus;
        struct drv_cmd cmd;
        unsigned int msr;
        unsigned int next_state = 0;
        unsigned int next_perf_state = 0;
        unsigned int i;
        int result = 0;

        dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

        if (unlikely(data == NULL ||
                     data->acpi_data == NULL || data->freq_table == NULL)) {
                return -ENODEV;
        }

        perf = data->acpi_data;
        result = cpufreq_frequency_table_target(policy,
                                                data->freq_table,
                                                target_freq,
                                                relation, &next_state);
        if (unlikely(result))
                return -ENODEV;

#ifdef CONFIG_HOTPLUG_CPU
        /* cpufreq holds the hotplug lock, so we are safe from here on */
        cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
#else
        online_policy_cpus = policy->cpus;
#endif

        next_perf_state = data->freq_table[next_state].index;
        if (perf->state == next_perf_state) {
                if (unlikely(data->resume)) {
                        dprintk("Called after resume, resetting to P%d\n",
                                next_perf_state);
                        data->resume = 0;
                } else {
                        dprintk("Already at target state (P%d)\n",
                                next_perf_state);
                        return 0;
                }
        }

        switch (data->cpu_feature) {
        case SYSTEM_INTEL_MSR_CAPABLE:
                cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
                cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
                msr = (u32) perf->states[next_perf_state].control &
                        INTEL_MSR_RANGE;
                cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
                break;
        case SYSTEM_IO_CAPABLE:
                cmd.type = SYSTEM_IO_CAPABLE;
                cmd.addr.io.port = perf->control_register.address;
                cmd.addr.io.bit_width = perf->control_register.bit_width;
                cmd.val = (u32) perf->states[next_perf_state].control;
                break;
        default:
                return -ENODEV;
        }

        cpus_clear(cmd.mask);

        if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
                cmd.mask = online_policy_cpus;
        else
                cpu_set(policy->cpu, cmd.mask);

        freqs.old = data->freq_table[perf->state].frequency;
        freqs.new = data->freq_table[next_perf_state].frequency;
        for_each_cpu_mask(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

        drv_write(&cmd);

        if (acpi_pstate_strict) {
                if (!check_freqs(cmd.mask, freqs.new, data)) {
                        dprintk("acpi_cpufreq_target failed (%d)\n",
                                policy->cpu);
                        return -EAGAIN;
                }
        }

        for_each_cpu_mask(i, cmd.mask) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }
        perf->state = next_perf_state;

        return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = drv_data[policy->cpu];

        dprintk("acpi_cpufreq_verify\n");

        return cpufreq_frequency_table_verify(policy, data->freq_table);
}

static unsigned int
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
        struct acpi_processor_performance *perf = data->acpi_data;

        if (cpu_khz) {
                /* search the closest match to cpu_khz */
                unsigned int i;
                unsigned long freq;
                unsigned long freqn = perf->states[0].core_frequency * 1000;

                for (i = 0; i < (perf->state_count - 1); i++) {
                        freq = freqn;
                        freqn = perf->states[i + 1].core_frequency * 1000;
                        if ((2 * cpu_khz) > (freqn + freq)) {
                                perf->state = i;
                                return freq;
                        }
                }
                perf->state = perf->state_count - 1;
                return freqn;
        } else {
                /* assume CPU is at P0... */
                perf->state = 0;
                return perf->states[0].core_frequency * 1000;
        }
}

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int acpi_cpufreq_early_init(void)
{
        struct acpi_processor_performance *data;
        cpumask_t covered = CPU_MASK_NONE;
        unsigned int i, j;

        dprintk("acpi_cpufreq_early_init\n");

        for_each_possible_cpu(i) {
                data = kzalloc(sizeof(struct acpi_processor_performance),
                               GFP_KERNEL);
                if (!data) {
                        for_each_cpu_mask(j, covered) {
                                kfree(acpi_perf_data[j]);
                                acpi_perf_data[j] = NULL;
                        }
                        return -ENOMEM;
                }
                acpi_perf_data[i] = data;
                cpu_set(i, covered);
        }

        /* Do initialization in ACPI core */
        acpi_processor_preregister_performance(acpi_perf_data);
        return 0;
}

/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up in
 * hardware or doing it in firmware, and do not inform the OS about it. If
 * this goes undetected, it has the side effect of making the CPU run at a
 * different speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(struct dmi_system_id *d)
{
        bios_with_sw_any_bug = 1;
        return 0;
}

static struct dmi_system_id sw_any_bug_dmi_table[] = {
        {
                .callback = sw_any_bug_found,
                .ident = "Supermicro Server X6DLP",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
                        DMI_MATCH(DMI_BIOS_VERSION, "080010"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
                },
        },
        { }
};
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int i;
        unsigned int valid_states = 0;
        unsigned int cpu = policy->cpu;
        struct acpi_cpufreq_data *data;
        unsigned int result = 0;
        struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
        struct acpi_processor_performance *perf;

        dprintk("acpi_cpufreq_cpu_init\n");

        if (!acpi_perf_data[cpu])
                return -ENODEV;

        data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->acpi_data = acpi_perf_data[cpu];
        drv_data[cpu] = data;

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
                acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
        }

        result = acpi_processor_register_performance(data->acpi_data, cpu);
        if (result)
                goto err_free;

        perf = data->acpi_data;
        policy->shared_type = perf->shared_type;
        /*
         * Will let policy->cpus know about dependency only when software
         * coordination is required.
         */
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                policy->cpus = perf->shared_cpu_map;
        }

#ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                policy->cpus = cpu_core_map[cpu];
        }
#endif

        /* capability check */
        if (perf->state_count <= 1) {
                dprintk("No P-States\n");
                result = -ENODEV;
                goto err_unreg;
        }

        if (perf->control_register.space_id != perf->status_register.space_id) {
                result = -ENODEV;
                goto err_unreg;
        }

        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
                dprintk("SYSTEM IO addr space\n");
                data->cpu_feature = SYSTEM_IO_CAPABLE;
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                dprintk("HARDWARE addr space\n");
                if (!check_est_cpu(cpu)) {
                        result = -ENODEV;
                        goto err_unreg;
                }
                data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
                break;
        default:
                dprintk("Unknown addr space %d\n",
                        (u32) (perf->control_register.space_id));
                result = -ENODEV;
                goto err_unreg;
        }

        data->freq_table =
            kmalloc(sizeof(struct cpufreq_frequency_table) *
                    (perf->state_count + 1), GFP_KERNEL);
        if (!data->freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }

        /* detect transition latency */
        policy->cpuinfo.transition_latency = 0;
        for (i = 0; i < perf->state_count; i++) {
                if ((perf->states[i].transition_latency * 1000) >
                    policy->cpuinfo.transition_latency)
                        policy->cpuinfo.transition_latency =
                            perf->states[i].transition_latency * 1000;
        }
        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;

        data->max_freq = perf->states[0].core_frequency * 1000;
        /* table init */
        for (i = 0; i < perf->state_count; i++) {
                if (i > 0 && perf->states[i].core_frequency ==
                    perf->states[i - 1].core_frequency)
                        continue;

                data->freq_table[valid_states].index = i;
                data->freq_table[valid_states].frequency =
                    perf->states[i].core_frequency * 1000;
                valid_states++;
        }
        data->freq_table[perf->state_count].frequency = CPUFREQ_TABLE_END;

        result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
        if (result)
                goto err_freqfree;

        switch (data->cpu_feature) {
        case SYSTEM_IO_CAPABLE:
                /* Current speed is unknown and not detectable by IO port */
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case SYSTEM_INTEL_MSR_CAPABLE:
                acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
                policy->cur = get_cur_freq_on_cpu(cpu);
                break;
        default:
                break;
        }

        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);

        /* Check for APERF/MPERF support in hardware */
        if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
                unsigned int ecx;

                ecx = cpuid_ecx(6);
                if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY) {
                        acpi_cpufreq_driver.getavg = get_measured_perf;
                }
        }

        dprintk("CPU%u - ACPI performance management activated.\n", cpu);
        for (i = 0; i < perf->state_count; i++)
                dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
                        (i == perf->state ? '*' : ' '), i,
                        (u32) perf->states[i].core_frequency,
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);

        cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

        /*
         * the first call to ->target() should result in us actually
         * writing something to the appropriate registers.
         */
        data->resume = 1;

        return result;

err_freqfree:
        kfree(data->freq_table);
err_unreg:
        acpi_processor_unregister_performance(perf, cpu);
err_free:
        kfree(data);
        drv_data[cpu] = NULL;

        return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = drv_data[policy->cpu];

        dprintk("acpi_cpufreq_cpu_exit\n");

        if (data) {
                cpufreq_frequency_table_put_attr(policy->cpu);
                drv_data[policy->cpu] = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
                kfree(data);
        }

        return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
        struct acpi_cpufreq_data *data = drv_data[policy->cpu];

        dprintk("acpi_cpufreq_resume\n");

        data->resume = 1;

        return 0;
}
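
/* Per-policy sysfs attributes exported by this driver. */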
static struct freq_attr *acpi_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
        .verify = acpi_cpufreq_verify,
        .target = acpi_cpufreq_target,
        .init = acpi_cpufreq_cpu_init,
        .exit = acpi_cpufreq_cpu_exit,
        .resume = acpi_cpufreq_resume,
        .name = "acpi-cpufreq",
        .owner = THIS_MODULE,
        .attr = acpi_cpufreq_attr,
};

static int __init acpi_cpufreq_init(void)
{
        dprintk("acpi_cpufreq_init\n");

        acpi_cpufreq_early_init();

        return cpufreq_register_driver(&acpi_cpufreq_driver);
}

static void __exit acpi_cpufreq_exit(void)
{
        unsigned int i;

        dprintk("acpi_cpufreq_exit\n");

        cpufreq_unregister_driver(&acpi_cpufreq_driver);

        for_each_possible_cpu(i) {
                kfree(acpi_perf_data[i]);
                acpi_perf_data[i] = NULL;
        }
        return;
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
        "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

MODULE_ALIAS("acpi");