/*
 * (c) 2003-2010 Advanced Micro Devices, Inc.
 * Your use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Support : mark.langsdorf@amd.com
 *
 * Based on the powernow-k7.c module written by Dave Jones.
 * (C) 2003 Dave Jones on behalf of SuSE Labs
 * (C) 2004 Dominik Brodowski <linux@brodo.de>
 * (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Licensed under the terms of the GNU GPL License version 2.
 * Based upon datasheets & sample CPUs kindly provided by AMD.
 *
 * Valuable input gratefully received from Dave Jones, Pavel Machek,
 * Dominik Brodowski, Jacob Shin, and others.
 * Originally developed by Paul Devriendt.
 * Processor information obtained from Chapter 9 (Power and Thermal Management)
 * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" available for download from www.amd.com
 *
 * Tables for specific CPUs can be inferred from
 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
 */
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/sched.h>	/* for current / set_cpus_allowed() */
#include <linux/delay.h>

#include <linux/acpi.h>
#include <linux/mutex.h>
#include <acpi/processor.h>

#define PFX "powernow-k8: "
#define VERSION "version 2.20.00"
#include "powernow-k8.h"
/* serialize freq changes */
static DEFINE_MUTEX(fidvid_mutex);

static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);

static int cpu_family = CPU_OPTERON;

/* array to map SW pstate number to acpi state */
static u32 ps_to_as[8];

/* core performance boost */
static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs;

static struct cpufreq_driver cpufreq_amd64_driver;
static inline const struct cpumask *cpu_core_mask(int cpu)
/* Return a frequency in MHz, given an input fid */
static u32 find_freq_from_fid(u32 fid)
{
	return 800 + (fid * 100);
}
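/* Worked example for find_freq_from_fid() above:
 * fid 0x00 -> 800 MHz, fid 0x02 -> 1000 MHz, fid 0x0e -> 2200 MHz. */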
/* Return a frequency in KHz, given an input fid */
static u32 find_khz_freq_from_fid(u32 fid)
{
	return 1000 * find_freq_from_fid(fid);
}
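/* Return a frequency in kHz for a given hardware P-state number; ps_to_as[]
 * maps the software P-state index to the matching ACPI state entry. */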
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
		u32 pstate)
{
	return data[ps_to_as[pstate]].frequency;
}
/* Return the vco fid for an input fid
 *
 * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
 * only from corresponding high fids. This returns "high" fid corresponding to
 * "low" one.
 */
static u32 convert_fid_to_vco_fid(u32 fid)
{
	if (fid < HI_FID_TABLE_BOTTOM)
		return 8 + (2 * fid);
	else
		return fid;
}
/*
 * Return 1 if the pending bit is set. Unless we just instructed the processor
 * to transition to a new state, seeing this bit set is really bad news.
 */
static int pending_bit_stuck(void)

	if (cpu_family == CPU_HW_PSTATE)
		return 0;

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
/*
 * Update the global current fid / vid values from the status msr.
 * Returns 1 on error.
 */
static int query_current_values_with_pending_wait(struct powernow_k8_data *data)

	if (cpu_family == CPU_HW_PSTATE) {
		rdmsr(MSR_PSTATE_STATUS, lo, hi);
		i = lo & HW_PSTATE_MASK;
		data->currpstate = i;

		/*
		 * a workaround for family 11h erratum 311 might cause
		 * an "out-of-range" Pstate if the core is in Pstate-0
		 */
		if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
			data->currpstate = HW_PSTATE_0;

		return 0;
	}

		pr_debug("detected change pending stuck\n");

		rdmsr(MSR_FIDVID_STATUS, lo, hi);
	} while (lo & MSR_S_LO_CHANGE_PENDING);

	data->currvid = hi & MSR_S_HI_CURRENT_VID;
	data->currfid = lo & MSR_S_LO_CURRENT_FID;
/* the isochronous relief time */
static void count_off_irt(struct powernow_k8_data *data)
{
	udelay((1 << data->irt) * 10);
}

/* the voltage stabilization time */
static void count_off_vst(struct powernow_k8_data *data)
{
	udelay(data->vstable * VST_UNITS_20US);
}
/* need to init the control msr to a safe value (for each cpu) */
static void fidvid_msr_init(void)

	rdmsr(MSR_FIDVID_STATUS, lo, hi);
	vid = hi & MSR_S_HI_CURRENT_VID;
	fid = lo & MSR_S_LO_CURRENT_FID;
	lo = fid | (vid << MSR_C_LO_VID_SHIFT);
	hi = MSR_C_HI_STP_GNT_BENIGN;
	pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi);
	wrmsr(MSR_FIDVID_CTL, lo, hi);
/* write the new fid value along with the other control fields to the msr */
static int write_new_fid(struct powernow_k8_data *data, u32 fid)

	u32 savevid = data->currvid;

	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on fid write\n");

	lo |= (data->currvid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
		fid, lo, data->plllock * PLL_LOCK_CONVERSION);

	wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
			"Hardware error - pending bit very stuck - "
			"no further pstate changes possible\n");
	} while (query_current_values_with_pending_wait(data));

	if (savevid != data->currvid) {
			"vid change on fid trans, old 0x%x, new 0x%x\n",
			savevid, data->currvid);

	if (fid != data->currfid) {
			"fid trans failed, fid 0x%x, curr 0x%x\n", fid,
/* Write a new vid to the hardware */
static int write_new_vid(struct powernow_k8_data *data, u32 vid)

	u32 savefid = data->currfid;

	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
		printk(KERN_ERR PFX "internal error - overflow on vid write\n");

	lo |= (vid << MSR_C_LO_VID_SHIFT);
	lo |= MSR_C_LO_INIT_FID_VID;

	pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
		vid, lo, STOP_GRANT_5NS);

	wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
			printk(KERN_ERR PFX "internal error - pending bit "
				"very stuck - no further pstate "
				"changes possible\n");
	} while (query_current_values_with_pending_wait(data));

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "fid changed on vid trans, old "
			savefid, data->currfid);

	if (vid != data->currvid) {
		printk(KERN_ERR PFX "vid trans failed, vid 0x%x, "
/*
 * Reduce the vid by the max of step or reqvid.
 * Decreasing vid codes represent increasing voltages:
 * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off.
 */
static int decrease_vid_code_by_step(struct powernow_k8_data *data,
		u32 reqvid, u32 step)

	if ((data->currvid - reqvid) > step)
		reqvid = data->currvid - step;

	if (write_new_vid(data, reqvid))
/* Change hardware pstate by single MSR write */
static int transition_pstate(struct powernow_k8_data *data, u32 pstate)

	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
	data->currpstate = pstate;
/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
static int transition_fid_vid(struct powernow_k8_data *data,
		u32 reqfid, u32 reqvid)

	if (core_voltage_pre_transition(data, reqvid, reqfid))

	if (core_frequency_transition(data, reqfid))

	if (core_voltage_post_transition(data, reqvid))

	if (query_current_values_with_pending_wait(data))

	if ((reqfid != data->currfid) || (reqvid != data->currvid)) {
		printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, "
			reqfid, reqvid, data->currfid, data->currvid);

	pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n",
		smp_processor_id(), data->currfid, data->currvid);
/* Phase 1 - core voltage transition ... setup voltage */
static int core_voltage_pre_transition(struct powernow_k8_data *data,
		u32 reqvid, u32 reqfid)

	u32 rvosteps = data->rvo;
	u32 savefid = data->currfid;
	u32 maxvid, lo, rvomult = 1;

	pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
		"reqvid 0x%x, rvo 0x%x\n",
		data->currfid, data->currvid, reqvid, data->rvo);

	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))

	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
	maxvid = 0x1f & (maxvid >> 16);
	pr_debug("ph1 maxvid=0x%x\n", maxvid);
	if (reqvid < maxvid) /* lower numbers are higher voltages */

	while (data->currvid > reqvid) {
		pr_debug("ph1: curr 0x%x, req vid 0x%x\n",
			data->currvid, reqvid);
		if (decrease_vid_code_by_step(data, reqvid, data->vidmvs))

	while ((rvosteps > 0) &&
			((rvomult * data->rvo + data->currvid) > reqvid)) {
		if (data->currvid == maxvid) {

		pr_debug("ph1: changing vid for rvo, req 0x%x\n",

		if (decrease_vid_code_by_step(data, data->currvid - 1, 1))

	if (query_current_values_with_pending_wait(data))

	if (savefid != data->currfid) {
		printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n",

	pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);
/* Phase 2 - core frequency transition */
static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)

	u32 vcoreqfid, vcocurrfid, vcofiddiff;
	u32 fid_interval, savevid = data->currvid;

	if (data->currfid == reqfid) {
		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",

	pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, "
		data->currfid, data->currvid, reqfid);

	vcoreqfid = convert_fid_to_vco_fid(reqfid);
	vcocurrfid = convert_fid_to_vco_fid(data->currfid);
	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
	    : vcoreqfid - vcocurrfid;

	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))

	while (vcofiddiff > 2) {
		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);

		if (reqfid > data->currfid) {
			if (data->currfid > LO_FID_TABLE_TOP) {
				if (write_new_fid(data,
						data->currfid + fid_interval))

					2 + convert_fid_to_vco_fid(data->currfid)))

			if (write_new_fid(data, data->currfid - fid_interval))

		vcocurrfid = convert_fid_to_vco_fid(data->currfid);
		vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
		    : vcoreqfid - vcocurrfid;

	if (write_new_fid(data, reqfid))

	if (query_current_values_with_pending_wait(data))

	if (data->currfid != reqfid) {
			"ph2: mismatch, failed fid transition, "
			"curr 0x%x, req 0x%x\n",
			data->currfid, reqfid);

	if (savevid != data->currvid) {
		printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n",
			savevid, data->currvid);

	pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);
/* Phase 3 - core voltage transition flow ... jump to the final vid. */
static int core_voltage_post_transition(struct powernow_k8_data *data,
		u32 reqvid)

	u32 savefid = data->currfid;
	u32 savereqvid = reqvid;

	pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);

	if (reqvid != data->currvid) {
		if (write_new_vid(data, reqvid))

		if (savefid != data->currfid) {
			"ph3: bad fid change, save 0x%x, curr 0x%x\n",
			savefid, data->currfid);

		if (data->currvid != reqvid) {
			"ph3: failed vid transition, "
			"req 0x%x, curr 0x%x\n",
			reqvid, data->currvid);

	if (query_current_values_with_pending_wait(data))

	if (savereqvid != data->currvid) {
		pr_debug("ph3 failed, currvid 0x%x\n", data->currvid);

	if (savefid != data->currfid) {
		pr_debug("ph3 failed, currfid changed 0x%x\n",

	pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n",
		data->currfid, data->currvid);
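/*
 * Runs on the target CPU (via smp_call_function_single) and checks for an
 * AMD processor that is either a K8 with fid/vid control or a family 10h+
 * part with hardware P-states; the HW-pstate case sets cpu_family below.
 * The per-cpu result is passed back through the _rc argument.
 */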
static void check_supported_cpu(void *_rc)

	u32 eax, ebx, ecx, edx;

	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)

	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
	    ((eax & CPUID_XFAM) < CPUID_XFAM_10H))

	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
				"Processor cpuid %x not supported\n", eax);

		eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
		if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
				"No frequency change capabilities detected\n");

		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & P_STATE_TRANSITION_CAPABLE)
			!= P_STATE_TRANSITION_CAPABLE) {
				"Power state transitions not supported\n");

	} else { /* must be a HW Pstate capable processor */
		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
		if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
			cpu_family = CPU_HW_PSTATE;
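/*
 * Sanity-check the BIOS-provided PST: every entry's vid must lie in the
 * valid range (also accounting for the ramp voltage offset), fids must not
 * exceed MAX_FID, and only the first fid may come from the "low" table.
 */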
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
		u8 maxvid)

	for (j = 0; j < data->numps; j++) {
		if (pst[j].vid > LEAST_VID) {
			printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n",

		if (pst[j].vid < data->rvo) {
			printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate"

		if (pst[j].vid < maxvid + data->rvo) {
			/* vid + rvo >= maxvid */
			printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate"

		if (pst[j].fid > MAX_FID) {
			printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate"

		if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
			/* Only first fid is allowed to be in "low" range */
			printk(KERN_ERR FW_BUG PFX "two low fids - %d : "
				"0x%x\n", j, pst[j].fid);

		if (pst[j].fid < lastfid)
			lastfid = pst[j].fid;

		printk(KERN_ERR FW_BUG PFX "lastfid invalid\n");

	if (lastfid > LO_FID_TABLE_TOP)
		printk(KERN_INFO FW_BUG PFX
			"first fid not from lo freq table\n");
static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
		unsigned int entry)
{
	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
static void print_basics(struct powernow_k8_data *data)

	for (j = 0; j < data->numps; j++) {
		if (data->powernow_table[j].frequency !=
				CPUFREQ_ENTRY_INVALID) {
			if (cpu_family == CPU_HW_PSTATE) {
					" %d : pstate %d (%d MHz)\n", j,
					data->powernow_table[j].index,
					data->powernow_table[j].frequency/1000);

					"fid 0x%x (%d MHz), vid 0x%x\n",
					data->powernow_table[j].index & 0xff,
					data->powernow_table[j].frequency/1000,
					data->powernow_table[j].index >> 8);

	printk(KERN_INFO PFX "Only %d pstates on battery\n",
static u32 freq_from_fid_did(u32 fid, u32 did)

	if (boot_cpu_data.x86 == 0x10)
		mhz = (100 * (fid + 0x10)) >> did;
	else if (boot_cpu_data.x86 == 0x11)
		mhz = (100 * (fid + 8)) >> did;
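/*
 * Build the cpufreq frequency table from the BIOS PST: each entry packs the
 * fid into the low 8 bits of .index and the vid into the next 8 bits, with
 * the frequency computed via find_khz_freq_from_fid().
 */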
static int fill_powernow_table(struct powernow_k8_data *data,
		struct pst_s *pst, u8 maxvid)

	struct cpufreq_frequency_table *powernow_table;

		/* use ACPI support to get full speed on mains power */
		printk(KERN_WARNING PFX
			"Only %d pstates usable (use ACPI driver for full "
			"range)\n", data->batps);
		data->numps = data->batps;

	for (j = 1; j < data->numps; j++) {
		if (pst[j-1].fid >= pst[j].fid) {
			printk(KERN_ERR PFX "PST out of sequence\n");

	if (data->numps < 2) {
		printk(KERN_ERR PFX "no p states to transition\n");

	if (check_pst_table(data, pst, maxvid))

	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->numps + 1)), GFP_KERNEL);
	if (!powernow_table) {
		printk(KERN_ERR PFX "powernow_table memory alloc failure\n");

	for (j = 0; j < data->numps; j++) {
		powernow_table[j].index = pst[j].fid; /* lower 8 bits */
		powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
		freq = find_khz_freq_from_fid(pst[j].fid);
		powernow_table[j].frequency = freq;

	powernow_table[data->numps].frequency = CPUFREQ_TABLE_END;
	powernow_table[data->numps].index = 0;

	if (query_current_values_with_pending_wait(data)) {
		kfree(powernow_table);

	pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
	data->powernow_table = powernow_table;
	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)

	for (j = 0; j < data->numps; j++)
		if ((pst[j].fid == data->currfid) &&
		    (pst[j].vid == data->currvid))

	pr_debug("currfid/vid do not match PST, ignoring\n");
/* Find and validate the PSB/PST table in BIOS. */
static int find_psb_table(struct powernow_k8_data *data)

	for (i = 0xc0000; i < 0xffff0; i += 0x10) {
		/* Scan BIOS looking for the signature. */
		/* It can not be at ffff0 - it is too big. */

		psb = phys_to_virt(i);
		if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0)

		pr_debug("found PSB header at 0x%p\n", psb);

		pr_debug("table vers: 0x%x\n", psb->tableversion);
		if (psb->tableversion != PSB_VERSION_1_4) {
			printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n");

		pr_debug("flags: 0x%x\n", psb->flags1);
			printk(KERN_ERR FW_BUG PFX "unknown flags\n");

		data->vstable = psb->vstable;
		pr_debug("voltage stabilization time: %d(*20us)\n",

		pr_debug("flags2: 0x%x\n", psb->flags2);
		data->rvo = psb->flags2 & 3;
		data->irt = ((psb->flags2) >> 2) & 3;
		mvs = ((psb->flags2) >> 4) & 3;
		data->vidmvs = 1 << mvs;
		data->batps = ((psb->flags2) >> 6) & 3;

		pr_debug("ramp voltage offset: %d\n", data->rvo);
		pr_debug("isochronous relief time: %d\n", data->irt);
		pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs);

		pr_debug("numpst: 0x%x\n", psb->num_tables);
		cpst = psb->num_tables;
		if ((psb->cpuid == 0x00000fc0) ||
		    (psb->cpuid == 0x00000fe0)) {
			thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
			if ((thiscpuid == 0x00000fc0) ||
			    (thiscpuid == 0x00000fe0))

			printk(KERN_ERR FW_BUG PFX "numpst must be 1\n");

		data->plllock = psb->plllocktime;
		pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime);
		pr_debug("maxfid: 0x%x\n", psb->maxfid);
		pr_debug("maxvid: 0x%x\n", psb->maxvid);
		maxvid = psb->maxvid;

		data->numps = psb->numps;
		pr_debug("numpstates: 0x%x\n", data->numps);
		return fill_powernow_table(data,
				(struct pst_s *)(psb+1), maxvid);
	}
	/*
	 * If you see this message, complain to BIOS manufacturer. If
	 * he tells you "we do not support Linux" or some similar
	 * nonsense, remember that Windows 2000 uses the same legacy
	 * mechanism that the old Linux PSB driver uses. Tell them it
	 * is broken with Windows 2000.
	 *
	 * The reference to the AMD documentation is chapter 9 in the
	 * BIOS and Kernel Developer's Guide, which is available on
	 * www.amd.com
	 */
	printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n");
	printk(KERN_ERR PFX "Make sure that your BIOS is up to date"
		" and Cool'N'Quiet support is enabled in BIOS setup\n");
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
		unsigned int index)

	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))

	control = data->acpi_data.states[index].control;
	data->irt = (control >> IRT_SHIFT) & IRT_MASK;
	data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
	data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
	data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
	data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
	data->vstable = (control >> VST_SHIFT) & VST_MASK;
static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)

	struct cpufreq_frequency_table *powernow_table;
	int ret_val = -ENODEV;

	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
		pr_debug("register performance failed: bad ACPI data\n");

	/* verify the data contained in the ACPI structures */
	if (data->acpi_data.state_count <= 1) {
		pr_debug("No ACPI P-States\n");

	control = data->acpi_data.control_register.space_id;
	status = data->acpi_data.status_register.space_id;

	if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
	    (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
		pr_debug("Invalid control/status registers (%llx - %llx)\n",

	/* fill in data->powernow_table */
	powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
		* (data->acpi_data.state_count + 1)), GFP_KERNEL);
	if (!powernow_table) {
		pr_debug("powernow_table memory alloc failure\n");

	data->numps = data->acpi_data.state_count;
	powernow_k8_acpi_pst_values(data, 0);

	if (cpu_family == CPU_HW_PSTATE)
		ret_val = fill_powernow_table_pstate(data, powernow_table);
	else
		ret_val = fill_powernow_table_fidvid(data, powernow_table);

	powernow_table[data->acpi_data.state_count].frequency =
		CPUFREQ_TABLE_END;
	powernow_table[data->acpi_data.state_count].index = 0;
	data->powernow_table = powernow_table;

	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
			"unable to alloc powernow_k8_data cpumask\n");

	kfree(powernow_table);

	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);

	/* data->acpi_data.state_count informs us at ->exit()
	 * whether ACPI was used */
	data->acpi_data.state_count = 0;
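/*
 * For hardware P-state capable processors: read the P-state limit MSR,
 * then map each ACPI state to its hardware P-state index, dropping any
 * entry whose index or frequency the hardware reports as invalid.
 */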
static int fill_powernow_table_pstate(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)

	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;

	for (i = 0; i < data->acpi_data.state_count; i++) {

		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
		if (index > data->max_hw_pstate) {
			printk(KERN_ERR PFX "invalid pstate %d - "
					"bad value %d.\n", i, index);
			printk(KERN_ERR PFX "Please report to BIOS "
			invalidate_entry(powernow_table, i);

		/* Frequency may be rounded for these */
		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
			|| boot_cpu_data.x86 == 0x11) {

			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
			if (!(hi & HW_PSTATE_VALID_MASK)) {
				pr_debug("invalid pstate %d, ignoring\n", index);
				invalidate_entry(powernow_table, i);

			powernow_table[i].frequency =
				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
		} else
			powernow_table[i].frequency =
				data->acpi_data.states[i].core_frequency * 1000;

		powernow_table[i].index = index;
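/*
 * For fid/vid (K8) processors: extract fid and vid from the ACPI status or
 * control words, drop entries whose frequency is out of range or whose vid
 * is the "off" code, and warn when ACPI and computed frequencies disagree.
 */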
static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
		struct cpufreq_frequency_table *powernow_table)

	for (i = 0; i < data->acpi_data.state_count; i++) {

			status = data->acpi_data.states[i].status;
			fid = status & EXT_FID_MASK;
			vid = (status >> VID_SHIFT) & EXT_VID_MASK;

			control = data->acpi_data.states[i].control;
			fid = control & FID_MASK;
			vid = (control >> VID_SHIFT) & VID_MASK;

		pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid);

		index = fid | (vid << 8);
		powernow_table[i].index = index;

		freq = find_khz_freq_from_fid(fid);
		powernow_table[i].frequency = freq;

		/* verify frequency is OK */
		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
			pr_debug("invalid freq %u kHz, ignoring\n", freq);
			invalidate_entry(powernow_table, i);

		/* verify voltage is OK -
		 * BIOSs are using "off" to indicate invalid */
		if (vid == VID_OFF) {
			pr_debug("invalid vid %u, ignoring\n", vid);
			invalidate_entry(powernow_table, i);

		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
			printk(KERN_INFO PFX "invalid freq entries "
				"%u kHz vs. %u kHz\n", freq,
				(data->acpi_data.states[i].core_frequency

			invalidate_entry(powernow_table, i);
static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
{
	if (data->acpi_data.state_count)
		acpi_processor_unregister_performance(&data->acpi_data,
				data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
}
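/*
 * Worst-case latency across all ACPI P-states (transition plus bus-master
 * latency), converted from microseconds to nanoseconds for cpufreq.
 */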
static int get_transition_latency(struct powernow_k8_data *data)

	int max_latency = 0;

	for (i = 0; i < data->acpi_data.state_count; i++) {
		int cur_latency = data->acpi_data.states[i].transition_latency
			+ data->acpi_data.states[i].bus_master_latency;
		if (cur_latency > max_latency)
			max_latency = cur_latency;

	if (max_latency == 0) {
		/*
		 * Fam 11h and later may return 0 as transition latency. This
		 * is intended and means "very fast". While cpufreq core and
		 * governors currently can handle that gracefully, better set
		 * it to 1 to avoid problems in the future.
		 */
		if (boot_cpu_data.x86 < 0x11)
			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "

	/* value in usecs, needs to be in nanoseconds */
	return 1000 * max_latency;
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
		unsigned int index)

	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* fid/vid correctness check for k8 */
	/* fid are the lower 8 bits of the index we stored into
	 * the cpufreq frequency table in find_psb_table, vid
	 * are the upper 8 bits.
	 */
	fid = data->powernow_table[index].index & 0xFF;
	vid = (data->powernow_table[index].index & 0xFF00) >> 8;

	pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid);

	if (query_current_values_with_pending_wait(data))

	if ((data->currvid == vid) && (data->currfid == fid)) {
		pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n",

	pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n",
		smp_processor_id(), fid, vid);
	freqs.old = find_khz_freq_from_fid(data->currfid);
	freqs.new = find_khz_freq_from_fid(fid);

	for_each_cpu(i, data->available_cores) {
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	res = transition_fid_vid(data, fid, vid);

	freqs.new = find_khz_freq_from_fid(data->currfid);

	for_each_cpu(i, data->available_cores) {
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* Take a frequency, and issue the hardware pstate transition command */
static int transition_frequency_pstate(struct powernow_k8_data *data,
		unsigned int index)

	struct cpufreq_freqs freqs;

	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);

	/* get MSR index for hardware pstate transition */
	pstate = index & HW_PSTATE_MASK;
	if (pstate > data->max_hw_pstate)

	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

	res = transition_pstate(data, pstate);
	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

	for_each_cpu(i, data->available_cores) {
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* Driver entry point to switch to the target frequency */
static int powernowk8_target(struct cpufreq_policy *pol,
		unsigned targfreq, unsigned relation)

	cpumask_var_t oldmask;
	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
	unsigned int newstate;

	checkfid = data->currfid;
	checkvid = data->currvid;

	/* only run on specific CPU from here on. */
	/* This is poor form: use a workqueue or smp_call_function_single */
	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))

	cpumask_copy(oldmask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));

	if (smp_processor_id() != pol->cpu) {
		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing targ, change pending bit set\n");

	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
		pol->cpu, targfreq, pol->min, pol->max, relation);

	if (query_current_values_with_pending_wait(data))

	if (cpu_family != CPU_HW_PSTATE) {
		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
			data->currfid, data->currvid);

		if ((checkvid != data->currvid) ||
		    (checkfid != data->currfid)) {
			printk(KERN_INFO PFX
				"error - out of sync, fix 0x%x 0x%x, "
				checkfid, data->currfid,
				checkvid, data->currvid);

	if (cpufreq_frequency_table_target(pol, data->powernow_table,
			targfreq, relation, &newstate))

	mutex_lock(&fidvid_mutex);

	powernow_k8_acpi_pst_values(data, newstate);

	if (cpu_family == CPU_HW_PSTATE)
		ret = transition_frequency_pstate(data,
			data->powernow_table[newstate].index);
	else
		ret = transition_frequency_fidvid(data, newstate);

		printk(KERN_ERR PFX "transition frequency failed\n");
		mutex_unlock(&fidvid_mutex);

	mutex_unlock(&fidvid_mutex);

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->powernow_table[newstate].index);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);

	set_cpus_allowed_ptr(current, oldmask);
	free_cpumask_var(oldmask);
/* Driver entry point to verify the policy and range of frequencies */
static int powernowk8_verify(struct cpufreq_policy *pol)

	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);

	return cpufreq_frequency_table_verify(pol, data->powernow_table);
struct init_on_cpu {
	struct powernow_k8_data *data;
	int rc;
};

static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)

	struct init_on_cpu *init_on_cpu = _init_on_cpu;

	if (pending_bit_stuck()) {
		printk(KERN_ERR PFX "failing init, change pending bit set\n");
		init_on_cpu->rc = -ENODEV;

	if (query_current_values_with_pending_wait(init_on_cpu->data)) {
		init_on_cpu->rc = -ENODEV;

	if (cpu_family == CPU_OPTERON)
		fidvid_msr_init();

	init_on_cpu->rc = 0;
/* per CPU init entry point to the driver */
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

	static const char ACPI_PSS_BIOS_BUG_MSG[] =
		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
		FW_BUG PFX "Try again with latest BIOS.\n";
	struct powernow_k8_data *data;
	struct init_on_cpu init_on_cpu;
	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);

	if (!cpu_online(pol->cpu))

	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);

	data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
		printk(KERN_ERR PFX "unable to alloc powernow_k8_data");

	data->cpu = pol->cpu;
	data->currpstate = HW_PSTATE_INVALID;

	if (powernow_k8_cpu_init_acpi(data)) {
		/*
		 * Use the PSB BIOS structure. This is only available on
		 * an UP version, and is deprecated by AMD.
		 */
		if (num_online_cpus() != 1) {
			printk_once(ACPI_PSS_BIOS_BUG_MSG);

		if (pol->cpu != 0) {
			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
				"CPU other than CPU0. Complain to your BIOS "

		rc = find_psb_table(data);

		/* Take a crude guess here.
		 * That guess was in microseconds, so multiply with 1000 */
		pol->cpuinfo.transition_latency = (
			((data->rvo + 8) * data->vstable * VST_UNITS_20US) +
			((1 << data->irt) * 30)) * 1000;
	} else /* ACPI _PSS objects available */
		pol->cpuinfo.transition_latency = get_transition_latency(data);

	/* only run on specific CPU from here on */
	init_on_cpu.data = data;
	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
	rc = init_on_cpu.rc;
		goto err_out_exit_acpi;

	if (cpu_family == CPU_HW_PSTATE)
		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
	else
		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
	data->available_cores = pol->cpus;

	if (cpu_family == CPU_HW_PSTATE)
		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
				data->currpstate);
	else
		pol->cur = find_khz_freq_from_fid(data->currfid);
	pr_debug("policy current frequency %d kHz\n", pol->cur);

	/* min/max the cpu is capable of */
	if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
		printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
		powernow_k8_cpu_exit_acpi(data);
		kfree(data->powernow_table);

	/* Check for APERF/MPERF support in hardware */
	if (cpu_has(c, X86_FEATURE_APERFMPERF))
		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;

	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);

	if (cpu_family == CPU_HW_PSTATE)
		pr_debug("cpu_init done, current pstate 0x%x\n",
				data->currpstate);
	else
		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
			data->currfid, data->currvid);

	per_cpu(powernow_data, pol->cpu) = data;

	powernow_k8_cpu_exit_acpi(data);
static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)

	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);

	powernow_k8_cpu_exit_acpi(data);

	cpufreq_frequency_table_put_attr(pol->cpu);

	kfree(data->powernow_table);

	per_cpu(powernow_data, pol->cpu) = NULL;
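/*
 * powernowk8_get() reports the current frequency of a CPU; the MSR query
 * has to run on that CPU, so it is dispatched via smp_call_function_single.
 */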
static void query_values_on_cpu(void *_err)

	struct powernow_k8_data *data = __this_cpu_read(powernow_data);

	*err = query_current_values_with_pending_wait(data);

static unsigned int powernowk8_get(unsigned int cpu)

	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
	unsigned int khz = 0;

	smp_call_function_single(cpu, query_values_on_cpu, &err, true);

	if (cpu_family == CPU_HW_PSTATE)
		khz = find_khz_freq_from_pstate(data->powernow_table,
				data->currpstate);
	else
		khz = find_khz_freq_from_fid(data->currfid);
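/*
 * Helper for cpb_toggle(): read MSR_K7_HWCR on all online CPUs, adjust the
 * boost-disable state (BIT(25), cf. the readback in powernowk8_init below)
 * and write the values back.
 */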
static void _cpb_toggle_msrs(bool t)

	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

	for_each_cpu(cpu, cpu_online_mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

/*
 * Switch on/off core performance boosting.
 */
static void cpb_toggle(bool t)

	if (t && !cpb_enabled) {
		cpb_enabled = true;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting enabled.\n");
	} else if (!t && cpb_enabled) {
		cpb_enabled = false;
		_cpb_toggle_msrs(t);
		printk(KERN_INFO PFX "Core Boosting disabled.\n");
	}
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
		size_t count)

	unsigned long val = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (!ret && (val == 0 || val == 1) && cpb_capable)

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", cpb_enabled);
}

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
static struct freq_attr *powernow_k8_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,

static struct cpufreq_driver cpufreq_amd64_driver = {
	.verify		= powernowk8_verify,
	.target		= powernowk8_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= powernowk8_cpu_init,
	.exit		= __devexit_p(powernowk8_cpu_exit),
	.get		= powernowk8_get,
	.name		= "powernow-k8",
	.owner		= THIS_MODULE,
	.attr		= powernow_k8_attr,
};
/*
 * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
 * cannot block the remaining ones from boosting. On the CPU_UP path we
 * simply keep the boost-disable flag in sync with the current global
 * state.
 */
static int cpb_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)

	unsigned cpu = (long)hcpu;

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:

		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);

static struct notifier_block cpb_nb = {
	.notifier_call	= cpb_notify,
};
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)

	unsigned int i, supported_cpus = 0, cpu;

	for_each_online_cpu(i) {
		smp_call_function_single(i, check_supported_cpu, &rc, 1);

	if (supported_cpus != num_online_cpus())

	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);

	if (boot_cpu_has(X86_FEATURE_CPB)) {

		msrs = msrs_alloc();
			printk(KERN_ERR "%s: Error allocating msrs!\n",
					__func__);

		register_cpu_notifier(&cpb_nb);

		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);

		for_each_cpu(cpu, cpu_online_mask) {
			struct msr *reg = per_cpu_ptr(msrs, cpu);

			cpb_enabled |= !(!!(reg->l & BIT(25)));

		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
			(cpb_enabled ? "on" : "off"));

	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
		unregister_cpu_notifier(&cpb_nb);
/* driver entry point for term */
static void __exit powernowk8_exit(void)

	if (boot_cpu_has(X86_FEATURE_CPB)) {
		unregister_cpu_notifier(&cpb_nb);

	cpufreq_unregister_driver(&cpufreq_amd64_driver);
MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and "
		"Mark Langsdorf <mark.langsdorf@amd.com>");
MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver.");
MODULE_LICENSE("GPL");

late_initcall(powernowk8_init);
module_exit(powernowk8_exit);