/* local apic based NMI watchdog for various CPUs.
   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>
struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static struct wd_ops *wd_ops;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/ event selection may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        /* returns the bit offset of the performance counter register */
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_PERFCTR0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_PERFCTR0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_PERFCTR0);
                case 15:
                        return (msr - MSR_P4_BPU_PERFCTR0);
                }
        }
        return 0;
}
/* converts an msr to an appropriate reservation bit */
/* returns the bit offset of the event selection register */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                return (msr - MSR_K7_EVNTSEL0);
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                        return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

                switch (boot_cpu_data.x86) {
                case 6:
                        return (msr - MSR_P6_EVNTSEL0);
                case 15:
                        return (msr - MSR_P4_BSU_ESCR0);
                }
        }
        return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}
int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}
void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        clear_bit(counter, perfctr_nmi_owner);
}
int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}
void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
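/*
 * Sketch of how an external user (e.g. oprofile) is expected to coordinate
 * through the exports above; the K7 MSRs here are only an illustration:
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return;		// counter already owned (e.g. by the watchdog)
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return;
 *	}
 *	... program and use the counter ...
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 *
 * The reserve_* helpers return 1 on success and 0 if the bit is already owned.
 */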
void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
        wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
        touch_nmi_watchdog();
}
/*
 * Activate the NMI watchdog via the local APIC.
 */
static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with higher bits sign extending from bit 31.
         * So, we can only program the counter with 31 bit values and the
         * 32nd bit should be 1, so that bits 33 and up sign extend to 1.
         * Find the appropriate nmi_hz.
         */
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;
                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}
static void
write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(perfctr_msr, 0 - count);
}
static void write_watchdog_counter32(unsigned int perfctr_msr,
                        const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsr(perfctr_msr, (u32)(-count), 0);
}
/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
   nicely stable so there is not much variety */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        return 1;
}
static void single_msr_stop_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->evntsel_msr, 0, 0);
}
static int single_msr_reserve(void)
{
        if (!reserve_perfctr_nmi(wd_ops->perfctr))
                return 0;

        if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
                release_perfctr_nmi(wd_ops->perfctr);
                return 0;
        }
        return 1;
}
static void single_msr_unreserve(void)
{
        release_evntsel_nmi(wd_ops->evntsel);
        release_perfctr_nmi(wd_ops->perfctr);
}
static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops k7_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_k7_watchdog,
        .rearm = single_msr_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_K7_PERFCTR0,
        .evntsel = MSR_K7_EVNTSEL0,
        .checkbit = 1ULL<<47,
};
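/*
 * The K7/K8 performance counters are 48 bits wide, so bit 47 is the top bit:
 * while the programmed (negative) count is still counting up towards overflow
 * that bit stays set, which is what lapic_wd_event() tests via ->checkbit.
 */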
/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */

#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        /* KVM doesn't implement this MSR */
        if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
                return 0;

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
                | P6_EVNTSEL_USR
                | P6_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= P6_EVNTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        return 1;
}
static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* P6 based Pentium M needs to re-unmask
         * the apic vector, but it doesn't hurt
         * other P6 variants.
         * ArchPerfmon/Core Duo also needs this */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* P6/ARCH_PERFMON has 32 bit counter write */
        write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops p6_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_p6_watchdog,
        .rearm = p6_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_P6_PERFCTR0,
        .evntsel = MSR_P6_EVNTSEL0,
        .checkbit = 1ULL<<39,
};
/* Intel P4 performance counters. By far the most complicated of all. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
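/*
 * In other words: with COMPARE and COMPLEMENT set and THRESHOLD at its
 * maximum (15), the CCCR counts every cycle in which the per-cycle event
 * count is "not above" the threshold, which is every cycle, so IQ_COUNTER0
 * ticks at the CPU clock rate regardless of which event CRU_ESCR0 selects.
 */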
static int setup_p4_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr, cccr_msr;
        unsigned int evntsel, cccr_val;
        unsigned int misc_enable, dummy;
        unsigned int ht_num;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

#ifdef CONFIG_SMP
        /* detect which hyperthread we are on */
        if (smp_num_siblings == 2) {
                unsigned int ebx, apicid;

                ebx = cpuid_ebx(1);
                apicid = (ebx >> 24) & 0xff;
                ht_num = apicid & 1;
        } else
#endif
                ht_num = 0;

        /* performance counters are shared resources
         * assign each hyperthread its own set
         * (re-use the ESCR0 register, seems safe
         * and keeps the cccr_val the same)
         */
        if (!ht_num) {
                /* logical cpu 0 */
                perfctr_msr = MSR_P4_IQ_PERFCTR0;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR0;
                cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
        } else {
                /* logical cpu 1 */
                perfctr_msr = MSR_P4_IQ_PERFCTR1;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR1;
                cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
        }

        evntsel = P4_ESCR_EVENT_SELECT(0x3F)
                | P4_ESCR_OS
                | P4_ESCR_USR;

        cccr_val |= P4_CCCR_THRESHOLD(15)
                 | P4_CCCR_COMPLEMENT
                 | P4_CCCR_COMPARE
                 | P4_CCCR_REQUIRED;

        wrmsr(evntsel_msr, evntsel, 0);
        wrmsr(cccr_msr, cccr_val, 0);
        write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        cccr_val |= P4_CCCR_ENABLE;
        wrmsr(cccr_msr, cccr_val, 0);
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = cccr_msr;
        return 1;
}
static void stop_p4_watchdog(void)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->cccr_msr, 0, 0);
        wrmsr(wd->evntsel_msr, 0, 0);
}
static int p4_reserve(void)
{
        if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
                return 0;
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
                goto fail1;
#endif
        if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
                goto fail2;
        /* RED-PEN why is ESCR1 not reserved here? */
        return 1;
fail2:
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
fail1:
#endif
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
        return 0;
}
static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
        release_evntsel_nmi(MSR_P4_CRU_ESCR0);
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}
static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        unsigned dummy;
        /*
         * P4 quirks:
         * - An overflown perfctr will assert its interrupt
         *   until the OVF flag in its CCCR is cleared.
         * - LVTPC is masked on interrupt and must be
         *   unmasked by the LVTPC handler.
         */
        rdmsrl(wd->cccr_msr, dummy);
        dummy &= ~P4_CCCR_OVF;
        wrmsrl(wd->cccr_msr, dummy);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops p4_wd_ops = {
        .reserve = p4_reserve,
        .unreserve = p4_unreserve,
        .setup = setup_p4_watchdog,
        .rearm = p4_rearm,
        .stop = stop_p4_watchdog,
        /* RED-PEN this is wrong for the other sibling */
        .perfctr = MSR_P4_BPU_PERFCTR0,
        .evntsel = MSR_P4_BSU_ESCR0,
        .checkbit = 1ULL<<39,
};
/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
   all future Intel CPUs. */

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
        unsigned int ebx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
            (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;

        perfctr_msr = wd_ops->perfctr;
        evntsel_msr = wd_ops->evntsel;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = ARCH_PERFMON_EVENTSEL_INT
                | ARCH_PERFMON_EVENTSEL_OS
                | ARCH_PERFMON_EVENTSEL_USR
                | ARCH_PERFMON_NMI_EVENT_SEL
                | ARCH_PERFMON_NMI_EVENT_UMASK;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
        return 1;
}
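/*
 * Unlike the fixed checkbit values in the other wd_ops, the architected
 * PerfMon counter width is reported by CPUID, so the top bit used for the
 * overflow check is computed at setup time from eax.split.bit_width.
 */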
static struct wd_ops intel_arch_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_intel_arch_watchdog,
        .rearm = p6_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
        .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};
static struct wd_ops coreduo_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_intel_arch_watchdog,
        .rearm = p6_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
        .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
};
static void probe_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
                    boot_cpu_data.x86 != 16)
                        return;
                wd_ops = &k7_wd_ops;
                break;
        case X86_VENDOR_INTEL:
                /* Work around Core Duo (Yonah) errata AE49 where perfctr1
                   doesn't have a working enable bit. */
                if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
                        wd_ops = &coreduo_wd_ops;
                        break;
                }
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        wd_ops = &intel_arch_wd_ops;
                        break;
                }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
                                return;

                        wd_ops = &p6_wd_ops;
                        break;
                case 15:
                        if (boot_cpu_data.x86_model > 0x4)
                                return;

                        wd_ops = &p4_wd_ops;
                        break;
                default:
                        return;
                }
                break;
        }
}
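/*
 * Note that an unsupported vendor or model simply leaves wd_ops NULL;
 * lapic_watchdog_init() then fails and lapic_watchdog_ok() reports the
 * perfctr based watchdog as unavailable.
 */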
/* Interface to nmi.c */
int lapic_watchdog_init(unsigned nmi_hz)
{
        if (!wd_ops) {
                probe_nmi_watchdog();
                if (!wd_ops)
                        return -1;

                if (!wd_ops->reserve()) {
                        printk(KERN_ERR
                                "NMI watchdog: cannot reserve perfctrs\n");
                        return -1;
                }
        }

        if (!(wd_ops->setup(nmi_hz))) {
                printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
                       raw_smp_processor_id());
                return -1;
        }

        return 0;
}
void lapic_watchdog_stop(void)
{
        if (wd_ops)
                wd_ops->stop();
}
unsigned lapic_adjust_nmi_hz(unsigned hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
            wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
                hz = adjust_for_32bit_ctr(hz);
        return hz;
}
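/*
 * Called from the NMI handler for each watchdog tick: if the top bit
 * (wd_ops->checkbit) of the counter is still set, the programmed negative
 * value has not overflowed yet and this NMI was not ours; otherwise the
 * counter is reprogrammed for the next period.
 */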
int lapic_wd_event(unsigned nmi_hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        u64 ctr;

        rdmsrl(wd->perfctr_msr, ctr);
        if (ctr & wd_ops->checkbit) { /* perfctr still running? */
                return 0;
        }
        wd_ops->rearm(wd, nmi_hz);
        return 1;
}
int lapic_watchdog_ok(void)
{
        return wd_ops != NULL;
}