/*
 * linux/arch/x86_64/nmi.c
 *
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson : Power Management for local APIC NMI watchdog.
 * Mikael Pettersson : PM converted to driver model. Disable/enable API.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/nmi.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 * - different performance counters/event selection may be reserved for
 *   different subsystems; this reservation system just tries to coordinate
 *   them.
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}

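/*
 * Editor's sketch (not in the original source): a subsystem that wants to
 * drive a counter itself would claim both MSRs first, e.g.
 *
 *	if (reserve_perfctr_nmi(MSR_K7_PERFCTR0) &&
 *	    reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		... program the counter ...
 *	}
 *
 * and call release_evntsel_nmi()/release_perfctr_nmi() when it is done.
 * The setup_*_watchdog() routines below follow this pattern.
 */
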
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

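/*
 * Editor's note: the effect is that a CPU the perfctr-based watchdog knows
 * how to drive (see nmi_known_cpu() above) defaults to the local APIC
 * watchdog, while anything else falls back to the IO-APIC timer-driven one.
 */
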
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
int __init check_nmi_watchdog (void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		atomic_set(&nmi_active, -1);
	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		/*
		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
		 * are writable, with higher bits sign extending from bit 31.
		 * So, we can only program the counter with 31 bit values and
		 * the 32nd bit must be 1, so that bits 33..63 sign-extend to 1.
		 * Find the appropriate nmi_hz.
		 */
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
		    ((u64)cpu_khz * 1000) > 0x7fffffffULL) {
			nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
		}
	}
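	/*
	 * Editor's note (worked example, not from the original): with a 3 GHz
	 * CPU, cpu_khz * 1000 = 3,000,000,000 > 0x7fffffff, so nmi_hz becomes
	 * 3e9 / 0x7fffffff + 1 = 2 and the preload value (about 1.5e9) again
	 * fits in the 31 writable bits of an ARCH_PERFMON counter.
	 */
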
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;	/* no lapic support */

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if ( atomic_read(&nmi_active) < 0 )
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
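	/*
	 * Editor's note (illustrative, not in the original): the counter is
	 * preloaded with a negative count so it overflows after roughly
	 * 1/nmi_hz seconds of unhalted cycles.  E.g. on a 2 GHz CPU with
	 * nmi_hz == 1 this writes -2,000,000,000; the counter then counts up
	 * and the overflow raises the NMI about a second later.
	 */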
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<63;
	return 1;
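	/*
	 * Editor's note: because the perfctr was loaded with a negative
	 * value, its top bit stays set until the counter overflows.
	 * nmi_watchdog_tick() below tests this check_bit to tell a real
	 * watchdog overflow apart from an unrelated NMI.
	 */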
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */

static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
		ht_num = 0;
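	/*
	 * Editor's note: CPUID(1).EBX[31:24] holds the initial APIC ID; on a
	 * two-sibling hyperthreaded package its low bit says which logical
	 * CPU this is, and that choice selects the IQ counter/CCCR set
	 * programmed below.
	 */
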
	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
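	/*
	 * Editor's note: the P4 IQ counters are 40 bits wide, so bit 39 is
	 * the top bit here and plays the same "has the preloaded counter
	 * overflowed yet" role that bit 63 does in setup_k7_watchdog().
	 */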
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
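	/*
	 * Editor's note: unlike the hard-coded K7/P4 cases, ARCH_PERFMON
	 * reports its counter width in CPUID leaf 0xA, so the check bit is
	 * derived from eax.split.bit_width rather than assumed.
	 */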
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}

void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}

int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
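	/*
	 * Editor's note: alert_counter is bumped once per watchdog NMI, i.e.
	 * nmi_hz times a second, so the 5*nmi_hz threshold above corresponds
	 * to roughly five seconds without a local APIC timer interrupt.
	 */
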
	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/* P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfmon/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);