/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection
 *    - different performance counters/ event selection may be reserved for
 *      different subsystems; this reservation system just tries to coordinate
 *      things a little.
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;	/* set by the nmi_watchdog=panic boot option below */

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

struct nmi_watchdog_ctlblk {
	int enabled;		/* this CPU's watchdog has been set up */
	u64 check_bit;		/* perfctr bit tested in the NMI handler */
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
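
/*
 * Illustrative use of the helpers above (assuming an AMD K7-family CPU):
 * a subsystem such as oprofile that wants the first counter/event-select
 * pair would do roughly
 *
 *	if (reserve_perfctr_nmi(MSR_K7_PERFCTR0) &&
 *	    reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		... program and use the counter ...
 *		release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *	}
 *
 * which sets and clears bit 0 of the per-cpu perfctr_nmi_owner and
 * evntsel_nmi_owner bitmaps, so the NMI watchdog and other users can
 * avoid programming the same hardware counter at the same time.
 */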

static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		else
			return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

static int endflag __initdata = 0;

/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (endflag == 0)
		mb();
}

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values; bit 31
	 * must be set so the upper bits sign extend to 1.
	 * Find the appropriate nmi_hz.
	 */
	if ((((u64)cpu_khz * 1000) / retval) > 0x7fffffffULL) {
		retval = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
	}
	return retval;
}
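
/*
 * Worked example (illustrative figures): with cpu_khz = 3000000 (a 3 GHz
 * CPU) and a requested rate of 1 Hz, the desired period is 3,000,000,000
 * cycles, which exceeds the 0x7fffffff (2,147,483,647) maximum that fits
 * in the 31 writable magnitude bits.  The code above then returns
 * 3000000000 / 0x7fffffff + 1 == 2, so the watchdog fires twice per
 * second with a period of 1,500,000,000 cycles, which fits.
 */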

int __init check_nmi_watchdog (void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((10*1000)/nmi_hz); /* wait 10 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		endflag = 1;
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	}

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
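
/*
 * Usage note (illustrative): the watchdog mode is picked on the kernel
 * command line using the NMI_* constants from <asm/nmi.h>.  Assuming
 * NMI_LOCAL_APIC is 2 and NMI_NONE is 0 in that header, for example
 *
 *	nmi_watchdog=panic,2	- local APIC watchdog, panic on lockup
 *	nmi_watchdog=0		- watchdog disabled
 *
 * where "panic" simply sets panic_on_timeout above before the number is
 * parsed.
 */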

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);

	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	}
}

static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if ( atomic_read(&nmi_active) < 0 )
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
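
/*
 * Sketch of the programming model used below (illustrative figures): the
 * K7 counter is set up to count event 0x76 (cycles in which the processor
 * is running) and is seeded with the negative period
 * -(cpu_khz * 1000 / nmi_hz), e.g. roughly -2,200,000,000 for a 2.2 GHz
 * CPU with nmi_hz == 1.  When the counter counts up past zero it
 * overflows and raises a performance counter interrupt, which LVTPC
 * (programmed to APIC_DM_NMI below) delivers as an NMI; nmi_watchdog_tick()
 * then re-arms the counter with the same negative value.
 */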

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL<<63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
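
/*
 * Illustrative breakdown of the values composed in setup_p4_watchdog()
 * below: for logical cpu 0 the CCCR ends up as
 *
 *	P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4) | P4_CCCR_THRESHOLD(15) |
 *	P4_CCCR_COMPLEMENT | P4_CCCR_COMPARE | P4_CCCR_REQUIRED
 *
 * i.e. compare every event delivered through CRU_ESCR0 against a
 * complemented threshold of 15, which is true on every cycle, so
 * IQ_PERFCTR0 behaves like a free-running cycle counter whose overflow
 * raises PMI0 as an NMI.
 */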

static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL<<39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		goto fail;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);

	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	wrmsr(perfctr_msr, (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}

void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;

	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog (void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu (cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}

int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfmon/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
				/* ARCH_PERFMON has 32 bit counter writes */
				wrmsr(wd->perfctr_msr,
				      (u32)(-((u64)cpu_khz * 1000 / nmi_hz)), 0);
			} else {
				/* start the cycle over again */
				wrmsrl(wd->perfctr_msr,
				       -((u64)cpu_khz * 1000 / nmi_hz));
			}
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}

void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
);
1006 EXPORT_SYMBOL(nmi_watchdog
);
1007 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi
);
1008 EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit
);
1009 EXPORT_SYMBOL(reserve_perfctr_nmi
);
1010 EXPORT_SYMBOL(release_perfctr_nmi
);
1011 EXPORT_SYMBOL(reserve_evntsel_nmi
);
1012 EXPORT_SYMBOL(release_evntsel_nmi
);
1013 EXPORT_SYMBOL(disable_timer_nmi_watchdog
);
1014 EXPORT_SYMBOL(enable_timer_nmi_watchdog
);
1015 EXPORT_SYMBOL(touch_nmi_watchdog
);