/*
 * NMI watchdog support on APIC systems
 *
 * Started by Ingo Molnar <mingo@redhat.com>
 *
 * Fixes:
 * Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 * Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 * Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
 * Pavel Machek and
 * Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/percpu.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/kdebug.h>
#include <linux/smp.h>

#include <asm/proto.h>
#include <asm/timer.h>

#include <mach_traps.h>
int unknown_nmi_panic;
int nmi_watchdog_enabled;

static cpumask_t backtrace_mask = CPU_MASK_NONE;
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
EXPORT_SYMBOL(nmi_active);
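
/*
 * Illustrative note on the nmi_active protocol above (an assumption, not
 * code from this file): an outside user of the performance counters, such
 * as the oprofile profiler mentioned in the comment, can test
 * atomic_read(&nmi_active) > 0 to learn whether the lapic watchdog owns
 * the counters, and expects the value to reach 0 once the watchdog has
 * been stopped on every CPU.
 */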
unsigned int nmi_watchdog = NMI_DEFAULT;
EXPORT_SYMBOL(nmi_watchdog);

static int panic_on_timeout;

static unsigned int nmi_hz = HZ;
static DEFINE_PER_CPU(short, wd_enabled);
static int endflag __initdata;
static inline unsigned int get_nmi_count(int cpu)
{
#ifdef CONFIG_X86_64
	return cpu_pda(cpu)->__nmi_count;
#else
	return nmi_count(cpu);
#endif
}
static inline int mce_in_progress(void)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
	return atomic_read(&mce_entry) > 0;
#endif
	return 0;
}
/*
 * Take the local apic timer and PIT/HPET into account. We don't
 * know which one is active when we have highres/dyntick on.
 */
static inline unsigned int get_timer_irqs(int cpu)
{
#ifdef CONFIG_X86_64
	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
#else
	return per_cpu(irq_stat, cpu).apic_timer_irqs +
		per_cpu(irq_stat, cpu).irq0_irqs;
#endif
}
/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}
#ifdef CONFIG_SMP
/*
 * The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy:
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/*
	 * Intentionally don't use cpu_relax here. This is
	 * to make sure that the performance counter really ticks,
	 * even if there is a simulator or similar that catches the
	 * pause instruction. On a real HT machine this is fine because
	 * all other CPUs are busy with "useless" delay loops and don't
	 * care if they get somewhat fewer cycles.
	 */
	while (endflag == 0)
		mb();
}
#endif
int __init check_nmi_watchdog(void)
{
	unsigned int *prev_nmi_count;
	int cpu;

	if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		goto error;

	printk(KERN_INFO "Testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = get_nmi_count(cpu);
	local_irq_enable();
	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
				"appears to be stuck (%d->%d)!\n",
				cpu, prev_nmi_count[cpu],
				get_nmi_count(cpu));
			per_cpu(wd_enabled, cpu) = 0;
			atomic_dec(&nmi_active);
		}
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(prev_nmi_count);
		atomic_set(&nmi_active, -1);
		goto error;
	}
	printk("OK.\n");

	/*
	 * now that we know it works we can reduce NMI frequency to
	 * something more reasonable; makes a difference in some configs
	 */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(prev_nmi_count);
	return 0;

error:
#ifdef CONFIG_X86_32
	timer_ack = !cpu_has_tsc;
#endif
	return -1;
}
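
/*
 * Worked example of the timing in check_nmi_watchdog() (illustrative):
 * one NMI period is 1000 / nmi_hz milliseconds, so the mdelay of
 * (20 * 1000) / nmi_hz ms spans 20 watchdog periods. A healthy CPU
 * should therefore accumulate roughly 20 NMIs during the test, and a
 * count that grew by 5 or fewer is treated as "stuck".
 */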
static int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if (nmi >= NMI_INVALID || nmi < NMI_NONE)
		return 0;

	nmi_watchdog = nmi;
	return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
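
/*
 * Usage sketch for the boot parameter parsed above (illustrative; the
 * numeric values are assumed to follow the NMI_* constants checked in
 * setup_nmi_watchdog):
 *
 *	nmi_watchdog=1		IO-APIC timer based watchdog
 *	nmi_watchdog=2		local APIC performance counter watchdog
 *	nmi_watchdog=panic,2	additionally panic when a lockup is detected
 */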
/*
 * Suspend/resume support
 */
#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}
static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}
static struct sysdev_class nmi_sysclass = {
	.name		= "lapic_nmi",
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};
static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/*
	 * should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work. -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */
static void __acpi_nmi_enable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}
static void __acpi_nmi_disable(void *__unused)
{
	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}
void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled))
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		/* enable it early, to avoid a race with the handler */
		__get_cpu_var(wd_enabled) = 1;
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}
void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if (nmi_watchdog != NMI_LOCAL_APIC &&
	    nmi_watchdog != NMI_IO_APIC)
		return;

	if (__get_cpu_var(wd_enabled) == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();

	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}
/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 *
 * Since NMIs don't listen to _any_ locks, we have to be extremely
 * careful not to rely on unsafe variables. The printk might lock
 * up though, so we have to break up any console locks first...
 * [when there will be more tty-related locks, break them up here too!]
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog == NMI_LOCAL_APIC ||
	    nmi_watchdog == NMI_IO_APIC) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu) {
			if (per_cpu(nmi_touch, cpu) != 1)
				per_cpu(nmi_touch, cpu) = 1;
		}
	}

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
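
/*
 * Typical caller pattern (illustrative sketch, not from this file): code
 * that legitimately keeps a CPU busy for a long stretch, e.g. polling
 * hardware with interrupts off, is expected to call touch_nmi_watchdog()
 * periodically so its alert counter keeps getting reset:
 *
 *	while (!controller_ready(dev)) {	// hypothetical helper
 *		touch_nmi_watchdog();
 *		udelay(100);
 *	}
 */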
notrace __kprobes int
nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	unsigned int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = get_timer_irqs(cpu);

	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

	/* Could check oops_in_progress here too, but it's safer not to */
	if (mce_in_progress())
		touched = 1;

	/* if none of the timers is firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens.
			 */
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
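
	/*
	 * Note on the threshold above (illustrative): this handler runs
	 * roughly nmi_hz times per second on each CPU, so an alert_counter
	 * that climbs to 5 * nmi_hz means about five seconds passed without
	 * the timer IRQ counts moving, matching the "5 seconds" comment.
	 */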
	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/*
		 * don't know how to accurately check for this;
		 * just assume it was a watchdog timer interrupt.
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}
#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}
/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
		     void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
		printk(KERN_WARNING
			"NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_NONE) {
		if (lapic_watchdog_ok())
			nmi_watchdog = NMI_LOCAL_APIC;
		else
			nmi_watchdog = NMI_IO_APIC;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
#endif /* CONFIG_SYSCTL */
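
/*
 * Usage sketch for the proc handler above (illustrative; the exact proc
 * path is assumed from the "/proc/sys/kernel/nmi" comment and the
 * nmi_watchdog_enabled flag it drives):
 *
 *	# echo 0 > /proc/sys/kernel/nmi_watchdog	disable at runtime
 *	# echo 1 > /proc/sys/kernel/nmi_watchdog	re-enable
 */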
int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}