/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where, on a CPU,
 * the kernel does not reschedule for 10 seconds or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>
static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int did_panic;
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = softlock_panic,
};
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30;  /* 2^30 ~= 10^9 */
}
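/*
 * Note: one "second" in this unit is really 2^30 ns (~1.074 s), so the
 * once-a-second and 10-second comparisons below are off by roughly 7%;
 * the 10-second report threshold corresponds to about 10.7 s of wall
 * clock time, which is close enough for lockup detection.
 */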
void touch_softlockup_watchdog(void)
{
        int this_cpu = raw_smp_processor_id();

        __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
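/*
 * Illustrative sketch (hypothetical, not from this file): a code path
 * that legitimately stays in the kernel for a long time without
 * rescheduling can call touch_softlockup_watchdog() periodically so the
 * tick handler below does not report a false soft lockup.  The function
 * name and loop are made up purely for illustration.
 */
static void example_long_kernel_loop(void)
{
        int i;

        for (i = 0; i < 1000000; i++) {
                /* ... one chunk of long-running, non-sleeping work ... */

                /* refresh this CPU's timestamp so no lockup is reported */
                touch_softlockup_watchdog();
        }
}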
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /* Cause each CPU to re-update its timestamp rather than complain */
        for_each_online_cpu(cpu)
                per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
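/*
 * Zeroing the timestamps (rather than setting them to "now") makes the
 * next softlockup_tick() on each CPU take the touch_timestamp == 0 path
 * below and silently re-arm itself, so no stale delay is reported after
 * a legitimate long stall such as a lengthy debugger or console dump.
 */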
/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
        int this_cpu = smp_processor_id();
        unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
        unsigned long print_timestamp;
        struct pt_regs *regs = get_irq_regs();
        unsigned long now;

        if (touch_timestamp == 0) {
                touch_softlockup_watchdog();
                return;
        }

        print_timestamp = per_cpu(print_timestamp, this_cpu);

        /* report at most once a second */
        if ((print_timestamp >= touch_timestamp &&
                        print_timestamp < (touch_timestamp + 1)) ||
                        did_panic || !per_cpu(watchdog_task, this_cpu)) {
                return;
        }

        /* do not print during early bootup: */
        if (unlikely(system_state != SYSTEM_RUNNING)) {
                touch_softlockup_watchdog();
                return;
        }

        now = get_timestamp(this_cpu);

        /* Wake up the high-prio watchdog task every second: */
        if (now > (touch_timestamp + 1))
                wake_up_process(per_cpu(watchdog_task, this_cpu));

        /* Warn about unreasonable 10+ seconds delays: */
        if (now <= (touch_timestamp + 10))
                return;

        per_cpu(print_timestamp, this_cpu) = touch_timestamp;

        spin_lock(&print_lock);
        printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n", this_cpu);
        dump_stack();
        if (regs)
                show_regs(regs);
        spin_unlock(&print_lock);
}
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        touch_softlockup_watchdog();

        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 10 seconds then the
         * debug-printout triggers in softlockup_tick().
         */
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                touch_softlockup_watchdog();
                schedule();
        }

        return 0;
}
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                BUG_ON(per_cpu(watchdog_task, hotcpu));
                p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                per_cpu(touch_timestamp, hotcpu) = 0;
                per_cpu(watchdog_task, hotcpu) = p;
                kthread_bind(p, hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(watchdog_task, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(watchdog_task, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(watchdog_task, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                p = per_cpu(watchdog_task, hotcpu);
                per_cpu(watchdog_task, hotcpu) = NULL;
                kthread_stop(p);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};
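/*
 * spawn_softlockup_task() is meant to run once during early boot: it
 * invokes the notifier callback by hand for the boot CPU (CPU_UP_PREPARE
 * followed by CPU_ONLINE), registers cpu_nfb so later hotplug events
 * create and destroy the per-CPU watchdog threads automatically, and
 * hooks the panic notifier so reports are suppressed after a panic.
 */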
__init void spawn_softlockup_task(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}