/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * This code detects soft lockups: incidents where, on a CPU, the
 * kernel does not reschedule for more than softlockup_thresh seconds
 * (60 by default).
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

#include <asm/irq_regs.h>
static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int __read_mostly did_panic;
unsigned long __read_mostly softlockup_thresh = 60;
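/*
 * Note: softlockup_thresh is measured in the approximate seconds
 * returned by get_timestamp() below; a CPU is reported once it has
 * gone untouched for more than this many of them.
 */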
static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = softlock_panic,
};
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
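/*
 * Worked example: cpu_clock() = 5,000,000,000 ns (5 s) gives
 * 5e9 >> 30 = 4, since each step of this clock is 2^30 ns = 1.074 s;
 * the small undercount is irrelevant at watchdog granularity.
 */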
void touch_softlockup_watchdog(void)
{
        int this_cpu = raw_smp_processor_id();

        __raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
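/*
 * Note: the raw_ accessors skip the usual "preemption disabled?"
 * debug checks - callers may legitimately touch the watchdog from
 * preemptible context, where racing with a migration merely updates
 * some CPU's timestamp, which is harmless here.
 */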
void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /* Cause each CPU to re-update its timestamp rather than complain */
        for_each_online_cpu(cpu)
                per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
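/*
 * Note: zero is not a real timestamp; softlockup_tick() treats it as
 * "re-arm this CPU" and refreshes the stamp instead of comparing it
 * against the threshold.
 */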
/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
        int this_cpu = smp_processor_id();
        unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
        unsigned long print_timestamp;
        struct pt_regs *regs = get_irq_regs();
        unsigned long now;

        if (touch_timestamp == 0) {
                touch_softlockup_watchdog();
                return;
        }

        print_timestamp = per_cpu(print_timestamp, this_cpu);

        /* report at most once a second */
        if ((print_timestamp >= touch_timestamp &&
                        print_timestamp < (touch_timestamp + 1)) ||
                        did_panic || !per_cpu(watchdog_task, this_cpu)) {
                return;
        }

        /* do not print during early bootup: */
        if (unlikely(system_state != SYSTEM_RUNNING)) {
                touch_softlockup_watchdog();
                return;
        }

        now = get_timestamp(this_cpu);

        /* Warn about unreasonable delays: */
        if (now <= (touch_timestamp + softlockup_thresh))
                return;

        per_cpu(print_timestamp, this_cpu) = touch_timestamp;

        spin_lock(&print_lock);
        printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
                        this_cpu, now - touch_timestamp,
                        current->comm, task_pid_nr(current));
        if (regs)
                show_regs(regs);
        else
                dump_stack();
        spin_unlock(&print_lock);
}
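/*
 * Note on the rate limit above: print_timestamp stores the
 * touch_timestamp we last reported against, and with one-second
 * timestamp granularity the condition print_timestamp >= touch &&
 * print_timestamp < touch + 1 is simply "already reported for this
 * stamp", giving at most one report per second per CPU.
 */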
/*
 * Have a reasonable limit on the number of tasks checked:
 */
unsigned long __read_mostly sysctl_hung_task_check_count = 1024;

/*
 * Zero means infinite timeout - no checking done:
 */
unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120;

unsigned long __read_mostly sysctl_hung_task_warnings = 10;

/*
 * Only do the hung-tasks check on one CPU:
 */
static int check_cpu __read_mostly = -1;
static void check_hung_task(struct task_struct *t, unsigned long now)
{
        unsigned long switch_count = t->nvcsw + t->nivcsw;

        if (t->flags & PF_FROZEN)
                return;

        if (switch_count != t->last_switch_count || !t->last_switch_timestamp) {
                t->last_switch_count = switch_count;
                t->last_switch_timestamp = now;
                return;
        }
        if ((long)(now - t->last_switch_timestamp) <
                                        sysctl_hung_task_timeout_secs)
                return;
        /* cast: the counter is unsigned, so a plain < 0 test is never true */
        if ((long)sysctl_hung_task_warnings < 0)
                return;
        sysctl_hung_task_warnings--;

        /*
         * Ok, the task did not get scheduled for more than 2 minutes,
         * complain:
         */
        printk(KERN_ERR "INFO: task %s:%d blocked for more than "
                        "%ld seconds.\n", t->comm, t->pid,
                        sysctl_hung_task_timeout_secs);
        printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
                        " disables this message.\n");
        sched_show_task(t);
        __debug_show_held_locks(t);

        t->last_switch_timestamp = now;
        touch_nmi_watchdog();
}
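/*
 * Note: nvcsw and nivcsw count the task's voluntary and involuntary
 * context switches; if their sum is unchanged since the previous scan
 * the task has not run at all in the interval, which is what makes it
 * a hung-task candidate.
 */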
/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(int this_cpu)
{
        int max_count = sysctl_hung_task_check_count;
        unsigned long now = get_timestamp(this_cpu);
        struct task_struct *g, *t;

        /*
         * If the system crashed already then all bets are off,
         * do not report extra hung tasks:
         */
        if ((tainted & TAINT_DIE) || did_panic)
                return;

        read_lock(&tasklist_lock);
        do_each_thread(g, t) {
                if (!--max_count)
                        goto unlock;
                if (t->state & TASK_UNINTERRUPTIBLE)
                        check_hung_task(t, now);
        } while_each_thread(g, t);
 unlock:
        read_unlock(&tasklist_lock);
}
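/*
 * Note: do_each_thread()/while_each_thread() visit every thread of
 * every process; max_count bounds the walk so that a very large task
 * list cannot hold tasklist_lock for an unbounded time.
 */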
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        int this_cpu = (long)__bind_cpu;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        touch_softlockup_watchdog();

        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 60 seconds then the
         * debug-printout triggers in softlockup_tick().
         */
        while (!kthread_should_stop()) {
                touch_softlockup_watchdog();
                msleep_interruptible(1000);

                if (this_cpu != check_cpu)
                        continue;

                if (sysctl_hung_task_timeout_secs)
                        check_hung_uninterruptible_tasks(this_cpu);
        }

        return 0;
}
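/*
 * Note: SCHED_FIFO at MAX_RT_PRIO-1 is the highest realtime priority,
 * so the only way this thread can miss its once-a-second touch for
 * softlockup_thresh seconds is if the CPU never reschedules at all -
 * exactly the condition being detected.
 */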
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                BUG_ON(per_cpu(watchdog_task, hotcpu));
                p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                per_cpu(touch_timestamp, hotcpu) = 0;
                per_cpu(watchdog_task, hotcpu) = p;
                kthread_bind(p, hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                check_cpu = any_online_cpu(cpu_online_map);
                wake_up_process(per_cpu(watchdog_task, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(watchdog_task, hotcpu))
                        break;
                /* Unbind so it can run. Fall thru. */
                kthread_bind(per_cpu(watchdog_task, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                if (hotcpu == check_cpu) {
                        cpumask_t temp_cpu_online_map = cpu_online_map;

                        cpu_clear(hotcpu, temp_cpu_online_map);
                        check_cpu = any_online_cpu(temp_cpu_online_map);
                }
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                p = per_cpu(watchdog_task, hotcpu);
                per_cpu(watchdog_task, hotcpu) = NULL;
                kthread_stop(p);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}
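/*
 * Note: at CPU_DOWN_PREPARE time the departing CPU is still set in
 * cpu_online_map, so the hand-off works on a local copy with that bit
 * cleared before picking a new check_cpu.
 */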
static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

__init void spawn_softlockup_task(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}