/*
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/sysctl.h>
20 * The number of tasks checked:
22 unsigned long __read_mostly sysctl_hung_task_check_count
= PID_MAX_LIMIT
;
25 * Limit number of tasks checked in a batch.
27 * This value controls the preemptibility of khungtaskd since preemption
28 * is disabled during the critical section. It also controls the size of
29 * the RCU grace period. So it needs to be upper-bound.
31 #define HUNG_TASK_BATCHING 1024
34 * Zero means infinite timeout - no checking done:
36 unsigned long __read_mostly sysctl_hung_task_timeout_secs
= 120;
37 static unsigned long __read_mostly hung_task_poll_jiffies
;
39 unsigned long __read_mostly sysctl_hung_task_warnings
= 10;
41 static int __read_mostly did_panic
;
43 static struct task_struct
*watchdog_task
;
46 * Should we panic (and reboot, if panic_timeout= is set) when a
47 * hung task is detected:
49 unsigned int __read_mostly sysctl_hung_task_panic
=
50 CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE
;
52 static int __init
hung_task_panic_setup(char *str
)
54 sysctl_hung_task_panic
= simple_strtoul(str
, NULL
, 0);
58 __setup("hung_task_panic=", hung_task_panic_setup
);
61 hung_task_panic(struct notifier_block
*this, unsigned long event
, void *ptr
)
68 static struct notifier_block panic_block
= {
69 .notifier_call
= hung_task_panic
,
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	int this_cpu = raw_smp_processor_id();

	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
84 static void check_hung_task(struct task_struct
*t
, unsigned long now
,
85 unsigned long timeout
)
87 unsigned long switch_count
= t
->nvcsw
+ t
->nivcsw
;
89 if (t
->flags
& PF_FROZEN
)
92 if (switch_count
!= t
->last_switch_count
|| !t
->last_switch_timestamp
) {
93 t
->last_switch_count
= switch_count
;
94 t
->last_switch_timestamp
= now
;
97 if ((long)(now
- t
->last_switch_timestamp
) < timeout
)
99 if (!sysctl_hung_task_warnings
)
101 sysctl_hung_task_warnings
--;
104 * Ok, the task did not get scheduled for more than 2 minutes,
107 printk(KERN_ERR
"INFO: task %s:%d blocked for more than "
108 "%ld seconds.\n", t
->comm
, t
->pid
, timeout
);
109 printk(KERN_ERR
"\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
110 " disables this message.\n");
112 __debug_show_held_locks(t
);
114 t
->last_switch_timestamp
= now
;
115 touch_nmi_watchdog();
117 if (sysctl_hung_task_panic
)
118 panic("hung_task: blocked tasks");
/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * exit the grace period. For classic RCU, a reschedule is required.
 *
 * Pin @g and @t across the break so the thread-list walk can resume;
 * the caller must re-check both tasks for TASK_DEAD afterwards.
 */
static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	put_task_struct(t);
	put_task_struct(g);
}
140 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
141 * a really long time (120 seconds). If that happens, print out
144 static void check_hung_uninterruptible_tasks(unsigned long timeout
)
146 int max_count
= sysctl_hung_task_check_count
;
147 int batch_count
= HUNG_TASK_BATCHING
;
148 unsigned long now
= get_timestamp();
149 struct task_struct
*g
, *t
;
152 * If the system crashed already then all bets are off,
153 * do not report extra hung tasks:
155 if (test_taint(TAINT_DIE
) || did_panic
)
159 do_each_thread(g
, t
) {
162 if (!--batch_count
) {
163 batch_count
= HUNG_TASK_BATCHING
;
164 rcu_lock_break(g
, t
);
165 /* Exit if t or g was unhashed during refresh. */
166 if (t
->state
== TASK_DEAD
|| g
->state
== TASK_DEAD
)
169 /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
170 if (t
->state
== TASK_UNINTERRUPTIBLE
)
171 check_hung_task(t
, now
, timeout
);
172 } while_each_thread(g
, t
);
177 static void update_poll_jiffies(void)
179 /* timeout of 0 will disable the watchdog */
180 if (sysctl_hung_task_timeout_secs
== 0)
181 hung_task_poll_jiffies
= MAX_SCHEDULE_TIMEOUT
;
183 hung_task_poll_jiffies
= sysctl_hung_task_timeout_secs
* HZ
/ 2;
187 * Process updating of timeout sysctl
189 int proc_dohung_task_timeout_secs(struct ctl_table
*table
, int write
,
190 struct file
*filp
, void __user
*buffer
,
191 size_t *lenp
, loff_t
*ppos
)
195 ret
= proc_doulongvec_minmax(table
, write
, filp
, buffer
, lenp
, ppos
);
200 update_poll_jiffies();
202 wake_up_process(watchdog_task
);
209 * kthread which checks for tasks stuck in D state
211 static int watchdog(void *dummy
)
213 set_user_nice(current
, 0);
214 update_poll_jiffies();
217 unsigned long timeout
;
219 while (schedule_timeout_interruptible(hung_task_poll_jiffies
));
222 * Need to cache timeout here to avoid timeout being set
223 * to 0 via sysctl while inside check_hung_*_tasks().
225 timeout
= sysctl_hung_task_timeout_secs
;
227 check_hung_uninterruptible_tasks(timeout
);
233 static int __init
hung_task_init(void)
235 atomic_notifier_chain_register(&panic_notifier_list
, &panic_block
);
236 watchdog_task
= kthread_run(watchdog
, NULL
, "khungtaskd");
241 module_init(hung_task_init
);