/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_mutex);

int __read_mostly nmi_watchdog_enabled;
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
						NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}
/*
 * watchdog_nmi_reconfigure can be implemented to be notified after any
 * watchdog configuration change. The arch hardlockup watchdog should
 * respond to the following variables:
 * - nmi_watchdog_enabled
 * - sysctl_hardlockup_all_cpu_backtrace
 */
void __weak watchdog_nmi_reconfigure(void)
{
}
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a fixed factor: the soft threshold is twice the
 * hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
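
/*
 * For example, with the default watchdog_thresh of 10 seconds the hard
 * lockup threshold is 10s and the soft lockup threshold is 20s.
 */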
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns ~= 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 is to give the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
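
/*
 * With the defaults this works out to: soft threshold 20s, so
 * sample_period = 20s / 5 = 4s - the watchdog hrtimer fires roughly
 * every four seconds on each watchdog CPU.
 */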
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless. Do we care if a 0 races with a timestamp?
	 * All it means is that the softlockup check starts one cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
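
/*
 * For illustration: with touch_ts == 100 (seconds), now == 125 and the
 * default 20s soft threshold, is_softlockup() returns 25 - the number
 * of seconds the watchdog thread has been kept off the CPU - which is
 * what watchdog_timer_fn() prints as the "stuck for %us" duration.
 */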
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
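
/*
 * is_hardlockup() is called from NMI context by the perf based
 * hardlockup detector. hrtimer_interrupts is only incremented from the
 * (maskable) hrtimer callback, so an unchanged count across two NMI
 * checks means interrupts have been blocked for the entire period -
 * the signature of a hard lockup.
 */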
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/*
	 * Check for a softlockup: this is done by making sure a high
	 * priority task is being scheduled. The task touches the watchdog
	 * to indicate it is getting cpu time. If it hasn't, this is a
	 * good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;
		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}
		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}
		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}
		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
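
/*
 * To summarize the timer function: every sample_period it increments
 * hrtimer_interrupts (proving to the hardlockup detector that normal
 * interrupts still run), wakes the per-cpu watchdog thread (which must
 * get CPU time to refresh watchdog_touch_ts), and reports a soft
 * lockup at most once per quiet cycle once the timestamp is older than
 * the soft threshold.
 */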
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first, so that a large delay between
	 * disabling the timer and disabling the perf event cannot cause
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
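
/*
 * This comparison makes the smpboot thread run exactly once per hrtimer
 * tick: watchdog() below copies hrtimer_interrupts into
 * soft_lockup_hrtimer_cnt, so the thread sleeps again until the timer
 * callback increments the counter.
 */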
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}
	return ret;
}
/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}
static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}
#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void)
{
	return smpboot_update_cpumask_percpu_thread(
		    &watchdog_threads, &watchdog_cpumask);
}
#endif
#else /* SOFTLOCKUP */
static int watchdog_park_threads(void) { return 0; }
static void watchdog_unpark_threads(void) { }
static int watchdog_enable_all_cpus(void) { return 0; }
static void watchdog_disable_all_cpus(void) { }
#ifdef CONFIG_SYSCTL
static int watchdog_update_cpus(void) { return 0; }
#endif
static void set_sample_period(void) { }
#endif /* SOFTLOCKUP */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}
/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	watchdog_nmi_reconfigure();

	__lockup_detector_cleanup();

	return err;
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);
	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;
		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);
		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
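
/*
 * For example, writing 0 to /proc/sys/kernel/watchdog clears both
 * enable bits and stops the detectors; writing 1 sets both bits and
 * starts them again.
 */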
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	cpu_hotplug_disable();
	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (watchdog_update_cpus() != 0)
				pr_err("cpumask update failed\n");
		}

		watchdog_nmi_reconfigure();
		__lockup_detector_cleanup();
	}

	mutex_unlock(&watchdog_mutex);
	cpu_hotplug_enable();
	return err;
}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}
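
/*
 * Boot parameters handled above: "nowatchdog" disables both detectors,
 * "nosoftlockup" clears only SOFT_WATCHDOG_ENABLED,
 * "softlockup_panic=<0|1>" sets panic-on-soft-lockup, and
 * "nmi_watchdog=<panic|nopanic|0|1>" controls the hardlockup detector
 * and its panic behaviour.
 */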