 * Copyright (c) 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_shutdown.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_shutdown.c,v 1.72.2.12 2002/02/21 19:15:10 dillon Exp $
 */

#include "opt_ddb_trace.h"
#include "opt_panic.h"
#include "opt_show_busybufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/diskslice.h>
#include <sys/reboot.h>
#include <sys/fcntl.h>		/* FREAD */
#include <sys/stat.h>		/* S_IFCHR */
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/kern_syscall.h>
#include <vm/vm_map.h>

#include <sys/thread2.h>
#include <sys/mplock2.h>

#include <machine/cpu.h>
#include <machine/clock.h>
#include <machine/md_var.h>
#include <machine/smp.h>		/* smp_active_mask, cpuid */
#include <machine/vmparam.h>
#include <machine/thread.h>

#include <sys/signalvar.h>

#include <dev/acpica/acpi_pvpanic/panic_notifier.h>
#include <dev/misc/gpio/gpio.h>

#ifndef PANIC_REBOOT_WAIT_TIME
#define PANIC_REBOOT_WAIT_TIME 15	/* default to 15 seconds */
#endif

/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.  We use the machine version to stay
 * within the confines of the kernel header files.
 */
#include <machine/stdarg.h>

#ifdef DDB_UNATTENDED
int debugger_on_panic = 0;
#else
int debugger_on_panic = 1;
#endif
SYSCTL_INT(_debug, OID_AUTO, debugger_on_panic, CTLFLAG_RW,
	&debugger_on_panic, 0, "Run debugger on kernel panic");

#ifdef DDB_TRACE
int trace_on_panic = 1;
#else
int trace_on_panic = 0;
#endif
SYSCTL_INT(_debug, OID_AUTO, trace_on_panic, CTLFLAG_RW,
	&trace_on_panic, 0, "Print stack trace on kernel panic");

static int sync_on_panic = 0;
SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW,
	&sync_on_panic, 0, "Do a sync before rebooting from a panic");

SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");

/*
 * Variable panicstr contains argument to first call to panic; used as flag
 * to indicate that the kernel has already called panic.
 */
const char *panicstr;

int dumping;				/* system is dumping */
static struct dumperinfo dumper;	/* selected dumper */

globaldata_t panic_cpu_gd;		/* which cpu took the panic */
struct lwkt_tokref panic_tokens[LWKT_MAXTOKENS];
int panic_tokens_count;

int bootverbose = 0;			/* note: assignment to force non-bss */
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW,
	&bootverbose, 0, "Verbose kernel messages");

int cold = 1;				/* note: assignment to force non-bss */
int dumplo;				/* OBSOLETE - savecore compat */

static void boot (int) __dead2;
static int setdumpdev (cdev_t dev);
static void poweroff_wait (void *, int);
static void print_uptime (void);
static void shutdown_halt (void *junk, int howto);
static void shutdown_panic (void *junk, int howto);
static void shutdown_reset (void *junk, int howto);
static int shutdown_busycount1(struct buf *bp, void *info);
static int shutdown_busycount2(struct buf *bp, void *info);
static void shutdown_cleanup_proc(struct proc *p);

/* register various local shutdown events */
static void
shutdown_conf(void *unused)
{
	EVENTHANDLER_REGISTER(shutdown_final, poweroff_wait, NULL,
			      SHUTDOWN_PRI_FIRST);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_halt, NULL,
			      SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_panic, NULL,
			      SHUTDOWN_PRI_LAST + 100);
	EVENTHANDLER_REGISTER(shutdown_final, shutdown_reset, NULL,
			      SHUTDOWN_PRI_LAST + 200);
}

SYSINIT(shutdown_conf, SI_BOOT2_MACHDEP, SI_ORDER_ANY, shutdown_conf, NULL);

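/*
 * Illustrative sketch (not part of the original file): other subsystems can
 * hook the same shutdown events with handlers of their own.  The driver
 * name, softc and helper below are hypothetical; only EVENTHANDLER_REGISTER()
 * and the event/priority names come from the code above.
 *
 *	static void
 *	mydev_shutdown_final(void *arg, int howto)
 *	{
 *		struct mydev_softc *sc = arg;	(hypothetical softc)
 *
 *		if (howto & RB_POWEROFF)
 *			mydev_quiesce(sc);	(hypothetical helper)
 *	}
 *
 *	EVENTHANDLER_REGISTER(shutdown_final, mydev_shutdown_final, sc,
 *			      SHUTDOWN_PRI_FIRST);
 *
 * Handlers on the same event run in priority order, which is why
 * shutdown_halt/shutdown_panic/shutdown_reset above register at
 * SHUTDOWN_PRI_LAST plus an offset so they run after everything else.
 */
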
/*
 * The system call that results in a reboot.
 */
int
sys_reboot(struct reboot_args *uap)
{
	struct thread *td = curthread;
	int error;

	if ((error = priv_check(td, PRIV_REBOOT)))
		return (error);

	boot(uap->opt);
	return (0);
}

/*
 * Called by events that want to shut down, e.g. <CTL><ALT><DEL> on a PC
 */
static int shutdown_howto = 0;

void
shutdown_nice(int howto)
{
	shutdown_howto = howto;

	/* Send a signal to init(8) and have it shut down the world */
	if (initproc != NULL) {
		ksignal(initproc, SIGINT);
	} else {
		/* No init(8) running, so simply reboot */
		boot(RB_NOSYNC);
	}
}

static int waittime = -1;
struct thread *dumpthread;

/*
 * Print the system uptime in days/hours/minutes/seconds form.
 */
static void
print_uptime(void)
{
	int f;
	struct timespec ts;

	getnanouptime(&ts);
	kprintf("Uptime: ");
	f = 0;
	if (ts.tv_sec >= 86400) {
		kprintf("%ldd", ts.tv_sec / 86400);
		ts.tv_sec %= 86400;
		f = 1;
	}
	if (f || ts.tv_sec >= 3600) {
		kprintf("%ldh", ts.tv_sec / 3600);
		ts.tv_sec %= 3600;
		f = 1;
	}
	if (f || ts.tv_sec >= 60) {
		kprintf("%ldm", ts.tv_sec / 60);
		ts.tv_sec %= 60;
		f = 1;
	}
	kprintf("%lds\n", ts.tv_sec);
}

/*
 * Go through the rigmarole of shutting down...
 * this used to be in machdep.c but I'll be damned if I could see
 * anything machine dependent in it.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/*
	 * We really want to shut down on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (!CPUMASK_ISUP(smp_active_mask)) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a root reference to root.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/*
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;

			/*
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");

		/*
		 * Count only busy local buffers to prevent forcing
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks.  Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
			if (debugger_on_panic)
				Debugger("busy buffer problem");
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		}
	}

	/*
	 * Unmount filesystems
	 */
	if (panicstr == NULL)
		vfs_unmountall();

	tsleep(boot, 0, "shutdn", hz / 10 + 1);

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold)
		dumpsys();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * events.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for (;;)
		;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}

/*
 * Pass 1 - Figure out if there are any busy or dirty buffers still present.
 *
 * We ignore TMPFS mounts in this pass.
 */
static int
shutdown_busycount1(struct buf *bp, void *info)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL && vp->v_tag == VT_TMPFS)
		return (0);
	if ((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp) > 0)
		return (1);
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI)
		return (1);
	return (0);
}

/*
 * Pass 2 - only run after pass 1 has completed or has given up.
 *
 * We ignore TMPFS, NFS, MFS, and SMBFS mounts in this pass.
 */
static int
shutdown_busycount2(struct buf *bp, void *info)
{
	struct vnode *vp;

	/*
	 * Ignore tmpfs and nfs mounts
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_tag == VT_TMPFS)
			return (0);
		if (vp->v_tag == VT_NFS)
			return (0);
		if (vp->v_tag == VT_MFS)
			return (0);
		if (vp->v_tag == VT_SMBFS)
			return (0);
	}

	/*
	 * Only count buffers stuck on I/O, ignore everything else
	 */
	if (((bp->b_flags & B_INVAL) == 0 && BUF_REFCNT(bp)) ||
	    ((bp->b_flags & (B_DELWRI|B_INVAL)) == B_DELWRI)) {
		/*
		 * Only count buffers undergoing write I/O
		 * on the related vnode.
		 */
		if (bp->b_vp == NULL ||
		    bio_track_active(&bp->b_vp->v_track_write) == 0) {
			return (0);
		}
#if defined(SHOW_BUSYBUFS) || defined(DIAGNOSTIC)
		kprintf(
		    "%p dev:?, flags:%08x, loffset:%jd, doffset:%jd\n",
		    bp,
		    bp->b_flags, (intmax_t)bp->b_loffset,
		    (intmax_t)bp->b_bio2.bio_offset);
#endif
		return (1);
	}
	return (0);
}

/*
 * If the shutdown was a clean halt, behave accordingly.
 */
static void
shutdown_halt(void *junk, int howto)
{
	if (howto & RB_HALT) {
		kprintf("The operating system has halted.\n");
#ifdef _KERNEL_VIRTUAL
		cpu_halt();
#else
		kprintf("Please press any key to reboot.\n\n");
		switch (cngetc()) {
		case -1:		/* No console, just die */
			cpu_halt();
		default:
			howto &= ~RB_HALT;
			break;
		}
#endif
	}
}

/*
 * Check to see if the system panicked, pause and then reboot
 * according to the specified delay.
 */
static void
shutdown_panic(void *junk, int howto)
{
	int loop;
	int c;

	if (howto & RB_DUMP) {
		if (PANIC_REBOOT_WAIT_TIME != 0) {
			if (PANIC_REBOOT_WAIT_TIME != -1) {
				kprintf("Automatic reboot in %d seconds - "
					"press a key on the console to abort\n",
					PANIC_REBOOT_WAIT_TIME);
				for (loop = PANIC_REBOOT_WAIT_TIME * 10;
				     loop > 0; --loop) {
					DELAY(1000 * 100); /* 1/10th second */
					/* Did user type a key? */
					c = cncheckc();
					if (c != -1 && c != NOKEY)
						break;
				}
				if (!loop)
					return;
			}
		} else { /* zero time specified - reboot NOW */
			return;
		}
		kprintf("--> Press a key on the console to reboot,\n");
		kprintf("--> or switch off the system now.\n");
		cngetc();
	}
}

/*
 * Everything done, now reset
 */
static void
shutdown_reset(void *junk, int howto)
{
	kprintf("Rebooting...\n");
	DELAY(1000000);	/* wait 1 sec for kprintf's to complete and be read */
	/* cpu_boot(howto); */ /* doesn't do anything at the moment */
	cpu_reset();
	/* NOTREACHED */ /* assuming reset worked */
}

/*
 * Try to remove FS references in the specified process.  This function
 * is used during shutdown.
 */
static void
shutdown_cleanup_proc(struct proc *p)
{
	struct filedesc *fdp;
	struct vmspace *vm;

	if (p == NULL)
		return;
	if ((fdp = p->p_fd) != NULL) {
		if (fdp->fd_ncdir.ncp)
			cache_drop(&fdp->fd_ncdir);
		if (fdp->fd_nrdir.ncp)
			cache_drop(&fdp->fd_nrdir);
		if (fdp->fd_njdir.ncp)
			cache_drop(&fdp->fd_njdir);
	}

	vm = p->p_vmspace;
	if (vm != NULL) {
		pmap_remove_pages(vmspace_pmap(vm),
				  VM_MIN_USER_ADDRESS,
				  VM_MAX_USER_ADDRESS);
		vm_map_remove(&vm->vm_map,
			      VM_MIN_USER_ADDRESS,
			      VM_MAX_USER_ADDRESS);
	}
}

/*
 * Magic number for savecore
 *
 * exported (symorder) and used at least by savecore(8)
 *
 * Mark it as used so that gcc doesn't optimize it away.
 */
__attribute__((__used__))
	static u_long const dumpmag = 0x8fca0101UL;

__attribute__((__used__))
	static int dumpsize = 0;		/* also for savecore */

static int dodump = 1;

SYSCTL_INT(_machdep, OID_AUTO, do_dump, CTLFLAG_RW, &dodump, 0,
    "Try to perform coredump on kernel panic");

void
mkdumpheader(struct kerneldumpheader *kdh, char *magic, uint32_t archver,
	     uint64_t dumplen, uint32_t blksz)
{
	bzero(kdh, sizeof(*kdh));
	strncpy(kdh->magic, magic, sizeof(kdh->magic));
	strncpy(kdh->architecture, MACHINE_ARCH, sizeof(kdh->architecture));
	kdh->version = htod32(KERNELDUMPVERSION);
	kdh->architectureversion = htod32(archver);
	kdh->dumplength = htod64(dumplen);
	kdh->dumptime = htod64(time_second);
	kdh->blocksize = htod32(blksz);
	strncpy(kdh->hostname, hostname, sizeof(kdh->hostname));
	strncpy(kdh->versionstring, version, sizeof(kdh->versionstring));
	if (panicstr != NULL)
		strncpy(kdh->panicstring, panicstr, sizeof(kdh->panicstring));
	kdh->parity = kerneldump_parity(kdh);
}

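/*
 * Illustrative sketch (not part of the original file): a machine-dependent
 * md_dumpsys() would typically build a kerneldumpheader with mkdumpheader()
 * and write it before and after the memory image through the registered
 * dumper.  The magic string, architecture version and the di->blocksize /
 * di->dumper member names are assumptions here, not taken from this file:
 *
 *	struct kerneldumpheader kdh;
 *
 *	mkdumpheader(&kdh, magic, archver, dumplen, di->blocksize);
 *	... write kdh, then the memory image, then kdh again via di->dumper ...
 *
 * where di is the struct dumperinfo handed to set_dumper() further below.
 */
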
static int
setdumpdev(cdev_t dev)
{
	int error;
	int doopen;

	if (dev == NULL) {
		disk_dumpconf(NULL, 0/*off*/);
		return (0);
	}

	/*
	 * We have to open the device before we can perform ioctls on it,
	 * or the slice/label data may not be present.  Device opens are
	 * usually tracked by specfs, but the dump device can be set in
	 * early boot and may not be open so this is somewhat of a hack.
	 */
	doopen = (dev->si_sysref.refcnt == 1);
	if (doopen) {
		error = dev_dopen(dev, FREAD, S_IFCHR, proc0.p_ucred, NULL);
		if (error)
			return (error);
	}
	error = disk_dumpconf(dev, 1/*on*/);

	return error;
}

static void dump_conf (void *dummy);

static void
dump_conf(void *dummy)
{
	char *path;
	cdev_t dev;
	int _dummy;

	path = kmalloc(MNAMELEN, M_TEMP, M_WAITOK);
	if (TUNABLE_STR_FETCH("dumpdev", path, MNAMELEN) != 0) {
		/*
		 * Make sure all disk devices created so far have also been
		 * probed, and also make sure that the newly created device
		 * nodes for probed disks are ready, too.
		 *
		 * XXX - Delay an additional 2 seconds to help drivers which
		 *	 pick up devices asynchronously and are not caught by
		 *	 CAM's initial probe.
		 */
		tsleep(&_dummy, 0, "syncer", hz*2);

		dev = kgetdiskbyname(path);
		if (dev != NULL)
			dumpdev = dev;
	}
	kfree(path, M_TEMP);
	if (setdumpdev(dumpdev) != 0)
		dumpdev = NULL;
}

SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL);

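/*
 * Illustrative note (not part of the original file): "dumpdev" above is
 * fetched with TUNABLE_STR_FETCH(), i.e. it is a boot-time tunable, so a
 * hypothetical configuration would set it before boot, for example in
 * /boot/loader.conf:
 *
 *	dumpdev="da0s1b"
 *
 * The exact name format accepted is whatever kgetdiskbyname() resolves;
 * dump_conf() then validates the device via setdumpdev() at SI_SUB_DUMP_CONF
 * time.
 */
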
static int
sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS)
{
	udev_t ndumpdev;
	int error;

	ndumpdev = dev2udev(dumpdev);
	error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req);
	if (error == 0 && req->newptr != NULL)
		error = setdumpdev(udev2dev(ndumpdev, 0));
	return (error);
}

SYSCTL_PROC(_kern, KERN_DUMPDEV, dumpdev, CTLTYPE_OPAQUE|CTLFLAG_RW,
	0, sizeof dumpdev, sysctl_kern_dumpdev, "T,udev_t", "");

static struct panicerinfo *panic_notifier;

int
set_panic_notifier(struct panicerinfo *info)
{
	if (info == NULL)
		panic_notifier = NULL;
	else if (panic_notifier != NULL)
		return 1;
	else
		panic_notifier = info;

	return 0;
}

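/*
 * Illustrative sketch (not part of the original file): a client such as a
 * pvpanic-style driver fills in a struct panicerinfo and registers it.  The
 * member names follow the use in panic() below (notifier/arg); the driver
 * names are hypothetical:
 *
 *	static void
 *	mydrv_on_panic(void *arg)
 *	{
 *		... poke the panic notification hardware ...
 *	}
 *
 *	static struct panicerinfo mydrv_panic_info = {
 *		.notifier = mydrv_on_panic,
 *		.arg = NULL,
 *	};
 *
 *	if (set_panic_notifier(&mydrv_panic_info) != 0)
 *		kprintf("mydrv: a panic notifier is already registered\n");
 *
 * Only one notifier can be installed at a time; passing NULL clears it.
 */
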
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	static char buf[256];

	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation but if the kprintf()
	 * itself panics don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			lwkt_deschedule_self(td);
			/* --mycpu->gd_trap_nesting_level */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got the panic cpu
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}

	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	if (panicstr == NULL) {
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;
	if (gd->gd_spinlocks)
		kprintf("panic with %d spinlocks held\n", gd->gd_spinlocks);
	gd->gd_spinlocks = 0;

	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr) {
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	__va_end(ap);

	if (panic_notifier != NULL)
		panic_notifier->notifier(panic_notifier->arg);
	kprintf("panic: %s\n", buf);
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC)
	wdog_disable();
#endif

	/*
	 * Make sure kgdb knows who we are, there won't be a stoppcbs[]
	 * entry since our cpu wasn't stopped.
	 */
	dumpthread = curthread;

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 *
	 * Limit the trace history to leave more panic data on a
	 * potentially row-limited console.
	 */
	if (newpanic && trace_on_panic)
		print_backtrace(6);
	if (debugger_on_panic)
		Debugger("panic");
	else if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
	boot(bootopt);
	/* NOT REACHED */
}

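/*
 * Illustrative usage (not part of the original file): callers pass a
 * printf-style message, so a hypothetical consistency check might read
 *
 *	if (bp->b_bcount > MAXBSIZE)
 *		panic("bwrite: bcount %d too large", bp->b_bcount);
 *
 * which shows up on the console as "panic: bwrite: bcount ... too large"
 * followed by "cpuid = N", per the kprintf() calls above.
 */
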
/*
 * Support for poweroff delay.
 */
#ifndef POWEROFF_DELAY
# define POWEROFF_DELAY 5000
#endif
static int poweroff_delay = POWEROFF_DELAY;

SYSCTL_INT(_kern_shutdown, OID_AUTO, poweroff_delay, CTLFLAG_RW,
	&poweroff_delay, 0, "");

static void
poweroff_wait(void *junk, int howto)
{
	if (!(howto & RB_POWEROFF) || poweroff_delay <= 0)
		return;
	DELAY(poweroff_delay * 1000);
}

/*
 * Some system processes (e.g. syncer) need to be stopped at appropriate
 * points in their main loops prior to a system shutdown, so that they
 * won't interfere with the shutdown process (e.g. by holding a disk buf
 * to cause sync to fail).  For each of these system processes, register
 * shutdown_kproc() as a handler for one of the shutdown events.
 */
static int kproc_shutdown_wait = 60;
SYSCTL_INT(_kern_shutdown, OID_AUTO, kproc_shutdown_wait, CTLFLAG_RW,
	&kproc_shutdown_wait, 0, "");

void
shutdown_kproc(void *arg, int howto)
{
	struct thread *td;
	struct proc *p;
	int error;

	if (panicstr)
		return;

	td = (struct thread *)arg;
	if ((p = td->td_proc) != NULL) {
		kprintf("Waiting (max %d seconds) for system process `%s' to stop...",
			kproc_shutdown_wait, p->p_comm);
	} else {
		kprintf("Waiting (max %d seconds) for system thread %s to stop...",
			kproc_shutdown_wait, td->td_comm);
	}
	error = suspend_kproc(td, kproc_shutdown_wait * hz);

	if (error == EWOULDBLOCK)
		kprintf("timed out\n");
	else
		kprintf("stopped\n");
}

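/*
 * Illustrative sketch (not part of the original file): per the comment above
 * shutdown_kproc(), a system thread registers itself so it gets suspended at
 * the right point in the shutdown sequence.  The thread pointer below is
 * hypothetical; the event and priority names appear elsewhere in this file:
 *
 *	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, mythread_td,
 *			      SHUTDOWN_PRI_FIRST);
 *
 * shutdown_kproc() then waits up to kern.shutdown.kproc_shutdown_wait
 * seconds for that thread to stop.
 */
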
/* Registration of dumpers */
int
set_dumper(struct dumperinfo *di)
{
	if (di == NULL) {
		bzero(&dumper, sizeof(dumper));
		return 0;
	}

	if (dumper.dumper != NULL)
		return (EBUSY);

	dumper = *di;
	return 0;
}

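/*
 * Illustrative sketch (not part of the original file): a disk driver that can
 * accept crash-dump I/O fills in a struct dumperinfo and registers it.  Only
 * the "dumper" member is referenced in this file; the other member names and
 * the driver callback are assumptions:
 *
 *	static struct dumperinfo mydisk_dumper = {
 *		.dumper = mydisk_dump,		(hypothetical dump callback)
 *		.priv = sc,			(assumed member name)
 *		.blocksize = DEV_BSIZE,		(assumed member name)
 *	};
 *
 *	if (set_dumper(&mydisk_dumper) != 0)
 *		kprintf("mydisk: a dumper is already registered\n");
 *
 * set_dumper(NULL) clears the registration again (see the bzero() above).
 */
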
void
dumpsys(void)
{
#if defined (_KERNEL_VIRTUAL)
	/* vkernels don't support dumps */
	kprintf("vkernels don't support dumps\n");
	return;
#endif
	/*
	 * If there is a dumper registered and we aren't dumping already, call
	 * the machine dependent dumpsys (md_dumpsys) to do the hard work.
	 *
	 * XXX: while right now the md_dumpsys() of x86 and x86_64 could be
	 * factored out completely into here, I'd rather keep them machine
	 * dependent in case we ever add a platform which does not share
	 * the same dumpsys() code, such as arm.
	 */
	if (dumper.dumper != NULL && !dumping) {
		dumping++;
		md_dumpsys(&dumper);
	}
}

int dump_stop_usertds = 0;

static void
need_user_resched_remote(void *dummy)
{
	need_user_resched();
}

void
dump_reactivate_cpus(void)
{
	globaldata_t gd;
	int cpu, seq;

	dump_stop_usertds = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		gd = globaldata_find(cpu);
		seq = lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
		lwkt_wait_ipiq(gd, seq);
	}

	restart_cpus(stopped_cpus);
}
);