/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmsg.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

static struct task *deadlwp_task[MAXCPU];
static struct lwplist deadlwp_list[MAXCPU];
static struct lwkt_token deadlwp_token[MAXCPU];

void (*linux_task_drop_callback)(thread_t td);
void (*linux_proc_drop_callback)(struct proc *p);
/*
 * SYS_EXIT_ARGS(int rval)
 */
sys_exit(struct sysmsg *sysmsg, const struct exit_args *uap)
	exit1(W_EXITCODE(uap->rval, 0));
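
/*
 * Illustrative note: W_EXITCODE(rval, 0) packs the user-supplied exit value
 * into the upper status byte with a termination signal of 0, e.g.
 * W_EXITCODE(3, 0) == 0x0300, so a later WEXITSTATUS() yields 3, WTERMSIG()
 * yields 0, and WIFEXITED() is true.
 */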
/*
 * Death of a lwp or process with optional bells and whistles.
 */
sys_extexit(struct sysmsg *sysmsg, const struct extexit_args *uap)
	struct proc *p = curproc;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	error = copyout(&uap->status, uap->addr, sizeof(uap->status));

	lwkt_gettoken(&p->p_token);

	/*
	 * Be sure only to perform a simple lwp exit if there is at
	 * least one more lwp in the proc, which will call exit1()
	 * later, otherwise the proc will be an UNDEAD and not even a
	 * zombie.
	 */
	if (p->p_nthreads > 1) {
		lwp_exit(0, NULL);	/* called w/ p_token held */
	/* else last lwp in proc: do the real thing */
	default:	/* to help gcc */
		lwkt_reltoken(&p->p_token);
		exit1(W_EXITCODE(uap->status, 0));

	lwkt_reltoken(&p->p_token);	/* safety */
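
/*
 * Descriptive note: uap->how carries two packed fields which are split above
 * by EXTEXIT_ACTION() (what to do on exit) and EXTEXIT_WHO() (whether the
 * whole process or only the calling lwp dies), and uap->status is optionally
 * copied out to uap->addr before any exit action is taken, since nothing can
 * be reported back afterwards.
 */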
/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
killalllwps(int forexec)
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	lwkt_gettoken(&p->p_token);
	if (p->p_flags & P_WEXIT) {
		lwkt_reltoken(&p->p_token);
	p->p_flags |= P_WEXIT;
	lwkt_gettoken(&lp->lwp_token);

	/*
	 * Set temporary stopped state in case we are racing a coredump.
	 * Otherwise the coredump may hang forever.
	 */
	if ((lp->lwp_mpflags & LWP_MP_WSTOP) == 0) {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
	wakeup(&p->p_nstopped);

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)

	/*
	 * Undo temporary stopped state
	 */
	if (fakestop && (lp->lwp_mpflags & LWP_MP_WSTOP)) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	p->p_flags &= ~P_WEXIT;

	lwkt_reltoken(&lp->lwp_token);
	lwkt_reltoken(&p->p_token);
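
/*
 * Descriptive note: P_WEXIT is the process-wide interlock taken by the one
 * thread doing the master exit, while LWP_MP_WEXIT marks each individual lwp
 * that is on its way out; killlwps() below relies on the latter to avoid
 * re-signaling lwps that are already exiting.
 */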
/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
killlwps(struct lwp *lp)
	struct proc *p = lp->lwp_proc;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_MP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		lwkt_gettoken(&tlp->lwp_token);
		if ((tlp->lwp_mpflags & LWP_MP_WEXIT) == 0) {
			atomic_set_int(&tlp->lwp_mpflags, LWP_MP_WEXIT);
			lwpsignal(p, tlp, SIGKILL);
		lwkt_reltoken(&tlp->lwp_token);

	/*
	 * Wait for everything to clear out.  Also make sure any tstop()s
	 * are signalled (we are holding p_token for the interlock).
	 */
	while (p->p_nthreads > 1)
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
exit1(int rv)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct sysreaper *reap;

	lwkt_gettoken(&p->p_token);

	kprintf("init died (signal %d, exit %d)\n",
		WTERMSIG(rv), WEXITSTATUS(rv));
	panic("Going nowhere without my init!");

	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);

	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);

	/* are we a task leader? */
	if (p == p->p_leader) {
		struct sysmsg sysmsg;

		sysmsg.extargs.kill.signum = SIGKILL;
		sysmsg.extargs.kill.pid = q->p_pid;
		/*
		 * The interface for kill is better
		 * than the internal signal
		 */
		sys_kill(&sysmsg, &sysmsg.extargs.kill);
		tsleep((caddr_t)p, 0, "exit1", 0);

	STOPEVENT(p, S_EXIT, rv);
	p->p_flags |= P_POSTEXIT;	/* stop procfs stepping */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 *
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)

	if (p->p_flags & P_PROFIL)

	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_terminate(&p->p_ithandle);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 */
	if (p->p_leader->p_peers) {
		while (q->p_peers != p)
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);

	/*
	 * XXX Shutdown SYSV semaphores
	 */

	/* The next two chunks should probably be moved to vmspace_exit. */

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 */
	vkernel_lwp_exit(lp);

	/*
	 * Release the user portion of address space.  The exitbump prevents
	 * the vmspace from being completely eradicated (using holdcnt).
	 * This releases references to vnodes, which could cause I/O if the
	 * file has been unlinked.  We need to do this early enough that
	 * we can still sleep.
	 *
	 * We can't free the entire vmspace as the kernel stack may be mapped
	 * within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 *
	 * NOTE: Releasing p_token around this call is helpful if the
	 *	 vmspace had a huge RSS.  Otherwise some other process
	 *	 trying to do an allproc or other scan (like 'ps') may
	 *	 stall for a long time.
	 */
	lwkt_reltoken(&p->p_token);
	lwkt_gettoken(&p->p_token);

	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		/*
		 * We are the controlling process.  Signal the
		 * foreground process group, drain the controlling
		 * terminal, and revoke access to the controlling
		 * terminal.
		 *
		 * NOTE: While waiting for the process group to exit
		 *	 it is possible that one of the processes in
		 *	 the group will revoke the tty, so the
		 *	 ttyclosesession() function will re-check
		 *
		 * NOTE: Force a timeout of one second when draining
		 *	 the controlling terminal.  PCATCH won't work
		 */
		if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
			if (sp->s_ttyp->t_pgrp)
				pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
			sp->s_ttyp->t_timeout = hz;
			ttyclosesession(sp, 1);	/* also revoke */

		/*
		 * Release the tty.  If someone has it open via
		 * /dev/tty then close it (since they no longer can
		 * once we've NULL'd it out).
		 */
		ttyclosesession(sp, 0);

		/*
		 * s_ttyp is not zero'd; we use this to indicate
		 * that the session once had a controlling terminal.
		 * (for logging and informational purposes)
		 */
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);

	ktrdestroy(&p->p_tracenode);

	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);

	/*
	 * We have to handle PPWAIT here or proc_move_allproc_zombie()
	 * will block on the PHOLD() the parent is doing.
	 *
	 * We are using the flag as an interlock so an atomic op is
	 * necessary to synchronize with the parent's cpu.
	 */
	if (p->p_flags & P_PPWAIT) {
		if (p->p_pptr && p->p_pptr->p_upmap)
			atomic_add_int(&p->p_pptr->p_upmap->invfork, -1);
		atomic_clear_int(&p->p_flags, P_PPWAIT);

	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 *
	 * Interlock against waiters using p_waitgen.  We increment
	 * p_waitgen after completing the move of our process to the
	 * zombie list.
	 *
	 * WARNING: pp becomes stale when we block, clear it now as a
	 *	    reminder.
	 */
	proc_move_allproc_zombie(p);
	atomic_add_long(&pp->p_waitgen, 1);

	/*
	 * release controlled reaper for exit if we own it and return the
	 * remaining reaper (the one for us), which we will drop after we
	 * are done.
	 */
	reap = reaper_exit(p);

	/*
	 * Reparent all of this process's children to the init process or
	 * to the designated reaper.  We must hold the reaper's p_token in
	 * order to safely mess with p_children.
	 *
	 * Issue the p_deathsig signal to children that request it.
	 *
	 * We already hold p->p_token (to remove the children from our list).
	 */
	q = LIST_FIRST(&p->p_children);
	reproc = reaper_get(reap);
	lwkt_gettoken(&reproc->p_token);
	while ((q = LIST_FIRST(&p->p_children)) != NULL) {
		lwkt_gettoken(&q->p_token);
		if (q != LIST_FIRST(&p->p_children)) {
			lwkt_reltoken(&q->p_token);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&reproc->p_children, q, p_sibling);
		q->p_ppid = reproc->p_pid;
		q->p_sigparent = SIGCHLD;

		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flags & P_TRACED) {
			q->p_flags &= ~P_TRACED;

		/*
		 * Issue p_deathsig to children that request it
		 */
		ksignal(q, q->p_deathsig);
		lwkt_reltoken(&q->p_token);
	lwkt_reltoken(&reproc->p_token);

	/*
	 * Save exit status and final rusage info.  We no longer add
	 * child rusage info into self times, wait4() and kern_wait()
	 * handle it in order to properly support wait6().
	 */
	calcru_proc(p, &p->p_ru);
	/*ruadd(&p->p_ru, &p->p_cru); REMOVED */

	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify the reaper
	 * instead (it will handle this situation).
	 *
	 * NOTE: The reaper can still be the parent process.
	 */
	if (p->p_pptr->p_sigacts->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		reproc = reaper_get(reap);
		proc_reparent(p, reproc);

	/*
	 * Signal (possibly new) parent.
	 */
	if (p->p_sigparent && pp != initproc) {
		int sig = p->p_sigparent;

		if (sig != SIGUSR1 && sig != SIGCHLD)
	ksignal(pp, SIGCHLD);

	p->p_flags &= ~P_TRACED;

	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */
	struct plimit *rlimit;

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 *
	 * pp is a wild pointer now but still the correct wakeup() target.
	 * lwp_exit() only uses it to send the wakeup() signal to the likely
	 * parent.  Any reparenting race that occurs will get a signal
	 * automatically and not be an issue.
	 */
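
/*
 * Descriptive note on ordering in exit1(): sibling lwps are killed first
 * (killalllwps), then per-process resources (files, timers, vmspace,
 * session/tty, accounting) are torn down, the process is moved to the
 * zombie list, children are handed to the reaper, and only then is the
 * (possibly new) parent signalled so that wait*() can reap us.
 */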
/*
 * Eventually called by every exiting LWP
 *
 * p->p_token must be held.  mplock may be held and will be released.
 */
lwp_exit(int masterexit, void *waddr)
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Destroy the per-thread shared page and remove from any pmaps
	 */

	/*
	 * lwp_exit() may be called without setting LWP_MP_WEXIT, so
	 * make sure it is set here.
	 */
	ASSERT_LWKT_TOKEN_HELD(&p->p_token);
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);

	/*
	 * Clean up any virtualization
	 */
	vkernel_lwp_exit(lp);

	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);

	if (td->td_linux_task)
		linux_task_drop_callback(td);
	if (masterexit && p->p_linux_mm)
		linux_proc_drop_callback(p);

	/*
	 * Clean up any syscall-cached ucred or rlimit.
	 */
	crfree(td->td_ucred);

	struct plimit *rlimit;

	rlimit = td->td_limit;

	/*
	 * Cleanup any cached descriptors for this thread
	 */

	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);

	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);

	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */

	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);

	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 *
	 * Wakeup anyone waiting on p_nthreads to drop to 1 or 0.
	 *
	 * The process is left held until the reaper calls lwp_dispose() on
	 * the lp (after calling lwp_wait()).
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)
		lwkt_gettoken(&deadlwp_token[cpu]);
		LIST_INSERT_HEAD(&deadlwp_list[cpu], lp, u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[cpu], deadlwp_task[cpu]);
		lwkt_reltoken(&deadlwp_token[cpu]);
	if ((p->p_flags & P_MAYBETHREADED) && p->p_nthreads <= 1)

	/*
	 * We no longer need p_token.
	 *
	 * Tell the userland scheduler that we are going away
	 */
	lwkt_reltoken(&p->p_token);
	p->p_usched->heuristic_exiting(lp, p);

	/*
	 * Issue late wakeups after releasing our token to give us a chance
	 * to deschedule and switch away before another cpu in a wait*()
	 * reaps us.  This is done as late as possible to reduce contention.
	 */
	wakeup(&p->p_nthreads);
/*
 * Wait until a lwp is completely dead.  The final interlock in this drama
 * is when TDF_EXITING is set in cpu_thread_exit() just before the final
 *
 * At the point TDF_EXITING is set a complete exit is accomplished when
 * TDF_RUNNING and TDF_PREEMPT_LOCK are both clear.  td_mpflags has two
 * post-switch interlock flags that can be used to wait for the TDF_
 *
 * Returns non-zero on success, and zero if the caller needs to retry
 */
lwp_wait(struct lwp *lp)
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	/*
	 * This bit of code uses the thread destruction interlock
	 * managed by lwkt_switch_return() to wait for the lwp's
	 * thread to completely disengage.
	 *
	 * It is possible for us to race another cpu core so we
	 * have to do this correctly.
	 */
	mpflags = td->td_mpflags;
	if (mpflags & TDF_MP_EXITSIG)
	tsleep_interlock(td, 0);
	if (atomic_cmpset_int(&td->td_mpflags, mpflags,
			      mpflags | TDF_MP_EXITWAIT)) {
		tsleep(td, PINTERLOCKED, "lwpxt", 0);

	/*
	 * We've already waited for the core exit but there can still
	 * be other refs from e.g. process scans and such.
	 */
	if (lp->lwp_lock > 0) {
		tsleep(lp, 0, "lwpwait1", 1);
	tsleep(td, 0, "lwpwait2", 1);

	/*
	 * Now that we have the thread destruction interlock these flags
	 * really should already be cleaned up, keep a check for safety.
	 *
	 * We can't rip its stack out from under it until TDF_EXITING is
	 * set and both TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets preempted.
	 */
	while ((td->td_flags & (TDF_RUNNING |
				TDF_PREEMPT_LOCK |
				TDF_EXITING)) != TDF_EXITING) {
		tsleep(lp, 0, "lwpwait3", 1);

	KASSERT((td->td_flags & (TDF_RUNQ|TDF_TSLEEPQ)) == 0,
		("lwp_wait: td %p (%s) still on run or sleep queue",
/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
lwp_dispose(struct lwp *lp)
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(lp->lwp_lock == 0);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING |
				  TDF_EXITING)) == TDF_EXITING);

	lp->lwp_thread = NULL;
	lwkt_free_thread(td);
sys_wait4(struct sysmsg *sysmsg, const struct wait_args *uap)
	struct __wrusage wrusage;

	options = uap->options | WEXITED | WTRAPPED;

	if (id == WAIT_ANY) {
	} else if (id == WAIT_MYPGRP) {
		id = curproc->p_pgid;

	error = kern_wait(idtype, id, &status, options, &wrusage,
			  NULL, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage) {
		ruadd(&wrusage.wru_self, &wrusage.wru_children);
		error = copyout(&wrusage.wru_self, uap->rusage,
				sizeof(*uap->rusage));
sys_wait6(struct sysmsg *sysmsg, const struct wait6_args *uap)
	struct __wrusage wrusage;

	/*
	 * NOTE: wait6() requires WEXITED and WTRAPPED to be specified if
	 */
	options = uap->options;
	idtype = uap->idtype;

	infop = uap->info ? &info : NULL;

	if (id == WAIT_MYPGRP) {
		id = curproc->p_pgid;

	/* let kern_wait deal with the remainder */
	error = kern_wait(idtype, id, &status, options,
			  &wrusage, infop, &sysmsg->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->wrusage)
		error = copyout(&wrusage, uap->wrusage, sizeof(*uap->wrusage));
	if (error == 0 && uap->info)
		error = copyout(&info, uap->info, sizeof(*uap->info));
/*
 * kernel wait*() system call support
 */
kern_wait(idtype_t idtype, id_t id, int *status, int options,
	  struct __wrusage *wrusage, siginfo_t *info, int *res)
	struct thread *td = curthread;
	struct proc *q = td->td_proc;

	/*
	 * Must not have extraneous options.  Must have at least one
	 */
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE|WSTOPPED|
			WEXITED|WTRAPPED|WNOWAIT)) {
	if ((options & (WEXITED | WUNTRACED | WCONTINUED | WTRAPPED)) == 0) {

	/*
	 * Protect the q->p_children list
	 */
	lwkt_gettoken(&q->p_token);

	/*
	 * All sorts of things can change due to blocking so we have to loop
	 * all the way back up here.
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This little
	 * two-line hack restores this effect.
	 *
	 * No locks are held so we can safely block the process here.
	 */
	if (STOPLWP(q, td->td_lwp))

	/*
	 * NOTE: We don't want to break q's p_token in the loop for the
	 *	 case where no children are found or we risk breaking the
	 *	 interlock between child and parent.
	 */
	waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		/*
		 * Skip children that another thread is already uninterruptably
		 * reaping.
		 */
		if (PWAITRES_PENDING(p))

		/*
		 * Filter, (p) will be held on fall-through.  Try to optimize
		 * this to avoid the atomic op until we are pretty sure we
		 * want this process.
		 */
		if (p->p_pid != (pid_t)id)
		if (p->p_pgid != (pid_t)id)
		if (p->p_session && p->p_session->s_sid != (pid_t)id) {
		if (p->p_ucred->cr_uid != (uid_t)id) {
		if (p->p_ucred->cr_gid != (gid_t)id) {
		if (p->p_ucred->cr_prison &&
		    p->p_ucred->cr_prison->pr_id != (int)id) {
		/* unsupported filter */

		/* (p) is held at this point */

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {

		if (p->p_stat == SZOMB && (options & WEXITED)) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 *
			 * Only this routine can remove a process from
			 * the zombie list and destroy it.
			 *
			 * This function will fail after sleeping if another
			 * thread owns the zombie lock.  This function will
			 * fail immediately or after sleeping if another
			 * thread owns or obtains ownership of the reap via
			 */
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);

			/*
			 * We are the reaper, from this point on the reap
			 * cannot be aborted.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);

			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				/*
				 * Make sure no one is using this lwp, before
				 * it is removed from the tree.  If we didn't
				 * wait it here, lwp tree iteration with
				 * blocking operation would be broken.
				 */
				while (lp->lwp_lock > 0)
					tsleep(lp, 0, "zomblwp", 1);
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
			KKASSERT(p->p_nthreads == 0);

			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			PRELE(p);		/* from top of loop */
			PSTALL(p, "reap3", 1);	/* 1 ref (for PZOMBHOLD) */

			/* Take care of our return values. */
			*status = p->p_xstat;
			wrusage->wru_self = p->p_ru;
			wrusage->wru_children = p->p_cru;

			bzero(info, sizeof(*info));
			info->si_signo = SIGCHLD;
			if (WIFEXITED(p->p_xstat)) {
				info->si_code = CLD_EXITED;
				info->si_status =
					WEXITSTATUS(p->p_xstat);
				info->si_code = CLD_KILLED;
				info->si_status = WTERMSIG(p->p_xstat);
			info->si_pid = p->p_pid;
			info->si_uid = p->p_ucred->cr_uid;

			/*
			 * WNOWAIT shortcuts to done here, leaving the
			 * child on the zombie list.
			 */
			if (options & WNOWAIT) {
				lwkt_reltoken(&p->p_token);

			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				lwkt_reltoken(&p->p_token);

			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			proc_remove_zombie(p);
			lwkt_reltoken(&p->p_token);

			ruadd(&q->p_cru, &p->p_ru);
			ruadd(&q->p_cru, &p->p_cru);

			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.  p_spin is required to
			 * avoid races against allproc scans.
			 */
			spin_lock(&p->p_spin);
			spin_unlock(&p->p_spin);

			/*
			 * Remove unused arguments
			 */
			if (pa && refcount_release(&pa->ar_ref)) {

			p->p_sigacts = NULL;
			if (ps && refcount_release(&ps->ps_refcnt)) {
				kfree(ps, M_SUBPROC);

			/*
			 * Our exitingcount was incremented when the process
			 * became a zombie, now that the process has been
			 * removed from (almost) all lists we should be able
			 * to safely destroy its vmspace.  Wait for any current
			 * holders to go away (so the vmspace remains stable),
			 *
			 * NOTE: Releasing the parent process (q) p_token
			 *	 across the vmspace_exitfree() call is
			 *	 important here to reduce stalls on
			 *	 interactions with (q) (such as
			 *	 fork/exec/wait or 'ps').
			 */
			PSTALL(p, "reap4", 1);
			lwkt_reltoken(&q->p_token);
			vmspace_exitfree(p);
			lwkt_gettoken(&q->p_token);
			PSTALL(p, "reap5", 1);

			/*
			 * NOTE: We have to officially release ZOMB in order
			 *	 to ensure that a racing thread in kern_wait()
			 *	 which blocked on ZOMB is woken up.
			 */
			kfree(p->p_uidpcpu, M_SUBPROC);

			atomic_add_int(&nprocs, -1);

		/*
		 * Process has not yet exited
		 */
		if ((p->p_stat == SSTOP || p->p_stat == SCORE) &&
		    (p->p_flags & P_WAITED) == 0 &&
		    (((p->p_flags & P_TRACED) && (options & WTRAPPED)) ||
		     (options & WSTOPPED))) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
			if ((p->p_stat != SSTOP && p->p_stat != SCORE) ||
			    (p->p_flags & P_WAITED) != 0 ||
			    ((p->p_flags & P_TRACED) == 0 &&
			     (options & WUNTRACED) == 0)) {
				lwkt_reltoken(&p->p_token);

			/*
			 * Don't set P_WAITED if WNOWAIT specified, leaving
			 * the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags |= P_WAITED;

			*status = W_STOPCODE(p->p_xstat);
			/* Zero rusage so we get something consistent. */
			bzero(wrusage, sizeof(*wrusage));

			bzero(info, sizeof(*info));
			if (p->p_flags & P_TRACED)
				info->si_code = CLD_TRAPPED;
				info->si_code = CLD_STOPPED;
			info->si_status = WSTOPSIG(p->p_xstat);

			lwkt_reltoken(&p->p_token);

		if ((options & WCONTINUED) && (p->p_flags & P_CONTINUED)) {
			lwkt_gettoken(&p->p_token);
			if (p->p_pptr != q) {
				lwkt_reltoken(&p->p_token);
			if ((p->p_flags & P_CONTINUED) == 0) {
				lwkt_reltoken(&p->p_token);

			/*
			 * Don't set P_WAITED if WNOWAIT specified, leaving
			 * the process in a waitable state.
			 */
			if ((options & WNOWAIT) == 0)
				p->p_flags &= ~P_CONTINUED;

			bzero(info, sizeof(*info));
			info->si_code = CLD_CONTINUED;
			info->si_status = WSTOPSIG(p->p_xstat);

			lwkt_reltoken(&p->p_token);

	if (options & WNOHANG) {

	/*
	 * Wait for signal - interlocked using q->p_waitgen.
	 */
	while ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
		tsleep_interlock(q, PCATCH);
		waitgen = atomic_fetchadd_long(&q->p_waitgen, 0x80000000);
		if ((waitgen & 0x7FFFFFFF) == (q->p_waitgen & 0x7FFFFFFF)) {
			error = tsleep(q, PCATCH | PINTERLOCKED, "wait", 0);

	lwkt_reltoken(&q->p_token);
/*
 * Change child's parent process to parent.
 *
 * p_children/p_sibling requires the parent's token, and
 * changing pptr requires the child's token, so we have to
 * get three tokens to do this operation.  We also need to
 * hold pointers that might get ripped out from under us to
 * preserve structural integrity.
 *
 * It is possible to race another reparent or disconnect or other
 * similar operation.  We must retry when this situation occurs.
 * Once we successfully reparent the process we no longer care
 * about any races.
 */
proc_reparent(struct proc *child, struct proc *parent)
	while ((opp = child->p_pptr) != parent) {
		lwkt_gettoken(&opp->p_token);
		lwkt_gettoken(&child->p_token);
		lwkt_gettoken(&parent->p_token);
		if (child->p_pptr != opp) {
			lwkt_reltoken(&parent->p_token);
			lwkt_reltoken(&child->p_token);
			lwkt_reltoken(&opp->p_token);
		LIST_REMOVE(child, p_sibling);
		LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
		child->p_pptr = parent;
		child->p_ppid = parent->p_pid;
		lwkt_reltoken(&parent->p_token);
		lwkt_reltoken(&child->p_token);
		lwkt_reltoken(&opp->p_token);
		if (LIST_EMPTY(&opp->p_children))
/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * Take the arguments given and put them onto the exit callout list,
 * However first make sure that it's not already there.
 * returns 0 on success.
 */
at_exit(exitlist_fn function)
	struct exitlist *ep;

	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",

	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);

	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
rm_at_exit(exitlist_fn function)
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
/*
 * LWP reaper related code.
 */
reaplwps(void *context, int dummy)
	struct lwplist *lwplist = context;

	lwkt_gettoken(&deadlwp_token[cpu]);
	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
	lwkt_reltoken(&deadlwp_token[cpu]);

reaplwp(struct lwp *lp)
	while (lwp_wait(lp) == 0)

deadlwp_init(void)
	for (cpu = 0; cpu < ncpus; cpu++) {
		lwkt_token_init(&deadlwp_token[cpu], "deadlwpl");
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
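
/*
 * Descriptive note: the reaper path ties the pieces above together.
 * lwp_exit() queues a non-master lwp on deadlwp_list[cpu] and enqueues
 * deadlwp_task[cpu], whose handler reaplwps() drains the per-cpu list under
 * deadlwp_token[cpu] and calls reaplwp(), which loops on lwp_wait() until
 * the thread has fully switched away, at which point lwp_dispose() can
 * safely free it (see the comment in lwp_exit()).
 */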