kernel - Third time's the charm - move lwp_ucred to td_ucred
[dragonfly.git] / sys / kern / kern_exit.c
1 /*
2 * Copyright (c) 1982, 1986, 1989, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
39 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
40 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
43 #include "opt_compat.h"
44 #include "opt_ktrace.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/sysproto.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/proc.h>
52 #include <sys/ktrace.h>
53 #include <sys/pioctl.h>
54 #include <sys/tty.h>
55 #include <sys/wait.h>
56 #include <sys/vnode.h>
57 #include <sys/resourcevar.h>
58 #include <sys/signalvar.h>
59 #include <sys/taskqueue.h>
60 #include <sys/ptrace.h>
61 #include <sys/acct.h> /* for acct_process() function prototype */
62 #include <sys/filedesc.h>
63 #include <sys/shm.h>
64 #include <sys/sem.h>
65 #include <sys/aio.h>
66 #include <sys/jail.h>
67 #include <sys/kern_syscall.h>
68 #include <sys/upcall.h>
69 #include <sys/caps.h>
70 #include <sys/unistd.h>
72 #include <vm/vm.h>
73 #include <vm/vm_param.h>
74 #include <sys/lock.h>
75 #include <vm/pmap.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_extern.h>
78 #include <sys/user.h>
80 #include <sys/thread2.h>
81 #include <sys/sysref2.h>
83 static void reaplwps(void *context, int dummy);
84 static void reaplwp(struct lwp *lp);
85 static void killlwps(struct lwp *lp);
87 static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
88 static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
91 * callout list for things to do at exit time
93 struct exitlist {
94 exitlist_fn function;
95 TAILQ_ENTRY(exitlist) next;
98 TAILQ_HEAD(exit_list_head, exitlist);
99 static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
102 * LWP reaper data
104 struct task *deadlwp_task[MAXCPU];
105 struct lwplist deadlwp_list[MAXCPU];
108 * exit --
109 * Death of process.
111 * SYS_EXIT_ARGS(int rval)
113 * MPALMOSTSAFE
116 sys_exit(struct exit_args *uap)
118 get_mplock();
119 exit1(W_EXITCODE(uap->rval, 0));
120 /* NOTREACHED */
121 rel_mplock();
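/*
 * W_EXITCODE(rval, 0) packs the exit code into the upper byte of the wait
 * status with signal number 0 (on BSD-derived systems this is roughly
 * (rval << 8) | 0), which a later wait*() caller recovers with
 * WEXITSTATUS().  exit1() does not return; the rel_mplock() above is
 * marked NOTREACHED and is never executed.
 */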
125 * Extended exit --
126 * Death of a lwp or process with optional bells and whistles.
128 * MPALMOSTSAFE
131 sys_extexit(struct extexit_args *uap)
133 int action, who;
134 int error;
136 action = EXTEXIT_ACTION(uap->how);
137 who = EXTEXIT_WHO(uap->how);
139 /* Check parameters before we might perform some action */
140 switch (who) {
141 case EXTEXIT_PROC:
142 case EXTEXIT_LWP:
143 break;
144 default:
145 return (EINVAL);
148 switch (action) {
149 case EXTEXIT_SIMPLE:
150 break;
151 case EXTEXIT_SETINT:
152 error = copyout(&uap->status, uap->addr, sizeof(uap->status));
153 if (error)
154 return (error);
155 break;
156 default:
157 return (EINVAL);
160 get_mplock();
162 switch (who) {
163 case EXTEXIT_LWP:
165 * Be sure only to perform a simple lwp exit if there is at
166 * least one more lwp in the proc, which will call exit1()
167 * later, otherwise the proc will be an UNDEAD and not even a
168 * SZOMB!
170 if (curproc->p_nthreads > 1) {
171 lwp_exit(0);
172 /* NOT REACHED */
174 /* else last lwp in proc: do the real thing */
175 /* FALLTHROUGH */
176 default: /* to help gcc */
177 case EXTEXIT_PROC:
178 exit1(W_EXITCODE(uap->status, 0));
179 /* NOTREACHED */
182 /* NOTREACHED */
183 rel_mplock(); /* safety */
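/*
 * Summary of the extexit() paths above: "who" selects an lwp- or
 * process-scoped exit, "action" optionally copies the status integer out
 * to *addr (EXTEXIT_SETINT) before anything irreversible happens, and an
 * EXTEXIT_LWP request from the last remaining lwp degenerates into a full
 * exit1(), since a process with zero lwps could never be reaped as a
 * zombie.
 */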
187 * Kill all lwps associated with the current process except the
188 * current lwp. Return an error if we race another thread trying to
189 * do the same thing and lose the race.
191 * If forexec is non-zero the current thread and process flags are
192 * cleaned up so they can be reused.
195 killalllwps(int forexec)
197 struct lwp *lp = curthread->td_lwp;
198 struct proc *p = lp->lwp_proc;
201 * Interlock against P_WEXIT. Only one of the process's threads
202 * is allowed to do the master exit.
204 if (p->p_flag & P_WEXIT)
205 return (EALREADY);
206 p->p_flag |= P_WEXIT;
209 * Interlock with LWP_WEXIT and kill any remaining LWPs
211 lp->lwp_flag |= LWP_WEXIT;
212 if (p->p_nthreads > 1)
213 killlwps(lp);
216 * If doing this for an exec, clean up the remaining thread
217 * (us) for continuing operation after all the other threads
218 * have been killed.
220 if (forexec) {
221 lp->lwp_flag &= ~LWP_WEXIT;
222 p->p_flag &= ~P_WEXIT;
224 return(0);
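/*
 * killalllwps() is the single-threading interlock: the first lwp to set
 * P_WEXIT wins and every later caller gets EALREADY and simply exits
 * itself via lwp_exit().  exit1() calls it with forexec == 0; the
 * forexec != 0 form (used when the process replaces its image, e.g. from
 * exec) keeps the surviving thread usable by clearing LWP_WEXIT and
 * P_WEXIT again.
 */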
228 * Kill all LWPs except the current one. Do not try to signal
229 * LWPs which have exited on their own or have already been
230 * signaled.
232 static void
233 killlwps(struct lwp *lp)
235 struct proc *p = lp->lwp_proc;
236 struct lwp *tlp;
239 * Kill the remaining LWPs. We must send the signal before setting
240 * LWP_WEXIT. The setting of WEXIT is optional but helps reduce
241 * races. tlp must be held across the call as it might block and
242 * allow the target lwp to rip itself out from under our loop.
244 FOREACH_LWP_IN_PROC(tlp, p) {
245 LWPHOLD(tlp);
246 if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
247 lwpsignal(p, tlp, SIGKILL);
248 tlp->lwp_flag |= LWP_WEXIT;
250 LWPRELE(tlp);
254 * Wait for everything to clear out.
256 while (p->p_nthreads > 1) {
257 tsleep(&p->p_nthreads, 0, "killlwps", 0);
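/*
 * The tsleep() on &p->p_nthreads above is woken by lwp_exit(), which does
 * a wakeup(&p->p_nthreads) after removing a dying lwp from the process
 * and decrementing the count, so this loop ends once only the calling lwp
 * remains.
 */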
262 * Exit: deallocate address space and other resources, change proc state
263 * to zombie, and unlink proc from allproc and parent's lists. Save exit
264 * status and rusage for wait(). Check for child processes and orphan them.
266 void
267 exit1(int rv)
269 struct thread *td = curthread;
270 struct proc *p = td->td_proc;
271 struct lwp *lp = td->td_lwp;
272 struct proc *q, *nq;
273 struct vmspace *vm;
274 struct vnode *vtmp;
275 struct exitlist *ep;
276 int error;
278 if (p->p_pid == 1) {
279 kprintf("init died (signal %d, exit %d)\n",
280 WTERMSIG(rv), WEXITSTATUS(rv));
281 panic("Going nowhere without my init!");
284 varsymset_clean(&p->p_varsymset);
285 lockuninit(&p->p_varsymset.vx_lock);
287 * Kill all lwps associated with the current process, return an
288 * error if we race another thread trying to do the same thing
289 * and lose the race.
291 error = killalllwps(0);
292 if (error) {
293 lwp_exit(0);
294 /* NOT REACHED */
297 caps_exit(lp->lwp_thread);
298 aio_proc_rundown(p);
300 /* are we a task leader? */
301 if (p == p->p_leader) {
302 struct kill_args killArgs;
303 killArgs.signum = SIGKILL;
304 q = p->p_peers;
305 while(q) {
306 killArgs.pid = q->p_pid;
308 * The kill() interface is preferable here
309 * to delivering the signal internally.
311 sys_kill(&killArgs);
312 nq = q;
313 q = q->p_peers;
315 while (p->p_peers)
316 tsleep((caddr_t)p, 0, "exit1", 0);
319 #ifdef PGINPROF
320 vmsizmon();
321 #endif
322 STOPEVENT(p, S_EXIT, rv);
323 wakeup(&p->p_stype); /* Wakeup anyone in procfs' PIOCWAIT */
326 * Check if any loadable modules need anything done at process exit.
327 * e.g. SYSV IPC stuff
328 * XXX what if one of these generates an error?
330 TAILQ_FOREACH(ep, &exit_list, next)
331 (*ep->function)(td);
333 if (p->p_flag & P_PROFIL)
334 stopprofclock(p);
336 * If parent is waiting for us to exit or exec,
337 * P_PPWAIT is set; we will wakeup the parent below.
339 p->p_flag &= ~(P_TRACED | P_PPWAIT);
340 SIGEMPTYSET(p->p_siglist);
341 SIGEMPTYSET(lp->lwp_siglist);
342 if (timevalisset(&p->p_realtimer.it_value))
343 callout_stop(&p->p_ithandle);
346 * Reset any sigio structures pointing to us as a result of
347 * F_SETOWN with our pid.
349 funsetownlst(&p->p_sigiolst);
352 * Close open files and release open-file table.
353 * This may block!
355 fdfree(p, NULL);
357 if(p->p_leader->p_peers) {
358 q = p->p_leader;
359 while(q->p_peers != p)
360 q = q->p_peers;
361 q->p_peers = p->p_peers;
362 wakeup((caddr_t)p->p_leader);
366 * XXX Shutdown SYSV semaphores
368 semexit(p);
370 KKASSERT(p->p_numposixlocks == 0);
372 /* The next two chunks should probably be moved to vmspace_exit. */
373 vm = p->p_vmspace;
376 * Release upcalls associated with this process
378 if (vm->vm_upcalls)
379 upc_release(vm, lp);
382 * Clean up data related to virtual kernel operation. Clean up
383 * any vkernel context related to the current lwp now so we can
384 * destroy p_vkernel.
386 if (p->p_vkernel) {
387 vkernel_lwp_exit(lp);
388 vkernel_exit(p);
392 * Release user portion of address space.
393 * This releases references to vnodes,
394 * which could cause I/O if the file has been unlinked.
395 * Need to do this early enough that we can still sleep.
396 * Can't free the entire vmspace as the kernel stack
397 * may be mapped within that space also.
399 * Processes sharing the same vmspace may exit in one order, and
400 * get cleaned up by vmspace_exit() in a different order. The
401 * last exiting process to reach this point releases as much of
402 * the environment as it can, and the last process cleaned up
403 * by vmspace_exit() (which decrements exitingcnt) cleans up the
404 * remainder.
406 ++vm->vm_exitingcnt;
407 sysref_put(&vm->vm_sysref);
409 if (SESS_LEADER(p)) {
410 struct session *sp = p->p_session;
412 if (sp->s_ttyvp) {
414 * We are the controlling process. Signal the
415 * foreground process group, drain the controlling
416 * terminal, and revoke access to the controlling
417 * terminal.
419 * NOTE: while waiting for the process group to exit
420 * it is possible that one of the processes in the
421 * group will revoke the tty, so the ttyclosesession()
422 * function will re-check sp->s_ttyvp.
424 if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
425 if (sp->s_ttyp->t_pgrp)
426 pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
427 ttywait(sp->s_ttyp);
428 ttyclosesession(sp, 1); /* also revoke */
431 * Release the tty. If someone has it open via
432 * /dev/tty then close it (since they no longer can
433 * once we've NULL'd it out).
435 ttyclosesession(sp, 0);
438 * s_ttyp is not zero'd; we use this to indicate
439 * that the session once had a controlling terminal.
440 * (for logging and informational purposes)
443 sp->s_leader = NULL;
445 fixjobc(p, p->p_pgrp, 0);
446 (void)acct_process(p);
447 #ifdef KTRACE
449 * release trace file
451 if (p->p_tracenode)
452 ktrdestroy(&p->p_tracenode);
453 p->p_traceflag = 0;
454 #endif
456 * Release reference to text vnode
458 if ((vtmp = p->p_textvp) != NULL) {
459 p->p_textvp = NULL;
460 vrele(vtmp);
464 * Move the process to the zombie list. This will block
465 * until the process p_lock count reaches 0. The process will
466 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
467 * which is called from cpu_proc_exit().
469 proc_move_allproc_zombie(p);
471 q = LIST_FIRST(&p->p_children);
472 if (q) /* only need this if any child is S_ZOMB */
473 wakeup((caddr_t) initproc);
474 for (; q != 0; q = nq) {
475 nq = LIST_NEXT(q, p_sibling);
476 LIST_REMOVE(q, p_sibling);
477 LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
478 q->p_pptr = initproc;
479 q->p_sigparent = SIGCHLD;
481 * Traced processes are killed
482 * since their existence means someone is screwing up.
484 if (q->p_flag & P_TRACED) {
485 q->p_flag &= ~P_TRACED;
486 ksignal(q, SIGKILL);
491 * Save exit status and final rusage info, adding in child rusage
492 * info and self times.
494 p->p_xstat = rv;
495 calcru_proc(p, &p->p_ru);
496 ruadd(&p->p_ru, &p->p_cru);
499 * notify interested parties of our demise.
501 KNOTE(&p->p_klist, NOTE_EXIT);
504 * Notify parent that we're gone. If parent has the PS_NOCLDWAIT
505 * flag set, notify process 1 instead (and hope it will handle
506 * this situation).
508 if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
509 struct proc *pp = p->p_pptr;
510 proc_reparent(p, initproc);
512 * If this was the last child of our parent, notify
513 * parent, so in case he was wait(2)ing, he will
514 * continue.
516 if (LIST_EMPTY(&pp->p_children))
517 wakeup((caddr_t)pp);
520 if (p->p_sigparent && p->p_pptr != initproc) {
521 ksignal(p->p_pptr, p->p_sigparent);
522 } else {
523 ksignal(p->p_pptr, SIGCHLD);
526 wakeup((caddr_t)p->p_pptr);
528 * cpu_exit is responsible for clearing curproc, since
529 * it is heavily integrated with the thread/switching sequence.
531 * Other substructures are freed from wait().
533 plimit_free(p);
536 * Release the current user process designation on the process so
537 * the userland scheduler can work in someone else.
539 p->p_usched->release_curproc(lp);
542 * Finally, call machine-dependent code to release as many of the
543 * lwp's resources as we can and halt execution of this thread.
545 lwp_exit(1);
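/*
 * At this point the process sits on the zombie list with its exit status
 * and rusage recorded; the final teardown (reaping the master lwp,
 * freeing the ucred, sigacts, args and the proc structure itself) is
 * performed by the parent in kern_wait() once p_nthreads and p_lock have
 * drained to zero.
 */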
549 * Eventually called by every exiting LWP
551 void
552 lwp_exit(int masterexit)
554 struct thread *td = curthread;
555 struct lwp *lp = td->td_lwp;
556 struct proc *p = lp->lwp_proc;
559 * lwp_exit() may be called without setting LWP_WEXIT, so
560 * make sure it is set here.
562 lp->lwp_flag |= LWP_WEXIT;
565 * Clean up any virtualization
567 if (lp->lwp_vkernel)
568 vkernel_lwp_exit(lp);
571 * Clean up any syscall-cached ucred
573 if (td->td_ucred) {
574 crfree(td->td_ucred);
575 td->td_ucred = NULL;
579 * Nobody actually wakes us when the lock
580 * count reaches zero, so just wait one tick.
582 while (lp->lwp_lock > 0)
583 tsleep(lp, 0, "lwpexit", 1);
585 /* Hand down resource usage to our proc */
586 ruadd(&p->p_ru, &lp->lwp_ru);
589 * If we don't hold the process until the LWP is reaped wait*()
590 * may try to dispose of its vmspace before all the LWPs have
591 * actually terminated.
593 PHOLD(p);
596 * We have to use the reaper for all the LWPs except the one doing
597 * the master exit. The LWP doing the master exit can just be
598 * left on p_lwps and the process reaper will deal with it
599 * synchronously, which is much faster.
601 if (masterexit == 0) {
602 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
603 --p->p_nthreads;
604 wakeup(&p->p_nthreads);
605 LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp, u.lwp_reap_entry);
606 taskqueue_enqueue(taskqueue_thread[mycpuid], deadlwp_task[mycpuid]);
607 } else {
608 --p->p_nthreads;
610 biosched_done(curthread);
611 cpu_lwp_exit();
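/*
 * A non-master lwp cannot reap itself (its kernel stack is still in use),
 * so it is queued on the per-cpu deadlwp_list and handed to the per-cpu
 * taskqueue thread, which runs reaplwps() below.  The master lwp stays in
 * the process' lwp tree and is reaped synchronously by kern_wait().
 */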
615 * Wait until a lwp is completely dead.
617 * If the thread is still executing, it cannot be waited upon;
618 * return failure. The caller is responsible for waiting a little
619 * bit and checking again.
621 * Suggested use:
622 * while (!lwp_wait(lp))
623 * tsleep(lp, 0, "lwpwait", 1);
625 static int
626 lwp_wait(struct lwp *lp)
628 struct thread *td = lp->lwp_thread;
630 KKASSERT(lwkt_preempted_proc() != lp);
632 while (lp->lwp_lock > 0)
633 tsleep(lp, 0, "lwpwait1", 1);
635 lwkt_wait_free(td);
638 * The lwp's thread may still be in the middle
639 * of switching away, we can't rip its stack out from
640 * under it until TDF_EXITING is set and both
641 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
642 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
643 * will be cleared temporarily if a thread gets
644 * preempted.
646 * YYY no wakeup occurs, so we simply return failure
647 * and let the caller deal with sleeping and calling
648 * us again.
650 if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) !=
651 TDF_EXITING)
652 return (0);
654 return (1);
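/*
 * reaplwp() below is the canonical user of the suggested loop: it spins
 * on lwp_wait() with a one-tick tsleep() until TDF_EXITING is set and
 * both TDF_RUNNING and TDF_PREEMPT_LOCK have cleared.
 */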
658 * Release the resources associated with a lwp.
659 * The lwp must be completely dead.
661 void
662 lwp_dispose(struct lwp *lp)
664 struct thread *td = lp->lwp_thread;
666 KKASSERT(lwkt_preempted_proc() != lp);
667 KKASSERT(td->td_refs == 0);
668 KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
669 TDF_EXITING);
671 PRELE(lp->lwp_proc);
672 lp->lwp_proc = NULL;
673 if (td != NULL) {
674 td->td_proc = NULL;
675 td->td_lwp = NULL;
676 lp->lwp_thread = NULL;
677 lwkt_free_thread(td);
679 kfree(lp, M_LWP);
683 * MPSAFE
686 sys_wait4(struct wait_args *uap)
688 struct rusage rusage;
689 int error, status;
691 error = kern_wait(uap->pid, (uap->status ? &status : NULL),
692 uap->options, (uap->rusage ? &rusage : NULL),
693 &uap->sysmsg_result);
695 if (error == 0 && uap->status)
696 error = copyout(&status, uap->status, sizeof(*uap->status));
697 if (error == 0 && uap->rusage)
698 error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
699 return (error);
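/*
 * kern_wait() returns the reaped pid through *res, which the syscall
 * layer hands back as sysmsg_result; the status and rusage copyouts above
 * are only attempted on success and only when the caller actually passed
 * non-NULL pointers.
 */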
703 * wait1()
705 * wait_args(int pid, int *status, int options, struct rusage *rusage)
707 * MPALMOSTSAFE
710 kern_wait(pid_t pid, int *status, int options, struct rusage *rusage, int *res)
712 struct thread *td = curthread;
713 struct lwp *lp;
714 struct proc *q = td->td_proc;
715 struct proc *p, *t;
716 int nfound, error;
718 if (pid == 0)
719 pid = -q->p_pgid;
720 if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
721 return (EINVAL);
722 get_mplock();
723 loop:
725 * Hack for backwards compatibility with badly written user code.
726 * Or perhaps we have to do this anyway, it is unclear. XXX
728 * The problem is that if a process group is stopped and the parent
729 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
730 * of the child and then stop itself when it tries to return from the
731 * system call. When the process group is resumed the parent will
732 * then get the STOP status even though the child has now resumed
733 * (a followup wait*() will get the CONT status).
735 * Previously the CONT would overwrite the STOP because the tstop
736 * was handled within tsleep(), and the parent would only see
737 * the CONT when both are stopped and continued together. This little
738 * two-line hack restores this effect.
740 while (q->p_stat == SSTOP)
741 tstop();
743 nfound = 0;
744 LIST_FOREACH(p, &q->p_children, p_sibling) {
745 if (pid != WAIT_ANY &&
746 p->p_pid != pid && p->p_pgid != -pid)
747 continue;
749 /* This special case handles a kthread spawned by linux_clone
750 * (see linux_misc.c). The linux_wait4 and linux_waitpid
751 * functions need to be able to distinguish between waiting
752 * on a process and waiting on a thread. It is a thread if
753 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
754 * signifies we want to wait for threads and not processes.
756 if ((p->p_sigparent != SIGCHLD) ^
757 ((options & WLINUXCLONE) != 0)) {
758 continue;
761 nfound++;
762 if (p->p_stat == SZOMB) {
764 * We may go into SZOMB with threads still present.
765 * We must wait for them to exit before we can reap
766 * the master thread, otherwise we may race reaping
767 * non-master threads.
769 while (p->p_nthreads > 0) {
770 tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
774 * Reap any LWPs left in p->p_lwps. This is usually
775 * just the last LWP. This must be done before
776 * we loop on p_lock since the lwps hold a ref on
777 * it as a vmspace interlock.
779 * Once that is accomplished p_nthreads had better
780 * be zero.
782 while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
783 lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
784 reaplwp(lp);
786 KKASSERT(p->p_nthreads == 0);
789 * Don't do anything really bad until all references
790 * to the process go away. This may include other
791 * LWPs which are still in the process of being
792 * reaped. We can't just pull the rug out from under
793 * them because they may still be using the VM space.
795 * Certain kernel facilities such as /proc will also
796 * put a hold on the process for short periods of
797 * time.
799 while (p->p_lock)
800 tsleep(p, 0, "reap3", hz);
802 /* scheduling hook for heuristic */
803 /* XXX no lwp available, we need a different heuristic */
805 /* p->p_usched->heuristic_exiting(td->td_lwp, deadlp); */
808 /* Take care of our return values. */
809 *res = p->p_pid;
810 if (status)
811 *status = p->p_xstat;
812 if (rusage)
813 *rusage = p->p_ru;
815 * If we got the child via a ptrace 'attach',
816 * we need to give it back to the old parent.
818 if (p->p_oppid && (t = pfind(p->p_oppid))) {
819 p->p_oppid = 0;
820 proc_reparent(p, t);
821 ksignal(t, SIGCHLD);
822 wakeup((caddr_t)t);
823 error = 0;
824 goto done;
828 * Unlink the proc from its process group so that
829 * the following operations won't lead to an
830 * inconsistent state for processes running down
831 * the zombie list.
833 KKASSERT(p->p_lock == 0);
834 proc_remove_zombie(p);
835 leavepgrp(p);
837 p->p_xstat = 0;
838 ruadd(&q->p_cru, &p->p_ru);
841 * Decrement the count of procs running with this uid.
843 chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
846 * Free up credentials.
848 crfree(p->p_ucred);
849 p->p_ucred = NULL;
852 * Remove unused arguments
854 if (p->p_args && --p->p_args->ar_ref == 0)
855 FREE(p->p_args, M_PARGS);
857 if (--p->p_sigacts->ps_refcnt == 0) {
858 kfree(p->p_sigacts, M_SUBPROC);
859 p->p_sigacts = NULL;
862 vm_waitproc(p);
863 kfree(p, M_PROC);
864 nprocs--;
865 error = 0;
866 goto done;
868 if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
869 (p->p_flag & P_TRACED || options & WUNTRACED)) {
870 p->p_flag |= P_WAITED;
872 *res = p->p_pid;
873 if (status)
874 *status = W_STOPCODE(p->p_xstat);
875 /* Zero rusage so we get something consistent. */
876 if (rusage)
877 bzero(rusage, sizeof(*rusage));
878 error = 0;
879 goto done;
881 if (options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
882 *res = p->p_pid;
883 p->p_flag &= ~P_CONTINUED;
885 if (status)
886 *status = SIGCONT;
887 error = 0;
888 goto done;
891 if (nfound == 0) {
892 error = ECHILD;
893 goto done;
895 if (options & WNOHANG) {
896 *res = 0;
897 error = 0;
898 goto done;
900 error = tsleep((caddr_t)q, PCATCH, "wait", 0);
901 if (error) {
902 done:
903 rel_mplock();
904 return (error);
906 goto loop;
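/*
 * The "done:" label sits inside the error test purely so the successful
 * branches above can share a single rel_mplock()/return path; when
 * tsleep() returns zero the wait is simply retried from "loop:".
 */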
910 * make process 'parent' the new parent of process 'child'.
912 void
913 proc_reparent(struct proc *child, struct proc *parent)
916 if (child->p_pptr == parent)
917 return;
919 LIST_REMOVE(child, p_sibling);
920 LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
921 child->p_pptr = parent;
925 * The next two functions are to handle adding/deleting items on the
926 * exit callout list
928 * at_exit():
929 * Take the arguments given and put them onto the exit callout list;
930 * however, first make sure that it's not already there.
931 * Returns 0 on success.
935 at_exit(exitlist_fn function)
937 struct exitlist *ep;
939 #ifdef INVARIANTS
940 /* Be noisy if the programmer has lost track of things */
941 if (rm_at_exit(function))
942 kprintf("WARNING: exit callout entry (%p) already present\n",
943 function);
944 #endif
945 ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
946 if (ep == NULL)
947 return (ENOMEM);
948 ep->function = function;
949 TAILQ_INSERT_TAIL(&exit_list, ep, next);
950 return (0);
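/*
 * Illustrative sketch: a subsystem could register and unregister an exit
 * callout roughly as follows.  The example_* names are hypothetical; the
 * callback signature is assumed to match exitlist_fn, which exit1()
 * invokes with the exiting thread.
 */
#if 0
static void
example_exit_hook(struct thread *td)
{
	/* per-process cleanup, run from exit1() via the exit_list */
}

static void
example_register(void)
{
	if (at_exit(example_exit_hook) != 0)
		kprintf("example: unable to register exit hook\n");
}

static void
example_unregister(void)
{
	rm_at_exit(example_exit_hook);
}
#endif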
954 * Scan the exit callout list for the given item and remove it.
955 * Returns the number of items removed (0 or 1)
958 rm_at_exit(exitlist_fn function)
960 struct exitlist *ep;
962 TAILQ_FOREACH(ep, &exit_list, next) {
963 if (ep->function == function) {
964 TAILQ_REMOVE(&exit_list, ep, next);
965 kfree(ep, M_ATEXIT);
966 return(1);
969 return (0);
973 * LWP reaper related code.
975 static void
976 reaplwps(void *context, int dummy)
978 struct lwplist *lwplist = context;
979 struct lwp *lp;
981 get_mplock();
982 while ((lp = LIST_FIRST(lwplist))) {
983 LIST_REMOVE(lp, u.lwp_reap_entry);
984 reaplwp(lp);
986 rel_mplock();
989 static void
990 reaplwp(struct lwp *lp)
992 while (lwp_wait(lp) == 0)
993 tsleep(lp, 0, "lwpreap", 1);
994 lwp_dispose(lp);
997 static void
998 deadlwp_init(void)
1000 int cpu;
1002 for (cpu = 0; cpu < ncpus; cpu++) {
1003 LIST_INIT(&deadlwp_list[cpu]);
1004 deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]), M_DEVBUF, M_WAITOK);
1005 TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
1009 SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
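/*
 * Reaper plumbing: deadlwp_init() runs from SYSINIT at SI_SUB_CONFIGURE
 * and sets up one dead-lwp list and one task per cpu.  lwp_exit() then
 * enqueues dying non-master lwps on its cpu's list and fires the task;
 * reaplwps() drains the list under the MP lock, and reaplwp() blocks in
 * lwp_wait()/lwp_dispose() until each thread can safely be freed.
 */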