/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD: src/sys/kern/kern_exit.c,v 1.92.2.11 2003/01/13 22:51:16 dillon Exp $
 * $DragonFly: src/sys/kern/kern_exit.c,v 1.91 2008/05/18 20:02:02 nth Exp $
 */
#include "opt_compat.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/ktrace.h>
#include <sys/pioctl.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/taskqueue.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/kern_syscall.h>
#include <sys/upcall.h>
#include <sys/unistd.h>
#include <sys/eventhandler.h>
#include <sys/dsched.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>
static void reaplwps(void *context, int dummy);
static void reaplwp(struct lwp *lp);
static void killlwps(struct lwp *lp);

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
struct task *deadlwp_task[MAXCPU];
struct lwplist deadlwp_list[MAXCPU];
/*
 * SYS_EXIT_ARGS(int rval)
 */
int
sys_exit(struct exit_args *uap)
{
	exit1(W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}
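
/*
 * Note: exit1() does not return.  W_EXITCODE(ret, sig) packs the wait
 * status as (ret << 8) | sig, so a normal exit carries a zero signal
 * field; the parent later unpacks it with WEXITSTATUS()/WTERMSIG()
 * in the wait*() path below.
 */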
/*
 * Death of a lwp or process with optional bells and whistles.
 */
int
sys_extexit(struct extexit_args *uap)
{
	int action;
	int who;
	int error;

	action = EXTEXIT_ACTION(uap->how);
	who = EXTEXIT_WHO(uap->how);

	/* Check parameters before we might perform some action */
	switch (action) {
	case EXTEXIT_SIMPLE:
		break;
	case EXTEXIT_SETINT:
		error = copyout(&uap->status, uap->addr,
				sizeof(uap->status));
		if (error)
			return (error);
		break;
	default:
		return (EINVAL);
	}

	switch (who) {
	case EXTEXIT_LWP:
		/*
		 * Be sure only to perform a simple lwp exit if there is at
		 * least one more lwp in the proc, which will call exit1()
		 * later, otherwise the proc will be an UNDEAD and not even a
		 * zombie.
		 */
		if (curproc->p_nthreads > 1) {
			lwp_exit(0);
			/* NOT REACHED */
		}
		/* else last lwp in proc: do the real thing */
		/* FALLTHROUGH */
	default:	/* to help gcc */
	case EXTEXIT_PROC:
		exit1(W_EXITCODE(uap->status, 0));
		/* NOT REACHED */
	}

	rel_mplock();	/* safety */
	return (0);
}
/*
 * Kill all lwps associated with the current process except the
 * current lwp.  Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 */
static int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flag & P_WEXIT)
		return (EALREADY);
	p->p_flag |= P_WEXIT;

	/*
	 * Interlock with LWP_WEXIT and kill any remaining LWPs
	 */
	lp->lwp_flag |= LWP_WEXIT;
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		lp->lwp_flag &= ~LWP_WEXIT;
		p->p_flag &= ~P_WEXIT;
	}
	return (0);
}
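
/*
 * Callers: exit1() passes forexec=0 and never returns to userland,
 * while the exec path is expected to pass forexec=1 so the LWP_WEXIT
 * and P_WEXIT markers are cleared again and the surviving lwp can
 * continue running the new image.
 */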
/*
 * Kill all LWPs except the current one.  Do not try to signal
 * LWPs which have exited on their own or have already been
 * signaled.
 */
static void
killlwps(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;
	struct lwp *tlp;

	/*
	 * Kill the remaining LWPs.  We must send the signal before setting
	 * LWP_WEXIT.  The setting of WEXIT is optional but helps reduce
	 * races.  tlp must be held across the call as it might block and
	 * allow the target lwp to rip itself out from under our loop.
	 */
	FOREACH_LWP_IN_PROC(tlp, p) {
		LWPHOLD(tlp);
		if ((tlp->lwp_flag & LWP_WEXIT) == 0) {
			lwpsignal(p, tlp, SIGKILL);
			tlp->lwp_flag |= LWP_WEXIT;
		}
		LWPRELE(tlp);
	}

	/*
	 * Wait for everything to clear out.
	 */
	while (p->p_nthreads > 1) {
		tsleep(&p->p_nthreads, 0, "killlwps", 0);
	}
}
/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(int rv)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct proc *q, *nq;
	struct vmspace *vm;
	struct vnode *vtmp;
	struct exitlist *ep;
	int error;

	if (p->p_pid == 1) {
		kprintf("init died (signal %d, exit %d)\n",
			WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}
	varsymset_clean(&p->p_varsymset);
	lockuninit(&p->p_varsymset.vx_lock);
	/*
	 * Kill all lwps associated with the current process, return an
	 * error if we race another thread trying to do the same thing
	 * and lose the race.
	 */
	error = killalllwps(0);
	if (error) {
		lwp_exit(0);
		/* NOT REACHED */
	}

	caps_exit(lp->lwp_thread);
	/* are we a task leader? */
	if (p == p->p_leader) {
		struct kill_args killArgs;

		killArgs.signum = SIGKILL;
		q = p->p_peers;
		while (q) {
			killArgs.pid = q->p_pid;
			/*
			 * The interface for kill is better
			 * than the internal signal
			 */
			sys_kill(&killArgs);
			q = q->p_peers;
		}
		while (p->p_peers)
			tsleep((caddr_t)p, 0, "exit1", 0);
	}
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */
	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	EVENTHANDLER_INVOKE(process_exit, p);

	/*
	 * XXX: imho, the eventhandler stuff is much cleaner than this.
	 *	Maybe we should move everything to use eventhandler.
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)();
	if (p->p_flag & P_PROFIL)
		stopprofclock(p);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	SIGEMPTYSET(lp->lwp_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_ithandle);
	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);
	/*
	 * Close open files and release open-file table.
	 */
	fdfree(p);

	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup((caddr_t)p->p_leader);
	}
	/*
	 * XXX Shutdown SYSV semaphores
	 */
	semexit(p);

	KKASSERT(p->p_numposixlocks == 0);
	/* The next two chunks should probably be moved to vmspace_exit. */

	vm = p->p_vmspace;

	/*
	 * Release upcalls associated with this process
	 */
	if (vm->vm_upcalls)
		upc_release(vm, lp);

	/*
	 * Clean up data related to virtual kernel operation.  Clean up
	 * any vkernel context related to the current lwp now so we can
	 * destroy p_vkernel.
	 */
	vkernel_lwp_exit(lp);
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	vmspace_exitbump(vm);
	sysref_put(&vm->vm_sysref);
	if (SESS_LEADER(p)) {
		struct session *sp = p->p_session;

		if (sp->s_ttyvp) {
			/*
			 * We are the controlling process.  Signal the
			 * foreground process group, drain the controlling
			 * terminal, and revoke access to the controlling
			 * terminal.
			 *
			 * NOTE: while waiting for the process group to exit
			 * it is possible that one of the processes in the
			 * group will revoke the tty, so the ttyclosesession()
			 * function will re-check sp->s_ttyvp.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				if (sp->s_ttyp->t_pgrp)
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
				ttyclosesession(sp, 1);	/* also revoke */
			}
			/*
			 * Release the tty.  If someone has it open via
			 * /dev/tty then close it (since they no longer can
			 * once we've NULL'd it out).
			 */
			ttyclosesession(sp, 0);

			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		sp->s_leader = NULL;
	}
	fixjobc(p, p->p_pgrp, 0);
	(void)acct_process(p);
	ktrdestroy(&p->p_tracenode);
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/* Release namecache handle to text file */
	if (p->p_textnch.ncp)
		cache_drop(&p->p_textnch);
	/*
	 * Move the process to the zombie list.  This will block
	 * until the process p_lock count reaches 0.  The process will
	 * not be reaped until TDF_EXITING is set by cpu_thread_exit(),
	 * which is called from cpu_proc_exit().
	 */
	proc_move_allproc_zombie(p);
	q = LIST_FIRST(&p->p_children);
	if (q)		/* only need this if any child is S_ZOMB */
		wakeup((caddr_t)initproc);
	for (; q != 0; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		LIST_REMOVE(q, p_sibling);
		LIST_INSERT_HEAD(&initproc->p_children, q, p_sibling);
		q->p_pptr = initproc;
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			ksignal(q, SIGKILL);
		}
	}
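
	/*
	 * The children reparented above are now init's problem; init
	 * sits in a wait() loop, so the wakeup(initproc) earlier lets
	 * it immediately reap any child that is already a zombie.
	 */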
	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	p->p_xstat = rv;
	calcru_proc(p, &p->p_ru);
	ruadd(&p->p_ru, &p->p_cru);
	/*
	 * notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);
	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, notify process 1 instead (and hope it will handle
	 * this situation).
	 */
	if (p->p_pptr->p_sigacts->ps_flag & PS_NOCLDWAIT) {
		struct proc *pp = p->p_pptr;
		proc_reparent(p, initproc);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup((caddr_t)pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc) {
		ksignal(p->p_pptr, p->p_sigparent);
	} else {
		ksignal(p->p_pptr, SIGCHLD);
	}

	wakeup((caddr_t)p->p_pptr);
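
	/*
	 * p_sigparent is normally SIGCHLD but may be a different signal
	 * for clone()-style children; kern_wait() uses the same field to
	 * tell such threads apart from ordinary processes (see the
	 * WLINUXCLONE handling below).
	 */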
	/*
	 * cpu_exit is responsible for clearing curproc, since
	 * it is heavily integrated with the thread/switching sequence.
	 *
	 * Other substructures are freed from wait().
	 */

	/*
	 * Release the current user process designation on the process so
	 * the userland scheduler can work in someone else.
	 */
	p->p_usched->release_curproc(lp);

	/*
	 * Finally, call machine-dependent code to release as many of the
	 * lwp's resources as we can and halt execution of this thread.
	 */
	lwp_exit(1);
}
/*
 * Eventually called by every exiting LWP
 */
void
lwp_exit(int masterexit)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p = lp->lwp_proc;
	/*
	 * lwp_exit() may be called without setting LWP_WEXIT, so
	 * make sure it is set here.
	 */
	lp->lwp_flag |= LWP_WEXIT;
	/*
	 * Clean up any virtualization
	 */
	vkernel_lwp_exit(lp);
	/*
	 * Clean up select/poll support
	 */
	kqueue_terminate(&lp->lwp_kqueue);
	/*
	 * Clean up any syscall-cached ucred
	 */
	if (td->td_ucred) {
		crfree(td->td_ucred);
		td->td_ucred = NULL;
	}
	/*
	 * Nobody actually wakes us when the lock
	 * count reaches zero, so just wait one tick.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpexit", 1);
	/* Hand down resource usage to our proc */
	ruadd(&p->p_ru, &lp->lwp_ru);
	/*
	 * If we don't hold the process until the LWP is reaped wait*()
	 * may try to dispose of its vmspace before all the LWPs have
	 * actually terminated.
	 */
	PHOLD(p);
	/*
	 * Do any remaining work that might block on us.  We should be
	 * coded such that further blocking is ok after decrementing
	 * p_nthreads but don't take the chance.
	 */
	dsched_exit_thread(td);
	biosched_done(curthread);
	/*
	 * We have to use the reaper for all the LWPs except the one doing
	 * the master exit.  The LWP doing the master exit can just be
	 * left on p_lwps and the process reaper will deal with it
	 * synchronously, which is much faster.
	 */
	if (masterexit == 0) {
		lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
		--p->p_nthreads;
		wakeup(&p->p_nthreads);
		LIST_INSERT_HEAD(&deadlwp_list[mycpuid], lp,
				 u.lwp_reap_entry);
		taskqueue_enqueue(taskqueue_thread[mycpuid],
				  deadlwp_task[mycpuid]);
	} else {
		--p->p_nthreads;
	}
	cpu_lwp_exit();
}
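
/*
 * A non-master lwp cannot free itself here: its kernel stack stays in
 * use until the switch-out completes (see the TDF_RUNNING /
 * TDF_PREEMPT_LOCK discussion in lwp_wait() below), which is why it is
 * queued to the per-cpu deadlwp list for the reaper task instead.
 */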
/*
 * Wait until a lwp is completely dead.
 *
 * If the thread is still executing, which can't be waited upon,
 * return failure.  The caller is responsible for waiting a little
 * bit and checking again.
 *
 * Suggested use:
 * while (!lwp_wait(lp))
 *	tsleep(lp, 0, "lwpwait", 1);
 */
static int
lwp_wait(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);

	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpwait1", 1);
	/*
	 * The lwp's thread may still be in the middle
	 * of switching away; we can't rip its stack out from
	 * under it until TDF_EXITING is set and both
	 * TDF_RUNNING and TDF_PREEMPT_LOCK are clear.
	 * TDF_PREEMPT_LOCK must be checked because TDF_RUNNING
	 * will be cleared temporarily if a thread gets
	 * preempted.
	 *
	 * YYY no wakeup occurs, so we simply return failure
	 * and let the caller deal with sleeping and calling
	 * us again.
	 */
	if ((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) !=
	    TDF_EXITING) {
		return (0);
	}
	return (1);
}
/*
 * Release the resources associated with a lwp.
 * The lwp must be completely dead.
 */
void
lwp_dispose(struct lwp *lp)
{
	struct thread *td = lp->lwp_thread;

	KKASSERT(lwkt_preempted_proc() != lp);
	KKASSERT(td->td_refs == 0);
	KKASSERT((td->td_flags & (TDF_RUNNING|TDF_PREEMPT_LOCK|TDF_EXITING)) ==
		 TDF_EXITING);

	PRELE(lp->lwp_proc);
	lp->lwp_proc = NULL;
	lp->lwp_thread = NULL;
	lwkt_free_thread(td);
	kfree(lp, M_LWP);
}
int
sys_wait4(struct wait_args *uap)
{
	struct rusage rusage;
	int error;
	int status;

	error = kern_wait(uap->pid, (uap->status ? &status : NULL),
			  uap->options, (uap->rusage ? &rusage : NULL),
			  &uap->sysmsg_result);

	if (error == 0 && uap->status)
		error = copyout(&status, uap->status, sizeof(*uap->status));
	if (error == 0 && uap->rusage)
		error = copyout(&rusage, uap->rusage, sizeof(*uap->rusage));
	return (error);
}
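
/*
 * Note that kern_wait() returns the reaped pid via *res, which lands
 * in uap->sysmsg_result and becomes the userland return value of
 * wait4(); status and rusage are only copied out when the caller
 * supplied non-NULL pointers.
 */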
/*
 * wait_args(int pid, int *status, int options, struct rusage *rusage)
 */
int
kern_wait(pid_t pid, int *status, int options, struct rusage *rusage,
	  int *res)
{
	struct thread *td = curthread;
	struct lwp *lp;
	struct proc *q = td->td_proc;
	struct proc *p, *t;
	int nfound, error;
	if (options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	/*
	 * Hack for backwards compatibility with badly written user code.
	 * Or perhaps we have to do this anyway, it is unclear. XXX
	 *
	 * The problem is that if a process group is stopped and the parent
	 * is doing a wait*(..., WUNTRACED, ...), it will see the STOP
	 * of the child and then stop itself when it tries to return from the
	 * system call.  When the process group is resumed the parent will
	 * then get the STOP status even though the child has now resumed
	 * (a followup wait*() will get the CONT status).
	 *
	 * Previously the CONT would overwrite the STOP because the tstop
	 * was handled within tsleep(), and the parent would only see
	 * the CONT when both are stopped and continued together.  This
	 * little two-line hack restores this effect.
	 */
	while (q->p_stat == SSTOP)
		tstop();
loop:
	nfound = 0;
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		if (pid != WAIT_ANY &&
		    p->p_pid != pid && p->p_pgid != -pid) {
			continue;
		}
		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((options & WLINUXCLONE) != 0)) {
			continue;
		}
		nfound++;
		if (p->p_stat == SZOMB) {
			/*
			 * We may go into SZOMB with threads still present.
			 * We must wait for them to exit before we can reap
			 * the master thread, otherwise we may race reaping
			 * non-master threads.
			 */
			while (p->p_nthreads > 0) {
				tsleep(&p->p_nthreads, 0, "lwpzomb", hz);
			}
			/*
			 * Reap any LWPs left in p->p_lwps.  This is usually
			 * just the last LWP.  This must be done before
			 * we loop on p_lock since the lwps hold a ref on
			 * it as a vmspace interlock.
			 *
			 * Once that is accomplished p_nthreads had better
			 * be zero.
			 */
			while ((lp = RB_ROOT(&p->p_lwp_tree)) != NULL) {
				lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
				reaplwp(lp);
			}
			KKASSERT(p->p_nthreads == 0);
			/*
			 * Don't do anything really bad until all references
			 * to the process go away.  This may include other
			 * LWPs which are still in the process of being
			 * reaped.  We can't just pull the rug out from under
			 * them because they may still be using the VM space.
			 *
			 * Certain kernel facilities such as /proc will also
			 * put a hold on the process for short periods of
			 * time.
			 */
			while (p->p_lock)
				tsleep(p, 0, "reap3", hz);
			/* scheduling hook for heuristic */
			/* XXX no lwp available, we need a different heuristic */
			/*
			p->p_usched->heuristic_exiting(td->td_lwp, deadlp);
			*/
			/* Take care of our return values. */
			*res = p->p_pid;
			if (status)
				*status = p->p_xstat;
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid))) {
				p->p_oppid = 0;
				proc_reparent(p, t);
				ksignal(t, SIGCHLD);
				wakeup((caddr_t)t);
				return (0);
			}
			/*
			 * Unlink the proc from its process group so that
			 * the following operations won't lead to an
			 * inconsistent state for processes running down
			 * the zombie list.
			 */
			KKASSERT(p->p_lock == 0);
			proc_remove_zombie(p);
			leavepgrp(p);

			if (rusage)
				*rusage = p->p_ru;
			ruadd(&q->p_cru, &p->p_ru);
			/*
			 * Decrement the count of procs running with this uid.
			 */
			chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;

			/*
			 * Remove unused arguments
			 */
			if (p->p_args && --p->p_args->ar_ref == 0)
				FREE(p->p_args, M_PARGS);
			if (--p->p_sigacts->ps_refcnt == 0) {
				kfree(p->p_sigacts, M_SUBPROC);
				p->p_sigacts = NULL;
			}
			return (0);
		}
894 if (p
->p_stat
== SSTOP
&& (p
->p_flag
& P_WAITED
) == 0 &&
895 (p
->p_flag
& P_TRACED
|| options
& WUNTRACED
)) {
896 p
->p_flag
|= P_WAITED
;
900 *status
= W_STOPCODE(p
->p_xstat
);
901 /* Zero rusage so we get something consistent. */
903 bzero(rusage
, sizeof(rusage
));
907 if (options
& WCONTINUED
&& (p
->p_flag
& P_CONTINUED
)) {
909 p
->p_flag
&= ~P_CONTINUED
;
921 if (options
& WNOHANG
) {
926 error
= tsleep((caddr_t
)q
, PCATCH
, "wait", 0);
/*
 * Make process 'parent' the new parent of process 'child'.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}
/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success.
 */
int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		kprintf("WARNING: exit callout entry (%p) already present\n",
			function);
	ep = kmalloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}
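
/*
 * Usage sketch (hypothetical module, not part of this file): a
 * subsystem would register a cleanup hook once at load time, e.g.
 *
 *	static void my_exit_hook(void) { ... }
 *	...
 *	if (at_exit(my_exit_hook))
 *		kprintf("at_exit registration failed\n");
 *
 * and drop it again with rm_at_exit(my_exit_hook) at unload time.
 */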
/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			kfree(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}
/*
 * LWP reaper related code.
 */
static void
reaplwps(void *context, int dummy)
{
	struct lwplist *lwplist = context;
	struct lwp *lp;

	while ((lp = LIST_FIRST(lwplist))) {
		LIST_REMOVE(lp, u.lwp_reap_entry);
		reaplwp(lp);
	}
}
static void
reaplwp(struct lwp *lp)
{
	while (lwp_wait(lp) == 0)
		tsleep(lp, 0, "lwpreap", 1);
	lwp_dispose(lp);
}
static void
deadlwp_init(void *dummy)
{
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		LIST_INIT(&deadlwp_list[cpu]);
		deadlwp_task[cpu] = kmalloc(sizeof(*deadlwp_task[cpu]),
					    M_DEVBUF, M_WAITOK);
		TASK_INIT(deadlwp_task[cpu], 0, reaplwps, &deadlwp_list[cpu]);
	}
}

SYSINIT(deadlwpinit, SI_SUB_CONFIGURE, SI_ORDER_ANY, deadlwp_init, NULL);
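
/*
 * The SYSINIT above runs deadlwp_init() during boot (SI_SUB_CONFIGURE),
 * allocating one reaper task and dead-lwp list per cpu; lwp_exit()
 * enqueues dying lwps on mycpuid's list so reaping stays cpu-local.
 */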