/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);
static struct lwp	*lwp_fork(struct lwp *, struct proc *, int flags,
			    const cpumask_t *mask);
static int		lwp_create1(struct lwp_params *params,
			    const cpumask_t *mask);
static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0);

int forksleep;			/* Place for fork1() to sleep on. */
/*
 * Red-Black tree support for LWPs
 */
static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);
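
/*
 * Illustrative sketch (not in the original file): besides the usual
 * RB_INSERT/RB_REMOVE/RB_FOREACH operations generated for lwp_rb_tree,
 * the RB_GENERATE2() form above also emits a direct lookup helper keyed
 * on the lwp_tid field, so a process's LWP can be found by TID roughly
 * as follows (helper name per the sys/tree.h RB_GENERATE2 convention;
 * verify there before relying on it):
 *
 *	struct lwp *lp;
 *
 *	lp = lwp_rb_tree_RB_LOOKUP(&p->p_lwp_tree, tid);
 */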
/*
 * When forking, memory underpinning umtx-supported mutexes may be set
 * COW causing the physical address to change.  We must wake up any threads
 * blocked on the physical address to allow them to re-resolve their VM.
 *
 * (caller is holding p->p_token)
 */
static void
wake_umtx_threads(struct proc *p1)
{
	struct lwp *lp;
	struct thread *td;

	RB_FOREACH(lp, lwp_rb_tree, &p1->p_lwp_tree) {
		td = lp->lwp_thread;
		if (td && (td->td_flags & TDF_TSLEEPQ) &&
		    (td->td_wdomain & PDOMAIN_MASK) == PDOMAIN_UMTX) {
			wakeup_domain(td->td_wchan, PDOMAIN_UMTX);
		}
	}
}
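
/*
 * Illustrative note (not in the original file): the wait side of this
 * (in kern_umtx.c) sleeps on a channel derived from the physical page
 * backing the userland mutex, in the PDOMAIN_UMTX wait domain that the
 * td_wdomain test above matches.  After a fork marks those pages COW,
 * the parent's next write moves the data to a new physical page, so a
 * sleeper keyed to the old physical address would never be woken by
 * activity on the new page; wakeup_domain() forces such threads awake
 * so they re-resolve their VM.  (A reading aid only; consult
 * kern_umtx.c for the authoritative details.)
 */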
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);

	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}
/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);

	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);

	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}
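
/*
 * Illustrative userland sketch (not in the original file): the flag bits
 * handled above compose directly; e.g. a vfork()-like request without the
 * parent-blocking RFPPWAIT semantic:
 *
 *	pid_t pid = rfork(RFPROC | RFFDG | RFMEM);
 *	if (pid == 0) {
 *		// child: new process, private descriptor table,
 *		// shared address space
 *		_exit(0);
 *	}
 */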
static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1,
			     sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2,
			     sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operation would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}
/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{
	return (lwp_create1(uap->params, uap->mask));
}
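
/*
 * Illustrative userland sketch (not in the original file), showing how a
 * threading library might drive the lwp_create() path above.  Only the
 * lwp_tid1/lwp_tid2 writeback pointers are referenced by lwp_create1();
 * the other field names below are assumptions to be checked against the
 * struct lwp_params definition in sys/lwp.h:
 *
 *	struct lwp_params params;
 *	lwpid_t tid;
 *
 *	bzero(&params, sizeof(params));
 *	params.lwp_func = thread_entry;		// hypothetical entry point
 *	params.lwp_arg = arg;
 *	params.lwp_stack = stack_top;
 *	params.lwp_tid1 = &tid;			// kernel copies new TID out
 *	params.lwp_tid2 = NULL;
 *	lwp_create(&params);
 */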
int	nprocs = 1;		/* process 0 */
int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;
	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);
		if ((flags & RFMEM) == 0)
			wake_umtx_threads(p1);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		error = 0;
		goto done;
	}
	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't, the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	error = 0;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		}
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  This also
	 * applies to root.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			plimit_getadjvalue(RLIMIT_NPROC));
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("maxproc limit of %jd "
				"exceeded by \"%s\" uid %d, "
				"please see tuning(7) and login.conf(5).\n",
				plimit_getadjvalue(RLIMIT_NPROC),
				p1->p_comm, uid);
		}
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
	p2->p_lasttid = 0;	/* first tid will be 1 */
	p2->p_stat = SIDL;

	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);
	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;
	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);
	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Adjust depth for resource downscaling
	 */
	if ((p2->p_depth & 31) != 31)
		++p2->p_depth;
	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);
	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp = p1->p_pgrp);
	p2->p_pgrp = p1grp;
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);
	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	p2->p_ppid = pptr->p_pid;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);
	if ((flags & RFMEM) == 0)
		wake_umtx_threads(p1);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}
	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);
	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid <= 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	if (destproc->p_nthreads >= 2)
		destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}
/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
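
/*
 * Illustrative sketch (not in the original file): a kernel module wanting
 * per-fork processing registers a forklist_fn whose signature matches the
 * (*ep->function)(p1, p2, flags) invocation in fork1():
 *
 *	static void
 *	example_fork_hook(struct proc *p1, struct proc *p2, int flags)
 *	{
 *		// inspect or adjust the new child p2 here
 *	}
 *
 *	at_fork(example_fork_hook);	// e.g. on module load
 *	rm_at_fork(example_fork_hook);	// e.g. on module unload
 *
 * (example_fork_hook is hypothetical; check the forklist_fn typedef for
 * the authoritative signature.)
 */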
/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * wakeups.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 *     manipulated with atomic ops ok?
	 */
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}
/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				error = EALREADY;
			} else {
				reap->p = NULL;
				p->p_reaper = reap->parent;
				if (p->p_reaper)
					reaper_hold(p->p_reaper);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);	/* our ref */
				reaper_drop(reap);	/* old p_reaper ref */
				error = 0;
			}
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
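
/*
 * Illustrative userland sketch (not in the original file): a service
 * manager takes ownership of reaping for its process subtree and then
 * queries it, mirroring the cases handled above:
 *
 *	union reaper_info info;
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *	procctl(P_PID, getpid(), PROC_REAP_STATUS, &info);
 *	// info.status.flags, info.status.refs, info.status.pid_head
 *	procctl(P_PID, getpid(), PROC_REAP_RELEASE, NULL);
 */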
/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}
/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			lockmgr(&reaper_lock, LK_EXCLUSIVE);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
			lockmgr(&reaper_lock, LK_RELEASE);
		} else {
			next = NULL;
		}
	}
}
/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
	} else {
		reap->flags = REAPER_STAT_OWNED;
	}
	reap->refs = 1;
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}
/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper, the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear reaper (caller is holding p_token for us)
	 * (reap->p does not equal p).  Caller must drop it.
	 */
	if ((reap = p->p_reaper) != NULL)
		p->p_reaper = NULL;

	return reap;
}
/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Found a live reaper, return it held.
			 */
			reproc = reap->p;
			PHOLD(reproc);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);
			return reproc;
		}
		if (reap->parent == NULL) {
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);
			return NULL;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		reaper_hold(next);
		while (next) {
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					if (next)
						reaper_hold(next);
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}
/*
 * Test that the sender is allowed to send a signal to the target.
 * The sender process is assumed to have a stable reaper.  The
 * target can be e.g. from a scan callback.
 *
 * Target cannot be the reaper process itself unless reaper_ok is specified,
 * or sender == target.
 */
int
reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok)
{
	struct sysreaper *sreap;
	struct sysreaper *reap;
	int r;

	sreap = sender->p_reaper;
	if (sreap == NULL)
		return 1;

	if (sreap == target->p_reaper) {
		if (sreap->p == target && sreap->p != sender && reaper_ok == 0)
			return 0;
		return 1;
	}
	lockmgr(&reaper_lock, LK_SHARED);
	r = 0;
	for (reap = target->p_reaper; reap; reap = reap->parent) {
		if (sreap == reap) {
			if (sreap->p != target || reaper_ok)
				r = 1;
			break;
		}
	}
	lockmgr(&reaper_lock, LK_RELEASE);

	return r;
}