/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 * $DragonFly: src/sys/kern/kern_fork.c,v 1.68 2007/04/29 18:25:34 dillon Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags);

int forksleep;			/* place for fork1() to sleep on */
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).  A userland
 * usage sketch follows the function below.
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2)
			start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
		uap->sysmsg_fds[1] = 0;
	}
	return error;
}
int
sys_lwp_create(struct lwp_create_args *uap)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	int error;

	error = copyin(uap->params, &params, sizeof(params));
	if (error)
		goto fail2;

	lp = lwp_fork(curthread->td_lwp, p, RFPROC);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();

	return (0);

fail:
	--p->p_nthreads;
	LIST_REMOVE(lp, lwp_list);
	/* lwp_dispose expects an exited lwp, and a held proc */
	lp->lwp_flag |= LWP_WEXIT;
	lp->lwp_thread->td_flags |= TDF_EXITING;
	PHOLD(p);
	lwp_dispose(lp);
fail2:
	return (error);
}
int	nprocs = 1;		/* process 0 */
int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2, *pptr;
	struct pgrp *pgrp;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);
	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running.
		 */
		if (p1->p_nthreads != 1)
			return (EINVAL);

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				newfd = fdcopy(p1);
				fdfree(p1);
				p1->p_fd = newfd;
			}
		}
		*procp = NULL;
		return (0);
	}
	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	error = 0;
	pgrp = NULL;
	if ((flags & RFPGLOCK) && (pgrp = p1->p_pgrp) != NULL) {
		lockmgr(&pgrp->pg_lock, LK_SHARED);
		if (CURSIGNB(lp1)) {
			error = ERESTART;
			goto done;
		}
	}
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		nprocs--;
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
			    "see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/* Allocate new proc. */
	p2 = zalloc(proc_zone);
	bzero(p2, sizeof(*p2));

	/*
	 * Setup linkage for kernel based threading XXX lwp
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}

	LIST_INIT(&p2->p_lwps);

	/*
	 * Setting the state to SIDL protects the partially initialized
	 * process once it starts getting hooked into the rest of the system.
	 */
	p2->p_stat = SIDL;
	proc_add_allproc(p2);
	/*
	 * Make a proc table entry for the new process.
	 * The whole structure was zeroed above, so copy the section that is
	 * copied directly from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(p1->p_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flag |= P_JAILED;

	if (p2->p_args)
		p2->p_args->ar_ref++;

	p2->p_usched = p1->p_usched;
	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		p2->p_sigacts->ps_refcnt++;
	} else {
		p2->p_sigacts = (struct sigacts *)kmalloc(sizeof(*p2->p_sigacts),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		p2->p_sigacts->ps_refcnt = 1;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;
	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);
	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		p2->p_fd = fdcopy(p1);
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1->p_limit);
	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flag & P_CONTROLT))
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;
	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	vkernel_inherit(p1, p2);
	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_children);
	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init(&p2->p_ithandle);
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags);
	if (flags == (RFFDG | RFPROC)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
done:
	if (pgrp)
		lockmgr(&pgrp->pg_lock, LK_RELEASE);
	return (error);
}
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags)
{
	struct lwp *lp;
	struct thread *td;
	lwpid_t tid;

	/*
	 * We need to prevent wrap-around collisions.
	 * Until we have a nice tid allocator, we need to
	 * start searching for free tids once we wrap around.
	 *
	 * XXX give me a nicer allocator
	 */
	if (destproc->p_lasttid + 1 <= 0) {
		tid = 0;
restart:
		FOREACH_LWP_IN_PROC(lp, destproc) {
			if (lp->lwp_tid != tid)
				continue;
			/* tids match, search next. */
			tid++;
			/*
			 * Wait -- the whole tid space is depleted?
			 */
			if (tid <= 0)
				panic("lwp_fork: All tids depleted?!");
			goto restart;
		}
		/* When we come here, the tid is not occupied */
	} else {
		tid = destproc->p_lasttid++;
	}
	lp = zalloc(lwp_zone);
	bzero(lp, sizeof(*lp));
	lp->lwp_proc = destproc;
	lp->lwp_tid = tid;
	LIST_INSERT_HEAD(&destproc->p_lwps, lp, lwp_list);
	destproc->p_nthreads++;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	    (unsigned) ((caddr_t)&lp->lwp_endcopy -
			(caddr_t)&lp->lwp_startcopy));
	lp->lwp_flag |= origlp->lwp_flag & LWP_ALTSTACK;

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = mycpu->gd_schedclock.time -
			 mycpu->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();

	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, -1, 0);
	lp->lwp_thread = td;
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef SMP
	KKASSERT(td->td_mpcount == 1);
#endif
	lwkt_setpri(td, TDPRI_KERN_USER);
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	caps_fork(origlp->lwp_thread, lp->lwp_thread);

	return (lp);
}
/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * but first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
		    function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return (1);
		}
	}
	return (0);
}
/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		tsleep(lp1->lwp_proc, 0, "ppwait", 0);
}