/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp *lwp_fork(struct lwp *, struct proc *, int flags,
			const cpumask_t *mask);
static int lwp_create1(struct lwp_params *params,
			const cpumask_t *mask);
static struct lock reaper_lock = LOCK_INITIALIZER("reapgl", 0, 0);

int forksleep;			/* Place for fork1() to sleep on. */
/*
 * Red-Black tree support for LWPs
 */
static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);
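/*
 * (Note: RB_GENERATE2 also emits a lookup function keyed on lwp_tid,
 * which is how lwps are found by thread id elsewhere in the kernel.)
 */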
/*
 * When forking, memory underpinning umtx-supported mutexes may be set
 * COW causing the physical address to change.  We must wakeup any threads
 * blocked on the physical address to allow them to re-resolve their VM.
 *
 * (caller is holding p->p_token)
 */
static void
wake_umtx_threads(struct proc *p1)
{
	struct lwp *lp;
	struct thread *td;

	RB_FOREACH(lp, lwp_rb_tree, &p1->p_lwp_tree) {
		td = lp->lwp_thread;
		if (td && (td->td_flags & TDF_TSLEEPQ) &&
		    (td->td_wdomain & PDOMAIN_MASK) == PDOMAIN_UMTX) {
			wakeup_domain(td->td_wchan, PDOMAIN_UMTX);
		}
	}
}
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}
/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}
/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new process, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
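/*
 * For reference (derived from sys_fork() above): rfork(RFFDG | RFPROC)
 * is equivalent to fork(), since sys_rfork() adds RFPGLOCK before
 * invoking fork1() with the same flags sys_fork() uses.
 */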
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}
static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1,
			     sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2,
			     sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp, before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operation would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}
/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{
	return (lwp_create1(uap->params, uap->mask));
}
int	nprocs = 1;		/* process 0 */

int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;
	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);
		if ((flags & RFMEM) == 0)
			wake_umtx_threads(p1);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}
	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  This also
	 * applies to root.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			plimit_getadjvalue(RLIMIT_NPROC));
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("maxproc limit of %jd "
				"exceeded by \"%s\" uid %d, "
				"please see tuning(7) and login.conf(5).\n",
				plimit_getadjvalue(RLIMIT_NPROC),
				p1->p_comm,
				uid);
		}
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}
	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
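	/*
	 * (gd_forkid advances by ncpus and is offset by gd_cpuid above,
	 * so each cpu generates unique system-wide fork ids without a
	 * global atomic op.)
	 */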
	p2->p_lasttid = 0;	/* first tid will be 1 */
	p2->p_stat = SIDL;
	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);
	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);
	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;
	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);
	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);
	/*
	 * Adjust depth for resource downscaling
	 */
	if ((p2->p_depth & 31) != 31)
		++p2->p_depth;
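	/*
	 * (The low 5 bits saturate at 31, capping the downscaling depth.)
	 */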
	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}
	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);
	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);
	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	p2->p_ppid = pptr->p_pid;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);
	if ((flags & RFMEM) == 0)
		wake_umtx_threads(p1);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);
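	/*
	 * Update fork statistics.  The exact flag signatures passed by
	 * sys_fork() and sys_vfork() above classify the operation as a
	 * fork or vfork; forks out of proc0 are kernel threads, and
	 * anything else is counted as an rfork.
	 */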
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");
	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);
	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
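		/*
		 * (The -1 is cancelled by the ++ in the loop below,
		 * reproducing the parent lwp's tid.)
		 */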
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid <= 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}
/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}
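/*
 * Illustrative (hypothetical) usage from a kernel module:
 *
 *	static void my_fork_hook(struct proc *p1, struct proc *p2, int flags);
 *
 *	at_fork(my_fork_hook);		(on module load)
 *	rm_at_fork(my_fork_hook);	(before module unload)
 *
 * The hook is then invoked as (*function)(p1, p2, flags) from fork1()
 * for every successful fork.
 */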
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}
/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * p_token conflicts.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 * manipulated with atomic ops ok?
	 */
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}
/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
release_again:
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				goto release_again;
			}
			reap->p = NULL;
			p->p_reaper = reap->parent;
			if (p->p_reaper)
				reaper_hold(p->p_reaper);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);	/* our ref */
			reaper_drop(reap);	/* old p_reaper ref */
			error = 0;
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}
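/*
 * Illustrative userland usage (a sketch only; as the checks above show,
 * the kernel accepts operations on the calling process itself):
 *
 *	union reaper_info info;
 *
 *	procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL);
 *	procctl(P_PID, getpid(), PROC_REAP_STATUS, &info);
 */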
/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}
/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			lockmgr(&reaper_lock, LK_EXCLUSIVE);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
			lockmgr(&reaper_lock, LK_RELEASE);
		} else {
			next = NULL;
		}
	}
}
/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
		reap->refs = 2;
	} else {
		reap->flags = REAPER_STAT_OWNED;
		reap->refs = 1;
	}
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}
/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper, the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear reaper (caller is holding p_token for us)
	 * (reap->p does not equal p).  Caller must drop it.
	 */
	if ((reap = p->p_reaper) != NULL) {
		p->p_reaper = NULL;
	}
	return reap;
}
/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Probable reaper
			 */
			if (reap->p) {
				reproc = reap->p;
				PHOLD(reproc);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				return reproc;
			}

			/*
			 * Raced, try again
			 */
			lockmgr(&reap->lock, LK_RELEASE);
			continue;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		while (next) {
			reaper_hold(next);
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}
/*
 * Test that the sender is allowed to send a signal to the target.
 * The sender process is assumed to have a stable reaper.  The
 * target can be e.g. from a scan callback.
 *
 * Target cannot be the reaper process itself unless reaper_ok is specified,
 * or sender == target.
 */
int
reaper_sigtest(struct proc *sender, struct proc *target, int reaper_ok)
{
	struct sysreaper *sreap;
	struct sysreaper *reap;
	int r;

	sreap = sender->p_reaper;
	if (sreap == NULL)
		return 1;

	if (sreap == target->p_reaper) {
		if (sreap->p == target && sreap->p != sender && reaper_ok == 0)
			return 0;
		return 1;
	}

	lockmgr(&reaper_lock, LK_SHARED);
	r = 0;
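	/*
	 * Walk up the target's reaper hierarchy looking for the sender's
	 * reaper; the sender may signal anything at or below its own
	 * reaper domain (subject to the reaper_ok restriction above).
	 */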
	for (reap = target->p_reaper; reap; reap = reap->parent) {
		if (sreap == reap) {
			if (sreap->p != target || reaper_ok)
				r = 1;
			break;
		}
	}
	lockmgr(&reaper_lock, LK_RELEASE);

	return r;
}