/* dragonfly.git: sys/kern/kern_fork.c */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 * $FreeBSD: src/sys/kern/kern_fork.c,v 1.72.2.14 2003/06/26 04:15:10 silby Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/jail.h>
#include <sys/lwp.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/vmmeter.h>
#include <sys/refcount.h>
#include <sys/thread2.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <sys/dsched.h>

static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");
static MALLOC_DEFINE(M_REAPER, "reaper", "process reapers");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);

static struct lwp	*lwp_fork(struct lwp *, struct proc *, int flags,
			    const cpumask_t *mask);
static int		lwp_create1(struct lwp_params *params,
			    const cpumask_t *mask);

int forksleep;			/* Place for fork1() to sleep on. */

/*
 * Red-Black tree support for LWPs
 */
static int
rb_lwp_compare(struct lwp *lp1, struct lwp *lp2)
{
	if (lp1->lwp_tid < lp2->lwp_tid)
		return(-1);
	if (lp1->lwp_tid > lp2->lwp_tid)
		return(1);
	return(0);
}

RB_GENERATE2(lwp_rb_tree, lwp, u.lwp_rbnode, rb_lwp_compare, lwpid_t, lwp_tid);

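/*
 * The per-process lwp tree is keyed on the thread id (lwp_tid).
 * RB_GENERATE2 is assumed to also emit a direct lookup-by-tid helper in
 * addition to the standard tree operations.
 */
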
/*
 * fork() system call
 */
int
sys_fork(struct fork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * vfork() system call
 */
int
sys_vfork(struct vfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		PHOLD(p2);
		start_forked_proc(lp, p2);
		uap->sysmsg_fds[0] = p2->p_pid;
		uap->sysmsg_fds[1] = 0;
		PRELE(p2);
	}
	return error;
}

/*
 * Handle rforks.  An rfork may (1) operate on the current process without
 * creating a new one, (2) create a new process that shares the current
 * process's vmspace, signals, and/or descriptors, or (3) create a new
 * process that does not share these things (normal fork).
 *
 * Note that we only call start_forked_proc() if a new process is actually
 * created.
 *
 * rfork { int flags }
 */
int
sys_rfork(struct rfork_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(lp, uap->flags | RFPGLOCK, &p2);
	if (error == 0) {
		if (p2) {
			PHOLD(p2);
			start_forked_proc(lp, p2);
			uap->sysmsg_fds[0] = p2->p_pid;
			uap->sysmsg_fds[1] = 0;
			PRELE(p2);
		} else {
			uap->sysmsg_fds[0] = 0;
			uap->sysmsg_fds[1] = 0;
		}
	}
	return error;
}

static int
lwp_create1(struct lwp_params *uprm, const cpumask_t *umask)
{
	struct proc *p = curproc;
	struct lwp *lp;
	struct lwp_params params;
	cpumask_t *mask = NULL, mask0;
	int error;

	error = copyin(uprm, &params, sizeof(params));
	if (error)
		goto fail2;

	if (umask != NULL) {
		error = copyin(umask, &mask0, sizeof(mask0));
		if (error)
			goto fail2;
		CPUMASK_ANDMASK(mask0, smp_active_mask);
		if (CPUMASK_TESTNZERO(mask0))
			mask = &mask0;
	}

	lwkt_gettoken(&p->p_token);
	plimit_lwp_fork(p);	/* force exclusive access */
	lp = lwp_fork(curthread->td_lwp, p, RFPROC | RFMEM, mask);
	error = cpu_prepare_lwp(lp, &params);
	if (error)
		goto fail;
	if (params.lwp_tid1 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid1, sizeof(lp->lwp_tid))))
		goto fail;
	if (params.lwp_tid2 != NULL &&
	    (error = copyout(&lp->lwp_tid, params.lwp_tid2, sizeof(lp->lwp_tid))))
		goto fail;

	/*
	 * Now schedule the new lwp.
	 */
	p->p_usched->resetpriority(lp);
	crit_enter();
	lp->lwp_stat = LSRUN;
	p->p_usched->setrunqueue(lp);
	crit_exit();
	lwkt_reltoken(&p->p_token);

	return (0);

fail:
	/*
	 * Make sure no one is using this lwp, before it is removed from
	 * the tree.  If we didn't wait here, lwp tree iteration with
	 * blocking operation would be broken.
	 */
	while (lp->lwp_lock > 0)
		tsleep(lp, 0, "lwpfail", 1);
	lwp_rb_tree_RB_REMOVE(&p->p_lwp_tree, lp);
	--p->p_nthreads;
	/* lwp_dispose expects an exited lwp, and a held proc */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	lp->lwp_thread->td_flags |= TDF_EXITING;
	lwkt_remove_tdallq(lp->lwp_thread);
	PHOLD(p);
	biosched_done(lp->lwp_thread);
	dsched_exit_thread(lp->lwp_thread);
	lwp_dispose(lp);
	lwkt_reltoken(&p->p_token);
fail2:
	return (error);
}

/*
 * Low level thread create used by pthreads.
 */
int
sys_lwp_create(struct lwp_create_args *uap)
{
	return (lwp_create1(uap->params, NULL));
}

int
sys_lwp_create2(struct lwp_create2_args *uap)
{
	return (lwp_create1(uap->params, uap->mask));
}

int	nprocs = 1;		/* process 0 */

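/*
 * fork1() does most of the work for fork(), vfork() and rfork().  Unless
 * RFPROC is clear (in which case resources are merely divorced from the
 * current process), a new process is created in the SIDL state and
 * returned via procp; the caller is responsible for making it runnable
 * via start_forked_proc().
 */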
int
fork1(struct lwp *lp1, int flags, struct proc **procp)
{
	struct proc *p1 = lp1->lwp_proc;
	struct proc *p2;
	struct proc *pptr;
	struct pgrp *p1grp;
	struct pgrp *plkgrp;
	struct sysreaper *reap;
	uid_t uid;
	int ok, error;
	static int curfail = 0;
	static struct timeval lastfail;
	struct forklist *ep;
	struct filedesc_to_leader *fdtol;

	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	lwkt_gettoken(&p1->p_token);
	plkgrp = NULL;
	p2 = NULL;

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		/*
		 * This kind of stunt does not work anymore if
		 * there are native threads (lwps) running
		 */
		if (p1->p_nthreads != 1) {
			error = EINVAL;
			goto done;
		}

		vm_fork(p1, 0, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(p1);
			fdfree(p1, fdtmp);
		}

		/*
		 * Unshare file descriptors (from parent.)
		 */
		if (flags & RFFDG) {
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;
				error = fdcopy(p1, &newfd);
				if (error != 0) {
					error = ENOMEM;
					goto done;
				}
				fdfree(p1, newfd);
			}
		}
		*procp = NULL;
		error = 0;
		goto done;
	}

	/*
	 * Interlock against process group signal delivery.  If signals
	 * are pending after the interlock is obtained we have to restart
	 * the system call to process the signals.  If we don't the child
	 * can miss a pgsignal (such as ^C) sent during the fork.
	 *
	 * We can't use CURSIG() here because it will process any STOPs
	 * and cause the process group lock to be held indefinitely.  If
	 * a STOP occurs, the fork will be restarted after the CONT.
	 */
	p1grp = p1->p_pgrp;
	if ((flags & RFPGLOCK) && (plkgrp = p1->p_pgrp) != NULL) {
		pgref(plkgrp);
		lockmgr(&plkgrp->pg_lock, LK_SHARED);
		if (CURSIG_NOBLOCK(lp1)) {
			error = ERESTART;
			goto done;
		}
	}

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = lp1->lwp_thread->td_ucred->cr_ruid;
	if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) {
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	atomic_add_int(&nprocs, 1);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(lp1->lwp_thread->td_ucred->cr_ruidinfo, 1,
			(uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count
		 */
		atomic_add_int(&nprocs, -1);
		if (ppsratecheck(&lastfail, &curfail, 1))
			kprintf("maxproc limit exceeded by uid %d, please "
				"see tuning(7) and login.conf(5).\n", uid);
		tsleep(&forksleep, 0, "fork", hz / 2);
		error = EAGAIN;
		goto done;
	}

	/*
	 * Allocate a new process, don't get fancy: zero the structure.
	 */
	p2 = kmalloc(sizeof(struct proc), M_PROC, M_WAITOK|M_ZERO);

	/*
	 * Core initialization.  SIDL is a safety state that protects the
	 * partially initialized process once it starts getting hooked
	 * into system structures and becomes addressable.
	 *
	 * We must be sure to acquire p2->p_token as well, we must hold it
	 * once the process is on the allproc list to avoid things such
	 * as competing modifications to p_flags.
	 */
	mycpu->gd_forkid += ncpus;
	p2->p_forkid = mycpu->gd_forkid + mycpu->gd_cpuid;
	p2->p_lasttid = -1;	/* first tid will be 0 */
	p2->p_stat = SIDL;

	/*
	 * NOTE: Process 0 will not have a reaper, but process 1 (init) and
	 *	 all other processes always will.
	 */
	if ((reap = p1->p_reaper) != NULL) {
		reaper_hold(reap);
		p2->p_reaper = reap;
	} else {
		p2->p_reaper = NULL;
	}

	RB_INIT(&p2->p_lwp_tree);
	spin_init(&p2->p_spin, "procfork1");
	lwkt_token_init(&p2->p_token, "proc");
	lwkt_gettoken(&p2->p_token);

	/*
	 * Setup linkage for kernel based threading XXX lwp.  Also add the
	 * process to the allproclist.
	 *
	 * The process structure is addressable after this point.
	 */
	if (flags & RFTHREAD) {
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
	} else {
		p2->p_leader = p2;
	}
	proc_add_allproc(p2);

	/*
	 * Initialize the section which is copied verbatim from the parent.
	 */
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	      ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Duplicate sub-structures as needed.  Increase reference counts
	 * on shared objects.
	 *
	 * NOTE: because we are now on the allproc list it is possible for
	 *	 other consumers to gain temporary references to p2
	 *	 (p2->p_lock can change).
	 */
	if (p1->p_flags & P_PROFIL)
		startprofclock(p2);
	p2->p_ucred = crhold(lp1->lwp_thread->td_ucred);

	if (jailed(p2->p_ucred))
		p2->p_flags |= P_JAILED;

	if (p2->p_args)
		refcount_acquire(&p2->p_args->ar_ref);

	p2->p_usched = p1->p_usched;
	/* XXX: verify copy of the secondary iosched stuff */
	dsched_enter_proc(p2);

	if (flags & RFSIGSHARE) {
		p2->p_sigacts = p1->p_sigacts;
		refcount_acquire(&p2->p_sigacts->ps_refcnt);
	} else {
		p2->p_sigacts = kmalloc(sizeof(*p2->p_sigacts),
					M_SUBPROC, M_WAITOK);
		bcopy(p1->p_sigacts, p2->p_sigacts, sizeof(*p2->p_sigacts));
		refcount_init(&p2->p_sigacts->ps_refcnt, 1);
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/* copy namecache handle to the text file */
	if (p1->p_textnch.mount)
		cache_copy(&p1->p_textnch, &p2->p_textnch);

	/*
	 * Handle file descriptors
	 */
	if (flags & RFCFDG) {
		p2->p_fd = fdinit(p1);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		error = fdcopy(p1, &p2->p_fd);
		if (error != 0) {
			error = ENOMEM;
			goto done;
		}
		fdtol = NULL;
	} else {
		p2->p_fd = fdshare(p1);
		if (p1->p_fdtol == NULL) {
			p1->p_fdtol = filedesc_to_leader_alloc(NULL,
							       p1->p_leader);
		}
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			fdtol->fdl_refcount++;
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol, p2);
		}
	}
	p2->p_fdtol = fdtol;
	p2->p_limit = plimit_fork(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flags |= p1->p_flags & P_SUGID;
	if (p1->p_session->s_ttyvp != NULL && (p1->p_flags & P_CONTROLT))
		p2->p_flags |= P_CONTROLT;
	if (flags & RFPPWAIT) {
		p2->p_flags |= P_PPWAIT;
		if (p1->p_upmap)
			atomic_add_int(&p1->p_upmap->invfork, 1);
	}

	/*
	 * Inherit the virtual kernel structure (allows a virtual kernel
	 * to fork to simulate multiple cpus).
	 */
	if (p1->p_vkernel)
		vkernel_inherit(p1, p2);

	/*
	 * Once we are on a pglist we may receive signals.  XXX we might
	 * race a ^C being sent to the process group by not receiving it
	 * at all prior to this line.
	 */
	pgref(p1grp);
	lwkt_gettoken(&p1grp->pg_token);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	lwkt_reltoken(&p1grp->pg_token);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of the reaper (typically init).  This effectively disassociates
	 * the child from the parent.
	 *
	 * Temporarily hold pptr for the RFNOWAIT case to avoid ripouts.
	 */
	if (flags & RFNOWAIT) {
		pptr = reaper_get(reap);
		if (pptr == NULL) {
			pptr = initproc;
			PHOLD(pptr);
		}
	} else {
		pptr = p1;
	}
	p2->p_pptr = pptr;
	LIST_INIT(&p2->p_children);

	lwkt_gettoken(&pptr->p_token);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	lwkt_reltoken(&pptr->p_token);

	if (flags & RFNOWAIT)
		PRELE(pptr);

	varsymset_init(&p2->p_varsymset, &p1->p_varsymset);
	callout_init_mp(&p2->p_ithandle);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracenode is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracenode == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		p2->p_tracenode = ktrinherit(p1->p_tracenode);
	}
#endif

	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 *
	 * Gets PRELE'd in the caller in start_forked_proc().
	 */
	PHOLD(p1);

	vm_fork(p1, p2, flags);

	/*
	 * Create the first lwp associated with the new proc.
	 * It will return via a different execution path later, directly
	 * into userland, after it was put on the runq by
	 * start_forked_proc().
	 */
	lwp_fork(lp1, p2, flags, NULL);

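	/*
	 * Update the vmmeter fork statistics.  The exact flag combinations
	 * used by sys_fork() and sys_vfork() classify the operation as a
	 * fork or a vfork; forks initiated by proc0 count as kernel thread
	 * creation and anything else counts as an rfork.
	 */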
	if (flags == (RFFDG | RFPROC | RFPGLOCK)) {
		mycpu->gd_cnt.v_forks++;
		mycpu->gd_cnt.v_forkpages += p2->p_vmspace->vm_dsize +
					     p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK)) {
		mycpu->gd_cnt.v_vforks++;
		mycpu->gd_cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		mycpu->gd_cnt.v_kthreads++;
		mycpu->gd_cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
						p2->p_vmspace->vm_ssize;
	} else {
		mycpu->gd_cnt.v_rforks++;
		mycpu->gd_cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
					      p2->p_vmspace->vm_ssize;
	}

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}

	/*
	 * Set the start time.  Note that the process is not runnable.  The
	 * caller is responsible for making it runnable.
	 */
	microtime(&p2->p_start);
	p2->p_acflag = AFORK;

	/*
	 * tell any interested parties about the new process
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	error = 0;
done:
	if (p2)
		lwkt_reltoken(&p2->p_token);
	lwkt_reltoken(&p1->p_token);
	if (plkgrp) {
		lockmgr(&plkgrp->pg_lock, LK_RELEASE);
		pgrel(plkgrp);
	}
	return (error);
}

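/*
 * Create a new lwp in destproc, modeled after origlp and optionally
 * restricted to the cpus in mask.  The lwp is left in the LSRUN state but
 * is not placed on a run queue; the caller is responsible for scheduling
 * it.
 */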
static struct lwp *
lwp_fork(struct lwp *origlp, struct proc *destproc, int flags,
	 const cpumask_t *mask)
{
	globaldata_t gd = mycpu;
	struct lwp *lp;
	struct thread *td;

	lp = kmalloc(sizeof(struct lwp), M_LWP, M_WAITOK|M_ZERO);

	lp->lwp_proc = destproc;
	lp->lwp_vmspace = destproc->p_vmspace;
	lp->lwp_stat = LSRUN;
	bcopy(&origlp->lwp_startcopy, &lp->lwp_startcopy,
	      (unsigned) ((caddr_t)&lp->lwp_endcopy -
			  (caddr_t)&lp->lwp_startcopy));
	if (mask != NULL)
		lp->lwp_cpumask = *mask;

	/*
	 * Reset the sigaltstack if memory is shared, otherwise inherit
	 * it.
	 */
	if (flags & RFMEM) {
		lp->lwp_sigstk.ss_flags = SS_DISABLE;
		lp->lwp_sigstk.ss_size = 0;
		lp->lwp_sigstk.ss_sp = NULL;
		lp->lwp_flags &= ~LWP_ALTSTACK;
	} else {
		lp->lwp_flags |= origlp->lwp_flags & LWP_ALTSTACK;
	}

	/*
	 * Set cpbase to the last timeout that occurred (not the upcoming
	 * timeout).
	 *
	 * A critical section is required since a timer IPI can update
	 * scheduler specific data.
	 */
	crit_enter();
	lp->lwp_cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;
	destproc->p_usched->heuristic_forking(origlp, lp);
	crit_exit();
	CPUMASK_ANDMASK(lp->lwp_cpumask, usched_mastermask);
	lwkt_token_init(&lp->lwp_token, "lwp_token");
	spin_init(&lp->lwp_spin, "lwptoken");

	/*
	 * Assign the thread to the current cpu to begin with so we
	 * can manipulate it.
	 */
	td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, gd->gd_cpuid, 0);
	lp->lwp_thread = td;
	td->td_ucred = crhold(destproc->p_ucred);
	td->td_proc = destproc;
	td->td_lwp = lp;
	td->td_switch = cpu_heavy_switch;
#ifdef NO_LWKT_SPLIT_USERPRI
	lwkt_setpri(td, TDPRI_USER_NORM);
#else
	lwkt_setpri(td, TDPRI_KERN_USER);
#endif
	lwkt_set_comm(td, "%s", destproc->p_comm);

	/*
	 * cpu_fork will copy and update the pcb, set up the kernel stack,
	 * and make the child ready to run.
	 */
	cpu_fork(origlp, lp, flags);
	kqueue_init(&lp->lwp_kqueue, destproc->p_fd);

	/*
	 * Assign a TID to the lp.  Loop until the insert succeeds (returns
	 * NULL).
	 *
	 * If we are in a vfork assign the same TID as the lwp that did the
	 * vfork().  This way if the user program messes around with
	 * pthread calls inside the vfork(), it will operate like an
	 * extension of the (blocked) parent.  Also note that since the
	 * address space is being shared, insofar as pthreads is concerned,
	 * the code running in the vfork() is part of the original process.
	 */
	if (flags & RFPPWAIT) {
		lp->lwp_tid = origlp->lwp_tid - 1;
	} else {
		lp->lwp_tid = destproc->p_lasttid;
	}

	do {
		if (++lp->lwp_tid < 0)
			lp->lwp_tid = 1;
	} while (lwp_rb_tree_RB_INSERT(&destproc->p_lwp_tree, lp) != NULL);

	destproc->p_lasttid = lp->lwp_tid;
	destproc->p_nthreads++;

	/*
	 * This flag is set and never cleared.  It means that the process
	 * was threaded at some point.  Used to improve exit performance.
	 */
	destproc->p_flags |= P_MAYBETHREADED;

	return (lp);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * at_fork():
 * Take the arguments given and put them onto the fork callout list,
 * however first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
int
at_fork(forklist_fn function)
{
	struct forklist *ep;

#ifdef INVARIANTS
	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function)) {
		kprintf("WARNING: fork callout entry (%p) already present\n",
			function);
	}
#endif
	ep = kmalloc(sizeof(*ep), M_ATFORK, M_WAITOK|M_ZERO);
	ep->function = function;
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	return (0);
}

/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
int
rm_at_fork(forklist_fn function)
{
	struct forklist *ep;

	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			kfree(ep, M_ATFORK);
			return(1);
		}
	}
	return (0);
}

/*
 * Add a forked process to the run queue after any remaining setup, such
 * as setting the fork handler, has been completed.
 *
 * p2 is held by the caller.
 */
void
start_forked_proc(struct lwp *lp1, struct proc *p2)
{
	struct lwp *lp2 = ONLY_LWP_IN_PROC(p2);
	int pflags;

	/*
	 * Move from SIDL to RUN queue, and activate the process's thread.
	 * Activation of the thread effectively makes the process "a"
	 * current process, so we do not setrunqueue().
	 *
	 * YYY setrunqueue works here but we should clean up the trampoline
	 * code so we just schedule the LWKT thread and let the trampoline
	 * deal with the userland scheduler on return to userland.
	 */
	KASSERT(p2->p_stat == SIDL,
	    ("cannot start forked process, bad status: %p", p2));
	p2->p_usched->resetpriority(lp2);
	crit_enter();
	p2->p_stat = SACTIVE;
	lp2->lwp_stat = LSRUN;
	p2->p_usched->setrunqueue(lp2);
	crit_exit();

	/*
	 * Now can be swapped.
	 */
	PRELE(lp1->lwp_proc);

	/*
	 * Preserve synchronization semantics of vfork.  P_PPWAIT is set in
	 * the child until it has retired the parent's resources.  The parent
	 * must wait for the flag to be cleared by the child.
	 *
	 * Interlock the flag/tsleep with atomic ops to avoid unnecessary
	 * p_token conflicts.
	 *
	 * XXX Is this use of an atomic op on a field that is not normally
	 *     manipulated with atomic ops ok?
	 */
	while ((pflags = p2->p_flags) & P_PPWAIT) {
		cpu_ccfence();
		tsleep_interlock(lp1->lwp_proc, 0);
		if (atomic_cmpset_int(&p2->p_flags, pflags, pflags))
			tsleep(lp1->lwp_proc, PINTERLOCKED, "ppwait", 0);
	}
}

/*
 * procctl (idtype_t idtype, id_t id, int cmd, void *arg)
 */
int
sys_procctl(struct procctl_args *uap)
{
	struct proc *p = curproc;
	struct proc *p2;
	struct sysreaper *reap;
	union reaper_info udata;
	int error;

	if (uap->idtype != P_PID || uap->id != (id_t)p->p_pid)
		return EINVAL;

	switch(uap->cmd) {
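	/*
	 * PROC_REAP_ACQUIRE: make the calling process the reaper for its
	 * subtree of descendants, unless it already owns reaper status
	 * (EALREADY).
	 */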
	case PROC_REAP_ACQUIRE:
		lwkt_gettoken(&p->p_token);
		reap = kmalloc(sizeof(*reap), M_REAPER, M_WAITOK|M_ZERO);
		if (p->p_reaper == NULL || p->p_reaper->p != p) {
			reaper_init(p, reap);
			error = 0;
		} else {
			kfree(reap, M_REAPER);
			error = EALREADY;
		}
		lwkt_reltoken(&p->p_token);
		break;
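	/*
	 * PROC_REAP_RELEASE: give up reaper status, reverting the process
	 * to its parent's reaper (ENOTCONN if we do not own reaper status).
	 */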
	case PROC_REAP_RELEASE:
		lwkt_gettoken(&p->p_token);
release_again:
		reap = p->p_reaper;
		KKASSERT(reap != NULL);
		if (reap->p == p) {
			reaper_hold(reap);	/* in case of thread race */
			lockmgr(&reap->lock, LK_EXCLUSIVE);
			if (reap->p != p) {
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				goto release_again;
			}
			reap->p = NULL;
			p->p_reaper = reap->parent;
			if (p->p_reaper)
				reaper_hold(p->p_reaper);
			lockmgr(&reap->lock, LK_RELEASE);
			reaper_drop(reap);	/* our ref */
			reaper_drop(reap);	/* old p_reaper ref */
			error = 0;
		} else {
			error = ENOTCONN;
		}
		lwkt_reltoken(&p->p_token);
		break;
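	/*
	 * PROC_REAP_STATUS: copy out the reaper flags and reference count
	 * (if we own reaper status) along with the pid of our first child
	 * (or -1).
	 */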
	case PROC_REAP_STATUS:
		bzero(&udata, sizeof(udata));
		lwkt_gettoken_shared(&p->p_token);
		if ((reap = p->p_reaper) != NULL && reap->p == p) {
			udata.status.flags = reap->flags;
			udata.status.refs = reap->refs - 1; /* minus ours */
		}
		p2 = LIST_FIRST(&p->p_children);
		udata.status.pid_head = p2 ? p2->p_pid : -1;
		lwkt_reltoken(&p->p_token);

		if (uap->data) {
			error = copyout(&udata, uap->data,
					sizeof(udata.status));
		} else {
			error = 0;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Bump ref on reaper, preventing destruction
 */
void
reaper_hold(struct sysreaper *reap)
{
	KKASSERT(reap->refs > 0);
	refcount_acquire(&reap->refs);
}

/*
 * Drop ref on reaper, destroy the structure on the 1->0
 * transition and loop on the parent.
 */
void
reaper_drop(struct sysreaper *next)
{
	struct sysreaper *reap;

	while ((reap = next) != NULL) {
		if (refcount_release(&reap->refs)) {
			next = reap->parent;
			KKASSERT(reap->p == NULL);
			reap->parent = NULL;
			kfree(reap, M_REAPER);
		} else {
			next = NULL;
		}
	}
}

/*
 * Initialize a static or newly allocated reaper structure
 */
void
reaper_init(struct proc *p, struct sysreaper *reap)
{
	reap->parent = p->p_reaper;
	reap->p = p;
	if (p == initproc) {
		reap->flags = REAPER_STAT_OWNED | REAPER_STAT_REALINIT;
		reap->refs = 2;
	} else {
		reap->flags = REAPER_STAT_OWNED;
		reap->refs = 1;
	}
	lockinit(&reap->lock, "subrp", 0, 0);
	cpu_sfence();
	p->p_reaper = reap;
}

/*
 * Called with p->p_token held during exit.
 *
 * This is a bit simpler than RELEASE because there are no threads remaining
 * to race.  We only release if we own the reaper; the exit code will handle
 * the final p_reaper release.
 */
struct sysreaper *
reaper_exit(struct proc *p)
{
	struct sysreaper *reap;

	/*
	 * Release acquired reaper
	 */
	if ((reap = p->p_reaper) != NULL && reap->p == p) {
		lockmgr(&reap->lock, LK_EXCLUSIVE);
		p->p_reaper = reap->parent;
		if (p->p_reaper)
			reaper_hold(p->p_reaper);
		reap->p = NULL;
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
	}

	/*
	 * Return and clear reaper (caller is holding p_token for us)
	 * (reap->p does not equal p).  Caller must drop it.
	 */
	if ((reap = p->p_reaper) != NULL) {
		p->p_reaper = NULL;
	}
	return reap;
}

/*
 * Return a held (PHOLD) process representing the reaper for process (p).
 * NULL should not normally be returned.  Caller should PRELE() the returned
 * reaper process when finished.
 *
 * Remove dead internal nodes while we are at it.
 *
 * Process (p)'s token must be held on call.
 * The returned process's token is NOT acquired by this routine.
 */
struct proc *
reaper_get(struct sysreaper *reap)
{
	struct sysreaper *next;
	struct proc *reproc;

	if (reap == NULL)
		return NULL;

	/*
	 * Extra hold for loop
	 */
	reaper_hold(reap);

	while (reap) {
		lockmgr(&reap->lock, LK_SHARED);
		if (reap->p) {
			/*
			 * Probable reaper
			 */
			if (reap->p) {
				reproc = reap->p;
				PHOLD(reproc);
				lockmgr(&reap->lock, LK_RELEASE);
				reaper_drop(reap);
				return reproc;
			}

			/*
			 * Raced, try again
			 */
			lockmgr(&reap->lock, LK_RELEASE);
			continue;
		}

		/*
		 * Traverse upwards in the reaper topology, destroy
		 * dead internal nodes when possible.
		 *
		 * NOTE: Our ref on next means that a dead node should
		 *	 have 2 (ours and reap->parent's).
		 */
		next = reap->parent;
		while (next) {
			reaper_hold(next);
			if (next->refs == 2 && next->p == NULL) {
				lockmgr(&reap->lock, LK_RELEASE);
				lockmgr(&reap->lock, LK_EXCLUSIVE);
				if (next->refs == 2 &&
				    reap->parent == next &&
				    next->p == NULL) {
					/*
					 * reap->parent inherits ref from next.
					 */
					reap->parent = next->parent;
					next->parent = NULL;
					reaper_drop(next);	/* ours */
					reaper_drop(next);	/* old parent */
					next = reap->parent;
					continue;	/* possible chain */
				}
			}
			break;
		}
		lockmgr(&reap->lock, LK_RELEASE);
		reaper_drop(reap);
		reap = next;
	}
	return NULL;
}