/*
 * (MPSAFE)
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_proc.c      8.7 (Berkeley) 2/14/95
 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");

static void pgdelete(struct pgrp *);
static void orphanpg(struct pgrp *pg);
static pid_t proc_getnewpid_locked(int random_offset);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error || !req->newptr)
                return (error);
        if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                pid = PID_MAX - 100;
        else if (pid < 2)                       /* NOP */
                pid = 0;
        else if (pid < 100)                     /* Make it reasonable */
                pid = 100;
        randompid = pid;
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
            0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
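
/*
 * Illustrative sketch (not part of the original file): an administrator
 * enables pid randomization by setting the modulus, e.g.
 *
 *      sysctl kern.randompid=1000
 *
 * after which each fork offsets nextpid by karc4random() % 1000 in
 * proc_add_allproc() below.  Values below 2 disable randomization and
 * values below 100 are clamped up to 100 by the handler above.
 */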
/*
 * Initialize global process hashing structures.
 *
 * Called from the low level boot code only.
 */
void
procinit(void)
{
        LIST_INIT(&allproc);
        LIST_INIT(&zombproc);
        lwkt_init();
        pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
        pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
        uihashinit();
}
/*
 * Process hold/release support functions.  These functions must be MPSAFE.
 * Called via the PHOLD(), PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB      0x20000000
#define PLOCK_WAITING   0x40000000
#define PLOCK_MASK      0x1FFFFFFF
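
/*
 * Illustrative sketch (not part of the original file): a typical
 * short-term hold brackets an operation with PHOLD()/PRELE() so the
 * proc cannot be ripped out by the exit path in between:
 *
 *      PHOLD(p);               // bumps the hold count in p->p_lock
 *      // ... inspect or operate on p; it cannot be reaped here ...
 *      PRELE(p);               // drops the hold, may wake a waiter
 *
 * The fork/exec/exit paths in turn use PSTALL(p, "wmesg", count), which
 * resolves to pstall() below, to wait for such transient holds to drain
 * before proceeding.
 */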
void
pstall(struct proc *p, const char *wmesg, int count)
{
        int o;
        int n;

        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_MASK) <= count)
                        break;
                n = o | PLOCK_WAITING;
                tsleep_interlock(&p->p_lock, 0);

                /*
                 * If someone is trying to single-step the process during
                 * an exec or an exit they can deadlock us because procfs
                 * sleeps with the process held.
                 */
                if (p->p_stops) {
                        if (p->p_flags & P_INEXEC) {
                                wakeup(&p->p_stype);
                        } else if (p->p_flags & P_POSTEXIT) {
                                spin_lock(&p->p_spin);
                                p->p_stops = 0;
                                p->p_step = 0;
                                spin_unlock(&p->p_spin);
                                wakeup(&p->p_stype);
                        }
                }
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
                }
        }
}

void
phold(struct proc *p)
{
        atomic_add_int(&p->p_lock, 1);
}
/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *           MP races.
 */
void
prele(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 1, 0))
                return;

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~PLOCK_WAITING;
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}
/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
                return(0);

        /*
         * Slow path
         */
        for (;;) {
                o = p->p_lock;
                cpu_ccfence();
                if ((o & PLOCK_ZOMB) == 0) {
                        n = (o + 1) | PLOCK_ZOMB;
                        if (atomic_cmpset_int(&p->p_lock, o, n))
                                return(0);
                } else {
                        KKASSERT((o & PLOCK_MASK) > 0);
                        n = o | PLOCK_WAITING;
                        tsleep_interlock(&p->p_lock, 0);
                        if (atomic_cmpset_int(&p->p_lock, o, n)) {
                                tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
                                /* (p) can be ripped out at this point */
                                return(1);
                        }
                }
        }
}
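
/*
 * Illustrative sketch (not part of the original file): a reaper such as
 * kern_wait() is expected to interlock the child (p) through the parent's
 * p_token and then treat a non-zero return as "we blocked without a ref,
 * (p) may already be gone":
 *
 *      lwkt_gettoken(&parent->p_token);        // keeps child (p) valid
 *      if (pholdzomb(p) == 0) {
 *              // held and serialized against other reapers
 *              proc_remove_zombie(p);
 *              prelezomb(p);
 *      } else {
 *              // blocked without a ref; re-scan the children and retry
 *      }
 *      lwkt_reltoken(&parent->p_token);
 */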
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *           MP races.
 */
void
prelezomb(struct proc *p)
{
        int o;
        int n;

        /*
         * Fast path
         */
        if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
                return;

        /*
         * Slow path
         */
        KKASSERT(p->p_lock & PLOCK_ZOMB);
        for (;;) {
                o = p->p_lock;
                KKASSERT((o & PLOCK_MASK) > 0);
                cpu_ccfence();
                n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
                if (atomic_cmpset_int(&p->p_lock, o, n)) {
                        if (o & PLOCK_WAITING)
                                wakeup(&p->p_lock);
                        break;
                }
        }
}
/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 * The caller must hold proc_token if the caller wishes a stable result.
 */
int
inferior(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        while (p != curproc) {
                if (p->p_pid == 0) {
                        lwkt_reltoken(&proc_token);
                        return (0);
                }
                p = p->p_pptr;
        }
        lwkt_reltoken(&proc_token);
        return (1);
}
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
        struct proc *p;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}

/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The caller should hold proc_token if the caller wishes a stable result.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
        struct proc *p;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid) {
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}
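
/*
 * Illustrative sketch (not part of the original file): pfind() returns a
 * held process that must be released, while pfindn() returns an
 * unreferenced pointer that is only stable while proc_token is held:
 *
 *      struct proc *p;
 *
 *      if ((p = pfind(pid)) != NULL) {
 *              // p is held and cannot be reaped out from under us
 *              PRELE(p);
 *      }
 */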
void
pgref(struct pgrp *pgrp)
{
        refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
        if (refcount_release(&pgrp->pg_refs))
                pgdelete(pgrp);
}
/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
        struct pgrp *pgrp;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
                if (pgrp->pg_id == pgid) {
                        refcount_acquire(&pgrp->pg_refs);
                        lwkt_reltoken(&proc_token);
                        return (pgrp);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}
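
/*
 * Illustrative sketch (not part of the original file): a caller that only
 * inspects the process group pairs the lookup with pgrel():
 *
 *      struct pgrp *pgrp;
 *
 *      if ((pgrp = pgfind(pgid)) != NULL) {
 *              // inspect pgrp->pg_session, pg_members, etc.
 *              pgrel(pgrp);    // may pgdelete() on last ref
 *      }
 */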
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
        struct pgrp *pgrp;
        struct pgrp *opgrp;
        int error;

        pgrp = pgfind(pgid);

        KASSERT(pgrp == NULL || !mksess,
                ("enterpgrp: setsid into non-empty pgrp"));
        KASSERT(!SESS_LEADER(p),
                ("enterpgrp: session leader attempted setpgrp"));

        if (pgrp == NULL) {
                pid_t savepid = p->p_pid;
                struct proc *np;

                /*
                 * new process group
                 */
                KASSERT(p->p_pid == pgid,
                        ("enterpgrp: new pgrp and pid != pgid"));
                if ((np = pfindn(savepid)) == NULL || np != p) {
                        error = ESRCH;
                        goto fatal;
                }
                pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK);
                if (mksess) {
                        struct session *sess;

                        /*
                         * new session
                         */
                        sess = kmalloc(sizeof(struct session), M_SESSION,
                                       M_WAITOK);
                        sess->s_leader = p;
                        sess->s_sid = p->p_pid;
                        sess->s_count = 1;
                        sess->s_ttyvp = NULL;
                        sess->s_ttyp = NULL;
                        bcopy(p->p_session->s_login, sess->s_login,
                              sizeof(sess->s_login));
                        pgrp->pg_session = sess;
                        KASSERT(p == curproc,
                                ("enterpgrp: mksession and p != curproc"));
                        lwkt_gettoken(&p->p_token);
                        p->p_flags &= ~P_CONTROLT;
                        lwkt_reltoken(&p->p_token);
                } else {
                        pgrp->pg_session = p->p_session;
                        sess_hold(pgrp->pg_session);
                }
                pgrp->pg_id = pgid;
                LIST_INIT(&pgrp->pg_members);
                LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
                pgrp->pg_jobc = 0;
                SLIST_INIT(&pgrp->pg_sigiolst);
                lwkt_token_init(&pgrp->pg_token, "pgrp_token");
                refcount_init(&pgrp->pg_refs, 1);
                lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
        } else if (pgrp == p->p_pgrp) {
                pgrel(pgrp);
                goto done;
        } /* else pgfind() referenced the pgrp */

        /*
         * Adjust eligibility of affected pgrps to participate in job control.
         * Increment eligibility counts before decrementing, otherwise we
         * could reach 0 spuriously during the first call.
         */
        lwkt_gettoken(&pgrp->pg_token);
        lwkt_gettoken(&p->p_token);
        fixjobc(p, pgrp, 1);
        fixjobc(p, p->p_pgrp, 0);
        while ((opgrp = p->p_pgrp) != NULL) {
                lwkt_gettoken(&opgrp->pg_token);
                LIST_REMOVE(p, p_pglist);
                p->p_pgrp = NULL;
                lwkt_reltoken(&opgrp->pg_token);
                pgrel(opgrp);
        }
        p->p_pgrp = pgrp;
        LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
        lwkt_reltoken(&p->p_token);
        lwkt_reltoken(&pgrp->pg_token);
done:
        error = 0;
fatal:
        return (error);
}
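
/*
 * Illustrative sketch (not part of the original file): the setsid() path
 * creates a new session and process group keyed on the caller's pid,
 * while setpgid() joins or creates a plain process group:
 *
 *      error = enterpgrp(curproc, curproc->p_pid, 1);  // setsid-style
 *      error = enterpgrp(p, pgid, 0);                  // setpgid-style
 *
 * In both cases pgid must equal p->p_pid when the group does not yet
 * exist, which is why the new-pgrp branch above asserts it.
 */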
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
        struct pgrp *pg;

        lwkt_gettoken(&p->p_token);
        pg = p->p_pgrp;
        if (pg) {
                pgref(pg);
                lwkt_gettoken(&pg->pg_token);
                if (p->p_pgrp == pg) {
                        p->p_pgrp = NULL;
                        LIST_REMOVE(p, p_pglist);
                        pgrel(pg);
                }
                lwkt_reltoken(&pg->pg_token);
                lwkt_reltoken(&p->p_token);     /* avoid chaining on rel */
                pgrel(pg);
        } else {
                lwkt_reltoken(&p->p_token);
        }
        return (0);
}
/*
 * Delete a process group.  Must be called only after the last ref has been
 * released.
 */
static void
pgdelete(struct pgrp *pgrp)
{
        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pgid.
         */
        funsetownlst(&pgrp->pg_sigiolst);

        if (pgrp->pg_session->s_ttyp != NULL &&
            pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
                pgrp->pg_session->s_ttyp->t_pgrp = NULL;
        LIST_REMOVE(pgrp, pg_hash);
        sess_rele(pgrp->pg_session);
        kfree(pgrp, M_PGRP);
}
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
        lwkt_gettoken(&tty_token);
        ++sp->s_count;
        lwkt_reltoken(&tty_token);
}
/*
 * No requirements.
 */
void
sess_rele(struct session *sp)
{
        struct tty *tp;

        KKASSERT(sp->s_count > 0);
        lwkt_gettoken(&tty_token);
        if (--sp->s_count == 0) {
                if (sp->s_ttyp && sp->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
                        /* FULL CLOSE, see ttyclearsession() */
                        KKASSERT(sp->s_ttyp->t_session == sp);
                        sp->s_ttyp->t_session = NULL;
#else
                        /* HALF CLOSE, see ttyclearsession() */
                        if (sp->s_ttyp->t_session == sp)
                                sp->s_ttyp->t_session = NULL;
#endif
                }
                if ((tp = sp->s_ttyp) != NULL) {
                        sp->s_ttyp = NULL;
                        ttyunhold(tp);
                }
                kfree(sp, M_SESSION);
        }
        lwkt_reltoken(&tty_token);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
        struct pgrp *hispgrp;
        struct session *mysession;
        struct proc *np;

        /*
         * Check p's parent to see whether p qualifies its own process
         * group; if so, adjust count for p's process group.
         */
        lwkt_gettoken(&p->p_token);     /* p_children scan */
        lwkt_gettoken(&pgrp->pg_token);

        mysession = pgrp->pg_session;
        if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
            hispgrp->pg_session == mysession) {
                if (entering)
                        pgrp->pg_jobc++;
                else if (--pgrp->pg_jobc == 0)
                        orphanpg(pgrp);
        }

        /*
         * Check this process' children to see whether they qualify
         * their process groups; if so, adjust counts for children's
         * process groups.
         */
        LIST_FOREACH(np, &p->p_children, p_sibling) {
                PHOLD(np);
                lwkt_gettoken(&np->p_token);
                if ((hispgrp = np->p_pgrp) != pgrp &&
                    hispgrp->pg_session == mysession &&
                    np->p_stat != SZOMB) {
                        pgref(hispgrp);
                        lwkt_gettoken(&hispgrp->pg_token);
                        if (entering)
                                hispgrp->pg_jobc++;
                        else if (--hispgrp->pg_jobc == 0)
                                orphanpg(hispgrp);
                        lwkt_reltoken(&hispgrp->pg_token);
                        pgrel(hispgrp);
                }
                lwkt_reltoken(&np->p_token);
                PRELE(np);
        }
        KKASSERT(pgrp->pg_refs > 0);
        lwkt_reltoken(&pgrp->pg_token);
        lwkt_reltoken(&p->p_token);
}
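
/*
 * Illustrative sketch (not part of the original file): when moving a
 * process between groups, enterpgrp() above adjusts the new group before
 * the old one:
 *
 *      fixjobc(p, newpgrp, 1);         // entering: ++pg_jobc
 *      fixjobc(p, p->p_pgrp, 0);       // leaving:  --pg_jobc, may orphan
 *
 * If the order were reversed, a group could transiently see pg_jobc reach
 * 0 and have SIGHUP/SIGCONT delivered by orphanpg() even though the move
 * ultimately leaves it qualified for job control.
 */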
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
        struct proc *p;

        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                if (p->p_stat == SSTOP) {
                        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                                ksignal(p, SIGHUP);
                                ksignal(p, SIGCONT);
                        }
                        return;
                }
        }
}
/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
        int random_offset;

        if ((random_offset = randompid) != 0) {
                get_mplock();
                random_offset = karc4random() % random_offset;
                rel_mplock();
        }

        lwkt_gettoken(&proc_token);
        p->p_pid = proc_getnewpid_locked(random_offset);
        LIST_INSERT_HEAD(&allproc, p, p_list);
        LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
        lwkt_reltoken(&proc_token);
}
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * The caller must hold proc_token.
 */
static
pid_t
proc_getnewpid_locked(int random_offset)
{
        static pid_t nextpid;
        static pid_t pidchecked;
        struct proc *p;

        /*
         * Find an unused process ID.  We remember a range of unused IDs
         * ready to use (from nextpid+1 through pidchecked-1).
         */
        nextpid = nextpid + 1 + random_offset;
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (nextpid >= PID_MAX) {
                nextpid = nextpid % PID_MAX;
                if (nextpid < 100)
                        nextpid += 100;
                pidchecked = 0;
        }
        if (nextpid >= pidchecked) {
                int doingzomb = 0;

                pidchecked = PID_MAX;

                /*
                 * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
                 * than nextpid, so we can avoid checking for a while.
                 *
                 * NOTE: Processes in the midst of being forked may not
                 *       have p_pgrp and p_pgrp->pg_session set up
                 *       yet, so we have to check for NULL.
                 *
                 *       Processes being torn down should be interlocked
                 *       with proc_token prior to the clearing of their
                 *       p_pgrp.
                 */
                p = LIST_FIRST(&allproc);
again:
                for (; p != NULL; p = LIST_NEXT(p, p_list)) {
                        while (p->p_pid == nextpid ||
                            (p->p_pgrp && p->p_pgrp->pg_id == nextpid) ||
                            (p->p_pgrp && p->p_session &&
                             p->p_session->s_sid == nextpid)) {
                                nextpid++;
                                if (nextpid >= pidchecked)
                                        goto retry;
                        }
                        if (p->p_pid > nextpid && pidchecked > p->p_pid)
                                pidchecked = p->p_pid;
                        if (p->p_pgrp &&
                            p->p_pgrp->pg_id > nextpid &&
                            pidchecked > p->p_pgrp->pg_id) {
                                pidchecked = p->p_pgrp->pg_id;
                        }
                        if (p->p_pgrp && p->p_session &&
                            p->p_session->s_sid > nextpid &&
                            pidchecked > p->p_session->s_sid) {
                                pidchecked = p->p_session->s_sid;
                        }
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p = LIST_FIRST(&zombproc);
                        goto again;
                }
        }
        return(nextpid);
}
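
/*
 * Illustrative walk-through (not part of the original file): suppose
 * nextpid advances to 613 and the only ids in use above it are 615 (a
 * session id) and 900.  The scan returns 613 and records pidchecked = 615,
 * so 614 is handed out with no scan at all.  The next fork hits
 * 615 >= pidchecked, rescans, skips over the in-use 615, and leaves
 * pidchecked = 900; pids 617 through 899 are then allocated without
 * touching allproc/zombproc.  A large kern.randompid modulus defeats this
 * caching, which is what the comment near randompid warns about.
 */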
/*
 * Called from exit1 to remove a process from the allproc
 * list and move it to the zombie list.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        PSTALL(p, "reap1", 0);
        LIST_REMOVE(p, p_list);
        LIST_INSERT_HEAD(&zombproc, p, p_list);
        LIST_REMOVE(p, p_hash);
        p->p_stat = SZOMB;
        lwkt_reltoken(&proc_token);
        dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
        lwkt_gettoken(&proc_token);
        PSTALL(p, "reap2", 0);
        LIST_REMOVE(p, p_list);         /* off zombproc */
        LIST_REMOVE(p, p_sibling);
        p->p_pptr = NULL;
        lwkt_reltoken(&proc_token);
}
/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
        struct proc *p = lp->lwp_proc;

        if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                allocvnode_gc();
        }
        if (lp->lwp_mpflags & LWP_MP_WEXIT) {
                lwkt_gettoken(&p->p_token);
                lwp_exit(0);
                lwkt_reltoken(&p->p_token);     /* NOT REACHED */
        }
}

/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
        struct lwp *lp = curthread->td_lwp;

        if (lp) {
                if (lp->lwp_mpflags & LWP_MP_VNLRU) {
                        atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
                        allocvnode_gc();
                }
        }
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;
        int limit = nprocs + ncpus;

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                r = callback(p, data);
                PRELE(p);
                if (r < 0)
                        break;
                if (--limit < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}
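
/*
 * Illustrative sketch (not part of the original file): a caller supplies
 * a callback which receives each held process; returning -1 stops the
 * scan early and any other value continues it:
 *
 *      static int
 *      example_count_stopped(struct proc *p, void *data)
 *      {
 *              int *countp = data;
 *
 *              if (p->p_stat == SSTOP)
 *                      ++(*countp);
 *              return (0);     // keep scanning; -1 would terminate
 *      }
 *
 *      int count = 0;
 *      allproc_scan(example_count_stopped, &count);
 */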
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
        struct proc *p;
        struct lwp *lp;
        int r = 0;

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                FOREACH_LWP_IN_PROC(lp, p) {
                        LWPHOLD(lp);
                        r = callback(lp, data);
                        LWPRELE(lp);
                }
                PRELE(p);
                if (r < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * No requirements.
 * The callback is made with the process held and proc_token held.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &zombproc, p_list) {
                PHOLD(p);
                r = callback(p, data);
                PRELE(p);
                if (r < 0)
                        break;
        }
        lwkt_reltoken(&proc_token);
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
        struct pgrp *pgrp;
        struct proc *p;
        int i;

        for (i = 0; i <= pgrphash; i++) {
                if (!LIST_EMPTY(&pgrphashtbl[i])) {
                        kprintf("\tindx %d\n", i);
                        LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
                                kprintf(
                        "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
                                    (void *)pgrp, (long)pgrp->pg_id,
                                    (void *)pgrp->pg_session,
                                    pgrp->pg_session->s_count,
                                    (void *)LIST_FIRST(&pgrp->pg_members));
                                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                                        kprintf("\t\tpid %ld addr %p pgrp %p\n",
                                            (long)p->p_pid, (void *)p,
                                            (void *)p->p_pgrp);
                                }
                        }
                }
        }
}
#endif /* DDB */
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
        struct proc *p;

        lwkt_gettoken(&proc_token);
        LIST_FOREACH(p, &zombproc, p_list) {
                if (p->p_pid == pid) {
                        PHOLD(p);
                        lwkt_reltoken(&proc_token);
                        return (p);
                }
        }
        lwkt_reltoken(&proc_token);
        return (NULL);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        struct lwp *lp;
        int skp = 0, had_output = 0;
        int error;

        bzero(&ki, sizeof(ki));
        lwkt_gettoken(&p->p_token);
        fill_kinfo_proc(p, &ki);
        if ((flags & KERN_PROC_FLAG_LWP) == 0)
                skp = 1;
        error = 0;
        FOREACH_LWP_IN_PROC(lp, p) {
                LWPHOLD(lp);
                fill_kinfo_lwp(lp, &ki.kp_lwp);
                had_output = 1;
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
                LWPRELE(lp);
                if (error)
                        break;
                if (skp)
                        break;
        }
        lwkt_reltoken(&p->p_token);
        /* We need to output at least the proc, even if there is no lwp. */
        if (had_output == 0) {
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
        }
        return (error);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        int error;

        fill_kinfo_proc_kthread(td, &ki);
        error = SYSCTL_OUT(req, &ki, sizeof(ki));
        if (error)
                return error;
        return(0);
}
/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        int oid = oidp->oid_number;
        u_int namelen = arg2;
        struct proc *p;
        struct proclist *plist;
        struct thread *td;
        struct thread *marker;
        int doingzomb, flags = 0;
        int error = 0;
        int n;
        int origcpu;
        struct ucred *cr1 = curproc->p_ucred;

        flags = oid & KERN_PROC_FLAGMASK;
        oid &= ~KERN_PROC_FLAGMASK;

        if ((oid == KERN_PROC_ALL && namelen != 0) ||
            (oid != KERN_PROC_ALL && namelen != 1)) {
                return (EINVAL);
        }

        /*
         * proc_token protects the allproc list and PHOLD() prevents the
         * process from being removed from the allproc list or the zombproc
         * list.
         */
        lwkt_gettoken(&proc_token);
        if (oid == KERN_PROC_PID) {
                p = pfindn((pid_t)name[0]);
                if (p == NULL)
                        goto post_threads;
                if (!PRISON_CHECK(cr1, p->p_ucred))
                        goto post_threads;
                PHOLD(p);
                error = sysctl_out_proc(p, req, flags);
                PRELE(p);
                goto post_threads;
        }

        if (!req->oldptr) {
                /* overestimate by 5 procs */
                error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
                if (error)
                        goto post_threads;
        }
        for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
                if (doingzomb)
                        plist = &zombproc;
                else
                        plist = &allproc;
                LIST_FOREACH(p, plist, p_list) {
                        /*
                         * Show a user only their processes.
                         */
                        if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
                                continue;
                        /*
                         * Skip embryonic processes.
                         */
                        if (p->p_stat == SIDL)
                                continue;
                        /*
                         * TODO - make more efficient (see notes below).
                         * do by session.
                         */
                        switch (oid) {
                        case KERN_PROC_PGRP:
                                /* could do this by traversing pgrp */
                                if (p->p_pgrp == NULL ||
                                    p->p_pgrp->pg_id != (pid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_TTY:
                                if ((p->p_flags & P_CONTROLT) == 0 ||
                                    p->p_session == NULL ||
                                    p->p_session->s_ttyp == NULL ||
                                    dev2udev(p->p_session->s_ttyp->t_dev) !=
                                        (udev_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_UID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_uid != (uid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_RUID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_ruid != (uid_t)name[0])
                                        continue;
                                break;
                        }

                        if (!PRISON_CHECK(cr1, p->p_ucred))
                                continue;
                        PHOLD(p);
                        error = sysctl_out_proc(p, req, flags);
                        PRELE(p);
                        if (error)
                                goto post_threads;
                }
        }

        /*
         * Iterate over all active cpus and scan their thread list.  Start
         * with the next logical cpu and end with our original cpu.  We
         * migrate our own thread to each target cpu in order to safely scan
         * its thread list.  In the last loop we migrate back to our original
         * cpu.
         */
        origcpu = mycpu->gd_cpuid;
        if (!ps_showallthreads || jailed(cr1))
                goto post_threads;

        marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
        marker->td_flags = TDF_MARKER;
        error = 0;

        for (n = 1; n <= ncpus; ++n) {
                globaldata_t rgd;
                int nid;

                nid = (origcpu + n) % ncpus;
                if ((smp_active_mask & CPUMASK(nid)) == 0)
                        continue;
                rgd = globaldata_find(nid);
                lwkt_setcpu_self(rgd);

                crit_enter();
                TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

                while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
                        TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                        TAILQ_INSERT_BEFORE(td, marker, td_allq);
                        if (td->td_flags & TDF_MARKER)
                                continue;
                        if (td->td_proc)
                                continue;

                        lwkt_hold(td);
                        crit_exit();

                        switch (oid) {
                        case KERN_PROC_PGRP:
                        case KERN_PROC_TTY:
                        case KERN_PROC_UID:
                        case KERN_PROC_RUID:
                                break;
                        default:
                                error = sysctl_out_proc_kthread(td, req,
                                                                doingzomb);
                                break;
                        }
                        lwkt_rele(td);
                        crit_enter();
                        if (error)
                                break;
                }
                TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
                crit_exit();

                if (error)
                        break;
        }
        kfree(marker, M_TEMP);

post_threads:
        lwkt_reltoken(&proc_token);
        return (error);
}
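
/*
 * Illustrative userland sketch (not part of the original file): the
 * handler above services sysctl(3) queries such as
 * { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid }.  For a single-threaded
 * process one kinfo_proc record is returned:
 *
 *      struct kinfo_proc kp;
 *      size_t len = sizeof(kp);
 *      int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *
 *      if (sysctl(mib, 4, &kp, &len, NULL, 0) == 0) {
 *              // kp now describes the calling process
 *      }
 *
 * Multi-lwp processes queried with the _LWP variants return one record
 * per lwp, so real callers size the buffer by probing with a NULL oldp
 * first.
 */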
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        struct pargs *opa;
        struct pargs *pa;
        int error = 0;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken(&p->p_token);

        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->newptr && curproc != p) {
                error = EPERM;
                goto done;
        }
        if (req->oldptr && (pa = p->p_args) != NULL) {
                refcount_acquire(&pa->ar_ref);
                error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
                if (refcount_release(&pa->ar_ref))
                        kfree(pa, M_PARGS);
        }
        if (req->newptr == NULL)
                goto done;

        if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
                goto done;
        }

        pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
        refcount_init(&pa->ar_ref, 1);
        pa->ar_length = req->newlen;
        error = SYSCTL_IN(req, pa->ar_args, req->newlen);
        if (error) {
                kfree(pa, M_PARGS);
                goto done;
        }

        /*
         * Replace p_args with the new pa.  p_args may have previously
         * been NULL.
         */
        opa = p->p_args;
        p->p_args = pa;

        if (opa) {
                KKASSERT(opa->ar_ref > 0);
                if (refcount_release(&opa->ar_ref)) {
                        kfree(opa, M_PARGS);
                        /* opa = NULL; */
                }
        }
done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}
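
/*
 * Illustrative userland sketch (not part of the original file): reading a
 * process's argument list and setting our own title both go through
 * { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid }:
 *
 *      char buf[2048];
 *      size_t len = sizeof(buf);
 *      int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *
 *      sysctl(mib, 4, buf, &len, NULL, 0);     // read args (NUL-separated)
 *
 *      char title[] = "myprog: idle";
 *      mib[3] = getpid();
 *      sysctl(mib, 4, NULL, NULL, title, sizeof(title));  // set own title
 *
 * The EPERM check above is what restricts the set case to curproc.
 */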
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
        int *name = (int *)arg1;
        u_int namelen = arg2;
        struct proc *p;
        int error = 0;
        char *fullpath, *freepath;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (p == NULL)
                goto done;
        lwkt_gettoken(&p->p_token);

        /*
         * If we are not allowed to see other args, we certainly shouldn't
         * get the cwd either.  Also check the usual trespassing.
         */
        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                goto done;

        if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
                struct nchandle nch;

                cache_copy(&p->p_fd->fd_ncdir, &nch);
                error = cache_fullpath(p, &nch, NULL,
                                       &fullpath, &freepath, 0);
                cache_drop(&nch);
                if (error)
                        goto done;
                error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
                kfree(freepath, M_TEMP);
        }

done:
        if (p) {
                lwkt_reltoken(&p->p_token);
                PRELE(p);
        }
        return (error);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
        0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
        CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
        sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
        sysctl_kern_proc_cwd, "Process current working directory");