/* sys/kern/kern_proc.c */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <sys/exec.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)		(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)		(pid & ALLPROC_HMASK)
/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
#define PIDSEL_DOMAINS	(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT /	\
			 ALLPROC_HSIZE * ALLPROC_HSIZE)
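
/*
 * Worked example (editorial note, not part of the original source):
 * assuming DragonFly's PID_MAX of 999999, the expression above
 * evaluates as 999999 * 10 / 10 / 256 * 256 = 999936.  The trailing
 * divide-then-multiply by ALLPROC_HSIZE deliberately rounds the domain
 * count down to a multiple of ALLPROC_HSIZE, which the proc_makepid()
 * inner loops depend on.
 */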
/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
	   &ps_showallprocs, 0,
	   "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
	   &ps_showallthreads, 0,
	   "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
	    &pid_domain_skips, 0,
	    "Number of pid_doms[] skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
	    &pid_inner_skips, 0,
	    "Number of pid allocation inner-loop skips");
static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t procglob[ALLPROC_HSIZE];
/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * A ~100,000 entry array will support a 10-second reuse latency at
 * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 *
 * Currently we allocate around a megabyte, making the worst-case fork
 * rate around 100,000/second.
 */
static uint8_t *pid_doms;
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
static __inline
struct ucred *
pcredcache(struct ucred *cr, struct proc *p)
{
	if (cr != p->p_ucred) {
		if (cr)
			crfree(cr);
		spin_lock(&p->p_spin);
		if ((cr = p->p_ucred) != NULL)
			crhold(cr);
		spin_unlock(&p->p_spin);
	}
	return cr;
}
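
/*
 * Illustrative usage (mirrors the consumers in the sysctl handlers
 * below): a scan loop keeps one cached credential reference and
 * refreshes it per-process, freeing the final reference when done:
 *
 *	struct ucred *crcache = NULL;
 *
 *	crcache = pcredcache(crcache, p);	(per process scanned)
 *	...
 *	if (crcache)
 *		crfree(crcache);		(once, after the scan)
 */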
/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
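
/*
 * Illustrative tuning example (hypothetical session, not from the
 * original source):
 *
 *	# sysctl kern.randompid=1000
 *
 * causes each fork to advance the next pid by a random offset modulo
 * 1000.  Per the handler above, values below 2 disable randomization
 * entirely and values in [2,100) are rounded up to 100.
 */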
/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Allocate dynamically.  This array can be large (~1MB) so don't
	 * waste boot loader space.
	 */
	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
			   M_PROC, M_WAITOK | M_ZERO);

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	/*
	 * Other misc init.
	 */
	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];
		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
	uihashinit();
}
void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}
/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF
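
/*
 * Typical short-term hold pattern (illustrative sketch; the scan
 * functions later in this file are the real consumers):
 *
 *	PHOLD(p);
 *	... p cannot be ripped out from under us here ...
 *	PRELE(p);
 *
 * The low 29 bits of p_lock (PLOCK_MASK) are the hold count proper;
 * the top bits are the flag bits defined above.
 */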
void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}
void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}
/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return(0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return(0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return(1);
			}
		}
	}
}
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
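
/*
 * Illustrative reaper pattern (sketch only; kern_wait() is the real
 * consumer).  pholdzomb() serializes competing reapers, and a non-zero
 * return means we blocked and (p) may already be gone:
 *
 *	if (pholdzomb(p) == 0) {
 *		... we are the exclusive zombie reaper for p ...
 *		prelezomb(p);
 *	} else {
 *		... re-lookup p under the parent's p_token ...
 *	}
 */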
/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
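
/*
 * Example (illustrative): every successful pfind() must be paired with
 * a PRELE() once the caller is done with the process:
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		... p is held and cannot be reaped here ...
 *		PRELE(p);
 *	}
 */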
/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}
void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}
/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];

		lwkt_gettoken(&prg->proc_token);
		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);	/* manual pgref */
		pgrel(opgrp);	/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
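
/*
 * Illustrative mapping (editorial sketch): the setsid() syscall creates
 * a new session and group via enterpgrp(p, p->p_pid, 1), while
 * setpgid() moves a process between groups in the same session via
 * enterpgrp(p, pgid, 0).
 */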
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}
/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and tty_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}
/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for cycling, do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto retry;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}
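
/*
 * Worked example (editorial note): with ALLPROC_HSIZE 256, stepping a
 * colliding candidate from base to base + 256 leaves ALLPROC_HASH()
 * unchanged, e.g. ALLPROC_HASH(1000) == ALLPROC_HASH(1256) == 232, so
 * the restart1 loops above keep searching the same hash chain under
 * the same proc_token.
 */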
/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;
	p->p_ppid = 0;
	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}
/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}
/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}
void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_PROC,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int ns;
	int ne;
	int r;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];
		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
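
/*
 * Example consumer (illustrative sketch; the callback name is
 * hypothetical).  Returning -1 from the callback terminates the scan:
 *
 *	static int
 *	count_stopped_callback(struct proc *p, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (p->p_stat == SSTOP)
 *			++*countp;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	allproc_scan(count_stopped_callback, &count, 0);
 */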
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
{
	struct proc *p;
	struct lwp *lp;
	int ns;
	int ne;
	int r = 0;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * No requirements.
 * The callback is made with the process held and proc_token held.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	procglob_t *prg;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return(0);
}
/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;
	struct ucred *crcache = NULL;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			crcache = pcredcache(crcache, p);
			if (PRISON_CHECK(cr1, crcache))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if (ps_showallprocs == 0) {
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    p_trespass(cr1, crcache)) {
					continue;
				}
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_uid != (uid_t)name[0]) {
					continue;
				}
				break;

			case KERN_PROC_RUID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_ruid != (uid_t)name[0]) {
					continue;
				}
				break;
			}

			crcache = pcredcache(crcache, p);
			if (!PRISON_CHECK(cr1, crcache))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);

post_threads:
	if (crcache)
		crfree(crcache);
	return (error);
}
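
/*
 * Illustrative userland consumer (sketch using the standard sysctl(3)
 * pattern) reading the whole table via the KERN_PROC_ALL node defined
 * at the bottom of this file:
 *
 *	int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
 *	size_t len;
 *
 *	if (sysctl(mib, 3, NULL, &len, NULL, 0) == 0) {
 *		struct kinfo_proc *kp = malloc(len);
 *		if (kp != NULL && sysctl(mib, 3, kp, &len, NULL, 0) == 0)
 *			... len / sizeof(*kp) entries are valid ...
 *	}
 */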
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap.
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Args set by setproctitle() sysctl.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
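
/*
 * Illustrative userland view (hypothetical snippet): setproctitle(3)
 * style title updates arrive through this node's newptr path:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, getpid() };
 *
 *	sysctl(mib, 4, NULL, NULL, "mytitle", strlen("mytitle") + 1);
 */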
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}
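
/*
 * Illustrative userland usage (standard BSD pattern): a pid of -1
 * returns the caller's own executable path:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
 *	char buf[MAXPATHLEN];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0)
 *		... buf contains the NUL-terminated path ...
 */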
static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	/*int *name = (int *)arg1;*/
	u_int namelen = arg2;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;

	if (namelen > 1)
		return (EINVAL);
	/* ignore pid if passed in (freebsd compatibility) */

	sv = curproc->p_sysent;
	bzero(&kst, sizeof(kst));
	if (sv->sv_szsigcode) {
		intptr_t sigbase;

		sigbase = trunc_page64((intptr_t)PS_STRINGS -
				       *sv->sv_szsigcode);
		sigbase -= SZSIGCODE_EXTRA_BYTES;

		kst.ksigtramp_start = (void *)sigbase;
		kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
	}
	error = SYSCTL_OUT(req, &kst, sizeof(kst));

	return (error);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd,
	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc_pathname, "Process executable path");

SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp",
	"Return sigtramp address range");