kernel - Make kern_proc cache-friendly
[dragonfly.git] / sys / kern / kern_proc.c
blob 66886a6a8f1afa85f4e4e76bf96b10b8203792af
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)
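
/*
 * Since ALLPROC_HSIZE is a power of two, the AND above is equivalent to
 * (pid % ALLPROC_HSIZE).  For example, pid 1234 hashes to bucket
 * 1234 & 255 = 210.
 */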
/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDSEL_DOMAINS	(PID_MAX / PIDDOM_COUNT / ALLPROC_HSIZE * ALLPROC_HSIZE)
/* Used by libkvm */
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
    &pid_domain_skips, 0,
    "Number of pid_doms[] skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
    &pid_inner_skips, 0,
    "Number of pid_doms[] inner-loop skips");
static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);

/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t	procglob[ALLPROC_HSIZE];
/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * We want ~100,000 entries or so to support a 10-second reuse latency
 * at 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 */
static uint8_t pid_doms[PIDSEL_DOMAINS];	/* ~100,000 entries */
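
/*
 * Worked example of the sizing, assuming a PID_MAX of 999999:
 * PIDSEL_DOMAINS = 999999 / 10 / 256 * 256 = 99840.  A pid's domain is
 * (pid % PIDSEL_DOMAINS), so pids 1000, 100840, 200680, ... share one
 * domain, and because 99840 is a multiple of ALLPROC_HSIZE those pids
 * also hash to the same bucket.
 */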
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
/*
 * No requirements.
 */
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	/*
	 * Other misc init.
	 */
	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];

		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
	uihashinit();
}
void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}
/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF
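
/*
 * Bit layout implied by the masks above:
 *
 *	bit 31     unused
 *	bit 30     PLOCK_WAITING  - somebody is sleeping on p_lock
 *	bit 29     PLOCK_ZOMB     - zombie reap serialization flag
 *	bits 0-28  hold count
 */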
void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_stype);
			}
		}
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}
void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}
/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
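
/*
 * Typical hold usage (hypothetical caller): a hold only prevents the
 * proc structure from being ripped out from under us; it is not a lock.
 * Take p->p_token as well if the fields being inspected must be stable.
 *
 *	PHOLD(p);
 *	lwkt_gettoken_shared(&p->p_token);
 *	(inspect or copy fields from p)
 *	lwkt_reltoken(&p->p_token);
 *	PRELE(p);
 */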
/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set nor the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return (0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return (0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return (1);
			}
		}
	}
}
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
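
/*
 * Usage sketch (hypothetical caller): every successful pfind() must be
 * paired with a PRELE().
 *
 *	struct proc *p;
 *
 *	if ((p = pfind(pid)) != NULL) {
 *		(use p)
 *		PRELE(p);
 *	}
 */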
/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}

void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}
/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}
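
/*
 * Usage sketch (hypothetical caller): the reference returned by pgfind()
 * must be dropped with pgrel() when the caller is done with it.
 *
 *	struct pgrp *pg;
 *
 *	if ((pg = pgfind(pgid)) != NULL) {
 *		(use pg)
 *		pgrel(pg);
 *	}
 */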
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];
		lwkt_gettoken(&prg->proc_token);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);		/* manual pgref */
		pgrel(opgrp);		/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}
/*
 * No requirements.
 */
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, tty_token and prg->proc_token
	 * are held.
	 */
	LIST_REMOVE(sess, s_list);
	pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 *
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
/*
 * A process group has become orphaned; if there are any stopped processes
 * in the group, hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}
/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table.
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
static
void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for wraparound, do not allow a pid < 100.
	 */
	retries = 0;
retry:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto retry;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto retry;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}
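
/*
 * Worked example of the stride: base pid 1234 selects bucket
 * 1234 & 255 = 210.  If 1234 is taken, the next candidate is
 * 1234 + 256 = 1490, which still hashes to bucket 210, so the inner
 * loop never has to drop prg->proc_token and move to another bucket.
 */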
/*
 * Called from exit1 to place the process into a zombie state.
 * The process remains on the pid hash but p_stat is set to SZOMB,
 * so normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;
	pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}
/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}
/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}
void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_PROC,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
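
/*
 * Usage sketch (hypothetical callback, assuming SACTIVE as the running
 * state): count non-zombie running processes.  Returning -1 from the
 * callback terminates the scan.
 *
 *	static int
 *	count_running(struct proc *p, void *data)
 *	{
 *		if (p->p_stat == SACTIVE)
 *			++*(int *)data;
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	allproc_scan(count_running, &count);
 */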
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;
	int n;

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (p)
			break;
	}
}
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	procglob_t *prg;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		had_output = 1;
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		LWPRELE(lp);
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);

	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return (error);
	return (0);
}
/*
 * No requirements.
 */
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			if (PRISON_CHECK(cr1, p->p_ucred))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof(struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if ((!ps_showallprocs) &&
			    (p->p_ucred == NULL ||
			     p_trespass(cr1, p->p_ucred))) {
				continue;
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;

			/*
			 * TODO - make more efficient (see notes below).
			 * do by session.
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_uid != (uid_t)name[0])
					continue;
				break;

			case KERN_PROC_RUID:
				if (p->p_ucred == NULL ||
				    p->p_ucred->cr_ruid != (uid_t)name[0])
					continue;
				break;
			}

			if (!PRISON_CHECK(cr1, p->p_ucred))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			lwkt_hold(td);
			crit_exit();

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				break;
			default:
				error = sysctl_out_proc_kthread(td, req);
				break;
			}
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));
	kfree(marker, M_TEMP);

post_threads:
	return (error);
}
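
/*
 * Userland usage sketch: this handler is reached via sysctl(3).  For
 * example, fetching a single process entry by pid (error handling
 * omitted):
 *
 *	struct kinfo_proc kp;
 *	size_t len = sizeof(kp);
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };
 *
 *	sysctl(mib, 4, &kp, &len, NULL, 0);
 */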
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap.
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			size_t n;
			char *base;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Args set by setproctitle() sysctl.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
			/* opa = NULL; */
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
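
/*
 * Userland usage sketch (illustrative; setproctitle(3) style title
 * changes go through this path): a process may rewrite only its own
 * argument string.
 *
 *	const char *title = "mydaemon: idle";
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, getpid() };
 *
 *	sysctl(mib, 4, NULL, NULL, title, strlen(title) + 1);
 */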
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (*pidp != -1)
		PRELE(p);

	return (error);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD, sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd, CTLFLAG_RD | CTLFLAG_ANYBODY,
	sysctl_kern_proc_cwd, "Process current working directory");

static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname, CTLFLAG_RD,
	sysctl_kern_proc_pathname, "Process executable path");