Add precautions so that threads won't race to exit1() and get stuck there.
sys/kern/kern_proc.c (blob 8de79bdcaf59ef853abe078e5d49e1f99e7c3af3)
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 * $FreeBSD: src/sys/kern/kern_proc.c,v 1.63.2.9 2003/05/08 07:47:16 kbyanc Exp $
 * $DragonFly: src/sys/kern/kern_proc.c,v 1.38 2007/02/19 01:14:23 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <vm/vm_zone.h>
#include <machine/smp.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
static MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_kern, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0, "");
SYSCTL_INT(_kern, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0, "");

static void pgdelete(struct pgrp *);
static void orphanpg(struct pgrp *pg);
static pid_t proc_getnewpid_locked(int random_offset);

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;
struct proclist allproc;
struct proclist zombproc;
struct spinlock allproc_spin;
vm_zone_t proc_zone;
vm_zone_t lwp_zone;
vm_zone_t thread_zone;

/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
        int error, pid;

        pid = randompid;
        error = sysctl_handle_int(oidp, &pid, 0, req);
        if (error || !req->newptr)
                return (error);
        if (pid < 0 || pid > PID_MAX - 100)     /* out of range */
                pid = PID_MAX - 100;
        else if (pid < 2)                       /* NOP */
                pid = 0;
        else if (pid < 100)                     /* Make it reasonable */
                pid = 100;
        randompid = pid;
        return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
            0, 0, sysctl_kern_randompid, "I", "Random PID modulus");

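/*
 * Usage sketch (illustrative comment, not in the original file): after
 * e.g. "sysctl kern.randompid=100", proc_add_allproc() below adds
 * karc4random() % 100 to the sequential pid counter, so consecutive
 * forks no longer receive consecutive pids.
 */
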
/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
        LIST_INIT(&allproc);
        LIST_INIT(&zombproc);
        spin_init(&allproc_spin);
        pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
        pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
        proc_zone = zinit("PROC", sizeof (struct proc), 0, 0, 5);
        lwp_zone = zinit("LWP", sizeof (struct lwp), 0, 0, 5);
        thread_zone = zinit("THREAD", sizeof (struct thread), 0, 0, 5);
        uihashinit();
}

/*
 * Is p an inferior of the current process?
 */
int
inferior(struct proc *p)
{
        for (; p != curproc; p = p->p_pptr)
                if (p->p_pid == 0)
                        return (0);
        return (1);
}

/*
 * Locate a process by number
 */
struct proc *
pfind(pid_t pid)
{
        struct proc *p;

        LIST_FOREACH(p, PIDHASH(pid), p_hash) {
                if (p->p_pid == pid)
                        return (p);
        }
        return (NULL);
}

/*
 * Locate a process group by number
 */
struct pgrp *
pgfind(pid_t pgid)
{
        struct pgrp *pgrp;

        LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) {
                if (pgrp->pg_id == pgid)
                        return (pgrp);
        }
        return (NULL);
}

/*
 * Move p to a new or existing process group (and session)
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
        struct pgrp *pgrp = pgfind(pgid);

        KASSERT(pgrp == NULL || !mksess,
            ("enterpgrp: setsid into non-empty pgrp"));
        KASSERT(!SESS_LEADER(p),
            ("enterpgrp: session leader attempted setpgrp"));

        if (pgrp == NULL) {
                pid_t savepid = p->p_pid;
                struct proc *np;
                /*
                 * new process group
                 */
                KASSERT(p->p_pid == pgid,
                    ("enterpgrp: new pgrp and pid != pgid"));
                if ((np = pfind(savepid)) == NULL || np != p)
                        return (ESRCH);
                MALLOC(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
                       M_WAITOK);
                if (mksess) {
                        struct session *sess;

                        /*
                         * new session
                         */
                        MALLOC(sess, struct session *, sizeof(struct session),
                               M_SESSION, M_WAITOK);
                        sess->s_leader = p;
                        sess->s_sid = p->p_pid;
                        sess->s_count = 1;
                        sess->s_ttyvp = NULL;
                        sess->s_ttyp = NULL;
                        bcopy(p->p_session->s_login, sess->s_login,
                              sizeof(sess->s_login));
                        p->p_flag &= ~P_CONTROLT;
                        pgrp->pg_session = sess;
                        KASSERT(p == curproc,
                            ("enterpgrp: mksession and p != curproc"));
                } else {
                        pgrp->pg_session = p->p_session;
                        sess_hold(pgrp->pg_session);
                }
                pgrp->pg_id = pgid;
                LIST_INIT(&pgrp->pg_members);
                LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
                pgrp->pg_jobc = 0;
                SLIST_INIT(&pgrp->pg_sigiolst);
                lockinit(&pgrp->pg_lock, "pgwt", 0, 0);
        } else if (pgrp == p->p_pgrp)
                return (0);

        /*
         * Adjust eligibility of affected pgrps to participate in job control.
         * Increment eligibility counts before decrementing, otherwise we
         * could reach 0 spuriously during the first call.
         */
        fixjobc(p, pgrp, 1);
        fixjobc(p, p->p_pgrp, 0);

        LIST_REMOVE(p, p_pglist);
        if (LIST_EMPTY(&p->p_pgrp->pg_members))
                pgdelete(p->p_pgrp);
        p->p_pgrp = pgrp;
        LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
        return (0);
}

/*
 * remove process from process group
 */
int
leavepgrp(struct proc *p)
{
        LIST_REMOVE(p, p_pglist);
        if (LIST_EMPTY(&p->p_pgrp->pg_members))
                pgdelete(p->p_pgrp);
        p->p_pgrp = 0;
        return (0);
}

/*
 * delete a process group
 */
static void
pgdelete(struct pgrp *pgrp)
{
        /*
         * Reset any sigio structures pointing to us as a result of
         * F_SETOWN with our pgid.
         */
        funsetownlst(&pgrp->pg_sigiolst);

        if (pgrp->pg_session->s_ttyp != NULL &&
            pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
                pgrp->pg_session->s_ttyp->t_pgrp = NULL;
        LIST_REMOVE(pgrp, pg_hash);
        sess_rele(pgrp->pg_session);
        kfree(pgrp, M_PGRP);
}

/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 */
void
sess_hold(struct session *sp)
{
        ++sp->s_count;
}

void
sess_rele(struct session *sp)
{
        KKASSERT(sp->s_count > 0);
        if (--sp->s_count == 0) {
                if (sp->s_ttyp && sp->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
                        /* FULL CLOSE, see ttyclearsession() */
                        KKASSERT(sp->s_ttyp->t_session == sp);
                        sp->s_ttyp->t_session = NULL;
#else
                        /* HALF CLOSE, see ttyclearsession() */
                        if (sp->s_ttyp->t_session == sp)
                                sp->s_ttyp->t_session = NULL;
#endif
                }
                kfree(sp, M_SESSION);
        }
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
        struct pgrp *hispgrp;
        struct session *mysession = pgrp->pg_session;

        /*
         * Check p's parent to see whether p qualifies its own process
         * group; if so, adjust count for p's process group.
         */
        if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
            hispgrp->pg_session == mysession) {
                if (entering)
                        pgrp->pg_jobc++;
                else if (--pgrp->pg_jobc == 0)
                        orphanpg(pgrp);
        }

        /*
         * Check this process' children to see whether they qualify
         * their process groups; if so, adjust counts for children's
         * process groups.
         */
        LIST_FOREACH(p, &p->p_children, p_sibling)
                if ((hispgrp = p->p_pgrp) != pgrp &&
                    hispgrp->pg_session == mysession &&
                    p->p_stat != SZOMB) {
                        if (entering)
                                hispgrp->pg_jobc++;
                        else if (--hispgrp->pg_jobc == 0)
                                orphanpg(hispgrp);
                }
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
static void
orphanpg(struct pgrp *pg)
{
        struct proc *p;

        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                if (p->p_stat == SSTOP) {
                        LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                                ksignal(p, SIGHUP);
                                ksignal(p, SIGCONT);
                        }
                        return;
                }
        }
}

/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * MPALMOSTSAFE - acquires mplock for karc4random() call
 */
void
proc_add_allproc(struct proc *p)
{
        int random_offset;

        if ((random_offset = randompid) != 0) {
                get_mplock();
                random_offset = karc4random() % random_offset;
                rel_mplock();
        }

        spin_lock_wr(&allproc_spin);
        p->p_pid = proc_getnewpid_locked(random_offset);
        LIST_INSERT_HEAD(&allproc, p, p_list);
        LIST_INSERT_HEAD(PIDHASH(p->p_pid), p, p_hash);
        spin_unlock_wr(&allproc_spin);
}

/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * MPSAFE - must be called with allproc_spin held.
 */
static
pid_t
proc_getnewpid_locked(int random_offset)
{
        static pid_t nextpid;
        static pid_t pidchecked;
        struct proc *p;

        /*
         * Find an unused process ID.  We remember a range of unused IDs
         * ready to use (from nextpid+1 through pidchecked-1).
         */
        nextpid = nextpid + 1 + random_offset;
retry:
        /*
         * If the process ID prototype has wrapped around,
         * restart somewhat above 0, as the low-numbered procs
         * tend to include daemons that don't exit.
         */
        if (nextpid >= PID_MAX) {
                nextpid = nextpid % PID_MAX;
                if (nextpid < 100)
                        nextpid += 100;
                pidchecked = 0;
        }
        if (nextpid >= pidchecked) {
                int doingzomb = 0;

                pidchecked = PID_MAX;
                /*
                 * Scan the active and zombie procs to check whether this pid
                 * is in use.  Remember the lowest pid that's greater
                 * than nextpid, so we can avoid checking for a while.
                 */
                p = LIST_FIRST(&allproc);
again:
                for (; p != 0; p = LIST_NEXT(p, p_list)) {
                        while (p->p_pid == nextpid ||
                            p->p_pgrp->pg_id == nextpid ||
                            p->p_session->s_sid == nextpid) {
                                nextpid++;
                                if (nextpid >= pidchecked)
                                        goto retry;
                        }
                        if (p->p_pid > nextpid && pidchecked > p->p_pid)
                                pidchecked = p->p_pid;
                        if (p->p_pgrp->pg_id > nextpid &&
                            pidchecked > p->p_pgrp->pg_id)
                                pidchecked = p->p_pgrp->pg_id;
                        if (p->p_session->s_sid > nextpid &&
                            pidchecked > p->p_session->s_sid)
                                pidchecked = p->p_session->s_sid;
                }
                if (!doingzomb) {
                        doingzomb = 1;
                        p = LIST_FIRST(&zombproc);
                        goto again;
                }
        }
        return(nextpid);
}

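/*
 * Illustrative walk-through (added comment, not in the original file):
 * if nextpid settles on 612 and the smallest in-use pid, pgid, or
 * session id above it is 650, pidchecked becomes 650 and pids 613-649
 * can be handed out by later calls without rescanning the lists.
 */
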
/*
 * Called from exit1 to remove a process from the allproc
 * list and move it to the zombie list.
 *
 * MPSAFE
 */
void
proc_move_allproc_zombie(struct proc *p)
{
        spin_lock_wr(&allproc_spin);
        while (p->p_lock) {
                spin_unlock_wr(&allproc_spin);
                tsleep(p, 0, "reap1", hz / 10);
                spin_lock_wr(&allproc_spin);
        }
        LIST_REMOVE(p, p_list);
        LIST_INSERT_HEAD(&zombproc, p, p_list);
        LIST_REMOVE(p, p_hash);
        p->p_stat = SZOMB;
        spin_unlock_wr(&allproc_spin);
}

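/*
 * Illustration (hypothetical, not in the original file): p_lock is the
 * hold count manipulated by PHOLD()/PRELE().  A thread holding the
 * process across a blocking operation stalls the zombie transition
 * above until it drops the hold:
 *
 *	PHOLD(p);		(increments p_lock, reaper must wait)
 *	error = blocking_op(p);	(hypothetical blocking operation)
 *	PRELE(p);		(decrements p_lock, reaper may proceed)
 */
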
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * MPSAFE
 */
void
proc_remove_zombie(struct proc *p)
{
        spin_lock_wr(&allproc_spin);
        while (p->p_lock) {
                spin_unlock_wr(&allproc_spin);
                tsleep(p, 0, "reap1", hz / 10);
                spin_lock_wr(&allproc_spin);
        }
        LIST_REMOVE(p, p_list); /* off zombproc */
        LIST_REMOVE(p, p_sibling);
        spin_unlock_wr(&allproc_spin);
}

/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * MPSAFE
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;

        spin_lock_rd(&allproc_spin);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                spin_unlock_rd(&allproc_spin);
                r = callback(p, data);
                spin_lock_rd(&allproc_spin);
                PRELE(p);
                if (r < 0)
                        break;
        }
        spin_unlock_rd(&allproc_spin);
}

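#if 0
/*
 * Example callback (hypothetical, not part of the original file): counts
 * stopped processes.  allproc_scan() PHOLD()s each process around the
 * call, so the process cannot be reaped while the callback runs; a
 * caller would invoke allproc_scan(example_count_stopped, &count).
 */
static int
example_count_stopped(struct proc *p, void *data)
{
        int *countp = data;

        if (p->p_stat == SSTOP)
                ++*countp;
        return (0);             /* return -1 to terminate the scan early */
}
#endif
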
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * possibly not MPSAFE, needs to access foreign proc structures
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data)
{
        struct proc *p;
        struct lwp *lp;
        int r = 0;

        spin_lock_rd(&allproc_spin);
        LIST_FOREACH(p, &allproc, p_list) {
                PHOLD(p);
                spin_unlock_rd(&allproc_spin);
                FOREACH_LWP_IN_PROC(lp, p) {
                        LWPHOLD(lp);
                        r = callback(lp, data);
                        LWPRELE(lp);
                }
                spin_lock_rd(&allproc_spin);
                PRELE(p);
                if (r < 0)
                        break;
        }
        spin_unlock_rd(&allproc_spin);
}

/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * MPSAFE
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
        struct proc *p;
        int r;

        spin_lock_rd(&allproc_spin);
        LIST_FOREACH(p, &zombproc, p_list) {
                PHOLD(p);
                spin_unlock_rd(&allproc_spin);
                r = callback(p, data);
                spin_lock_rd(&allproc_spin);
                PRELE(p);
                if (r < 0)
                        break;
        }
        spin_unlock_rd(&allproc_spin);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
        struct pgrp *pgrp;
        struct proc *p;
        int i;

        for (i = 0; i <= pgrphash; i++) {
                if (!LIST_EMPTY(&pgrphashtbl[i])) {
                        kprintf("\tindx %d\n", i);
                        LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) {
                                kprintf(
                        "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n",
                                    (void *)pgrp, (long)pgrp->pg_id,
                                    (void *)pgrp->pg_session,
                                    pgrp->pg_session->s_count,
                                    (void *)LIST_FIRST(&pgrp->pg_members));
                                LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
                                        kprintf("\t\tpid %ld addr %p pgrp %p\n",
                                            (long)p->p_pid, (void *)p,
                                            (void *)p->p_pgrp);
                                }
                        }
                }
        }
}
#endif /* DDB */

/*
 * Locate a process on the zombie list.  Return a held process or NULL.
 */
struct proc *
zpfind(pid_t pid)
{
        struct proc *p;

        LIST_FOREACH(p, &zombproc, p_list)
                if (p->p_pid == pid)
                        return (p);
        return (NULL);
}

static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        struct lwp *lp;
        int skp = 0, had_output = 0;
        int error;

        fill_kinfo_proc(p, &ki);
        if ((flags & KERN_PROC_FLAG_LWP) == 0)
                skp = 1;
        FOREACH_LWP_IN_PROC(lp, p) {
                fill_kinfo_lwp(lp, &ki.kp_lwp);
output:
                had_output = 1;
                error = SYSCTL_OUT(req, &ki, sizeof(ki));
                if (error)
                        return error;
                if (skp)
                        break;
        }
        /* We need to output at least the proc, even if there is no lwp. */
        if (!had_output)
                goto output;
#if 0
        if (!doingzomb && pid && (pfind(pid) != p))
                return EAGAIN;
        if (doingzomb && zpfind(pid) != p)
                return EAGAIN;
#endif
        return (0);
}

static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req, int flags)
{
        struct kinfo_proc ki;
        int error;

        fill_kinfo_proc_kthread(td, &ki);
        error = SYSCTL_OUT(req, &ki, sizeof(ki));
        if (error)
                return error;
        return(0);
}

static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
        int *name = (int*) arg1;
        int oid = oidp->oid_number;
        u_int namelen = arg2;
        struct proc *p, *np;
        struct proclist *plist;
        struct thread *td;
        int doingzomb, flags = 0;
        int error = 0;
        int n;
        int origcpu;
        struct ucred *cr1 = curproc->p_ucred;

        flags = oid & KERN_PROC_FLAGMASK;
        oid &= ~KERN_PROC_FLAGMASK;

        if ((oid == KERN_PROC_ALL && namelen != 0) ||
            (oid != KERN_PROC_ALL && namelen != 1))
                return (EINVAL);

        if (oid == KERN_PROC_PID) {
                p = pfind((pid_t)name[0]);
                if (!p)
                        return (0);
                if (!PRISON_CHECK(cr1, p->p_ucred))
                        return (0);
                error = sysctl_out_proc(p, req, flags);
                return (error);
        }

        if (!req->oldptr) {
                /* overestimate by 5 procs */
                error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
                if (error)
                        return (error);
        }
        for (doingzomb = 0; doingzomb <= 1; doingzomb++) {
                if (doingzomb)
                        plist = &zombproc;
                else
                        plist = &allproc;
                LIST_FOREACH_MUTABLE(p, plist, p_list, np) {
                        /*
                         * Show a user only their processes.
                         */
                        if ((!ps_showallprocs) && p_trespass(cr1, p->p_ucred))
                                continue;
                        /*
                         * Skip embryonic processes.
                         */
                        if (p->p_stat == SIDL)
                                continue;
                        /*
                         * TODO - make more efficient (see notes below).
                         * do by session.
                         */
                        switch (oid) {
                        case KERN_PROC_PGRP:
                                /* could do this by traversing pgrp */
                                if (p->p_pgrp == NULL ||
                                    p->p_pgrp->pg_id != (pid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_TTY:
                                if ((p->p_flag & P_CONTROLT) == 0 ||
                                    p->p_session == NULL ||
                                    p->p_session->s_ttyp == NULL ||
                                    dev2udev(p->p_session->s_ttyp->t_dev) !=
                                        (udev_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_UID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_uid != (uid_t)name[0])
                                        continue;
                                break;

                        case KERN_PROC_RUID:
                                if (p->p_ucred == NULL ||
                                    p->p_ucred->cr_ruid != (uid_t)name[0])
                                        continue;
                                break;
                        }

                        if (!PRISON_CHECK(cr1, p->p_ucred))
                                continue;
                        PHOLD(p);
                        error = sysctl_out_proc(p, req, flags);
                        PRELE(p);
                        if (error)
                                return (error);
                }
        }

        /*
         * Iterate over all active cpus and scan their thread list.  Start
         * with the next logical cpu and end with our original cpu.  We
         * migrate our own thread to each target cpu in order to safely scan
         * its thread list.  In the last loop we migrate back to our original
         * cpu.
         */
        origcpu = mycpu->gd_cpuid;
        if (!ps_showallthreads || jailed(cr1))
                goto post_threads;
        for (n = 1; n <= ncpus; ++n) {
                globaldata_t rgd;
                int nid;

                nid = (origcpu + n) % ncpus;
                if ((smp_active_mask & (1 << nid)) == 0)
                        continue;
                rgd = globaldata_find(nid);
                lwkt_setcpu_self(rgd);

                TAILQ_FOREACH(td, &mycpu->gd_tdallq, td_allq) {
                        if (td->td_proc)
                                continue;
                        switch (oid) {
                        case KERN_PROC_PGRP:
                        case KERN_PROC_TTY:
                        case KERN_PROC_UID:
                        case KERN_PROC_RUID:
                                continue;
                        default:
                                break;
                        }
                        lwkt_hold(td);
                        error = sysctl_out_proc_kthread(td, req, doingzomb);
                        lwkt_rele(td);
                        if (error)
                                return (error);
                }
        }
post_threads:
        return (0);
}

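#if 0
/*
 * Userland usage sketch (hypothetical, not part of this file): fetching
 * a single entry through the KERN_PROC_PID path of the handler above
 * with sysctl(3).
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>

static int
fetch_kinfo(pid_t pid, struct kinfo_proc *kp)
{
        int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, (int)pid };
        size_t len = sizeof(*kp);

        return (sysctl(mib, 4, kp, &len, NULL, 0));
}
#endif
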
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 */
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
        int *name = (int*) arg1;
        u_int namelen = arg2;
        struct proc *p;
        struct pargs *pa;
        int error = 0;
        struct ucred *cr1 = curproc->p_ucred;

        if (namelen != 1)
                return (EINVAL);

        p = pfind((pid_t)name[0]);
        if (!p)
                return (0);

        if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
                return (0);

        if (req->newptr && curproc != p)
                return (EPERM);

        if (req->oldptr && p->p_args != NULL)
                error = SYSCTL_OUT(req, p->p_args->ar_args, p->p_args->ar_length);
        if (req->newptr == NULL)
                return (error);

        if (p->p_args && --p->p_args->ar_ref == 0)
                FREE(p->p_args, M_PARGS);
        p->p_args = NULL;

        if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit)
                return (error);

        MALLOC(pa, struct pargs *, sizeof(struct pargs) + req->newlen,
               M_PARGS, M_WAITOK);
        pa->ar_ref = 1;
        pa->ar_length = req->newlen;
        error = SYSCTL_IN(req, pa->ar_args, req->newlen);
        if (!error)
                p->p_args = pa;
        else
                FREE(pa, M_PARGS);
        return (error);
}

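/*
 * Usage note (illustrative, not in the original file): libc's
 * setproctitle(3) is the usual writer of this sysctl; it submits the
 * new title through req->newptr for the calling pid, which replaces
 * p->p_args above.
 */
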
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD,  0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLFLAG_RD|CTLTYPE_STRUCT,
        0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp, CTLFLAG_RD,
        sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
        sysctl_kern_proc_args, "Process argument list");