/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/jail.h>
#include <sys/filedesc.h>
#include <sys/tty.h>
#include <sys/dsched.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/random.h>

#include <vm/vm_map.h>

#include <machine/smp.h>

#include <sys/refcount.h>
#include <sys/spinlock2.h>
/*
 * Hash table size must be a power of two and is not currently dynamically
 * sized.  There is a trade-off between the linear scans which must iterate
 * all HSIZE elements and the number of elements which might accumulate
 * within each hash chain.
 */
#define ALLPROC_HSIZE	256
#define ALLPROC_HMASK	(ALLPROC_HSIZE - 1)
#define ALLPROC_HASH(pid)	(pid & ALLPROC_HMASK)
#define PGRP_HASH(pid)	(pid & ALLPROC_HMASK)
#define SESS_HASH(pid)	(pid & ALLPROC_HMASK)
/*
 * pid_doms[] management, used to control how quickly a PID can be recycled.
 * Must be a multiple of ALLPROC_HSIZE for the proc_makepid() inner loops.
 *
 * WARNING! PIDDOM_DELAY should not be defined > 20 or so unless you change
 *	    the array from int8_t's to int16_t's.
 */
#define PIDDOM_COUNT	10	/* 10 pids per domain - reduce array size */
#define PIDDOM_DELAY	10	/* min 10 seconds after exit before reuse */
#define PIDDOM_SCALE	10	/* (10,000*SCALE)/sec performance guarantee */
#define PIDSEL_DOMAINS	(PID_MAX * PIDDOM_SCALE / PIDDOM_COUNT /	\
			 ALLPROC_HSIZE * ALLPROC_HSIZE)
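
/*
 * Worked example (illustrative, assuming a PID_MAX of 999999): the
 * expression above evaluates as 999999 * 10 / 10 = 999999, then
 * 999999 / 256 * 256 = 999936 domains.  At one int8_t each that is
 * the "around a megabyte" allocation described below, rounded down
 * to a multiple of ALLPROC_HSIZE.
 */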
int allproc_hsize = ALLPROC_HSIZE;

LIST_HEAD(pidhashhead, proc);

static MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
MALLOC_DEFINE(M_SESSION, "session", "session header");
MALLOC_DEFINE(M_PROC, "proc", "Proc structures");
MALLOC_DEFINE(M_LWP, "lwp", "lwp structures");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");
int ps_showallprocs = 1;
static int ps_showallthreads = 1;
SYSCTL_INT(_security, OID_AUTO, ps_showallprocs, CTLFLAG_RW,
    &ps_showallprocs, 0,
    "Unprivileged processes can see processes with different UID/GID");
SYSCTL_INT(_security, OID_AUTO, ps_showallthreads, CTLFLAG_RW,
    &ps_showallthreads, 0,
    "Unprivileged processes can see kernel threads");
static u_int pid_domain_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_domain_skips, CTLFLAG_RW,
    &pid_domain_skips, 0,
    "Number of pid_doms[] skipped");
static u_int pid_inner_skips;
SYSCTL_UINT(_kern, OID_AUTO, pid_inner_skips, CTLFLAG_RW,
    &pid_inner_skips, 0,
    "Number of inner-loop pid collisions skipped");
static void orphanpg(struct pgrp *pg);
static void proc_makepid(struct proc *p, int random_offset);
/*
 * Process related lists (for proc_token, allproc, allpgrp, and allsess)
 */
typedef struct procglob procglob_t;

static procglob_t	procglob[ALLPROC_HSIZE];
/*
 * We try our best to avoid recycling a PID too quickly.  We do this by
 * storing (uint8_t)time_second in the related pid domain on-reap and then
 * using that to skip-over the domain on-allocate.
 *
 * This array has to be fairly large to support a high fork/exec rate.
 * A ~100,000 entry array will support a 10-second reuse latency at
 * 10,000 execs/second, worst case.  Best-case multiply by PIDDOM_COUNT
 * (approximately 100,000 execs/second).
 *
 * Currently we allocate around a megabyte, making the worst-case fork
 * rate around 100,000/second.
 */
static uint8_t *pid_doms;
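
/*
 * Illustrative arithmetic for the claim above: with roughly 1,000,000
 * domains and a sustained 100,000 exits/second, any given domain is
 * revisited after about 1,000,000 / 100,000 = 10 seconds, which is
 * exactly the PIDDOM_DELAY reuse latency being enforced.
 */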
/*
 * Random component to nextpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
static struct ucred *
pcredcache(struct ucred *cr, struct proc *p)
{
	if (cr != p->p_ucred) {
		if (cr)
			crfree(cr);
		spin_lock(&p->p_spin);
		if ((cr = p->p_ucred) != NULL)
			crhold(cr);
		spin_unlock(&p->p_spin);
	}
	return cr;
}
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;
	randompid = pid;
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
	    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
/*
 * Initialize global process hashing structures.
 *
 * These functions are ONLY called from the low level boot code and do
 * not lock their operations.
 */
void
procinit(void)
{
	u_long i;

	/*
	 * Allocate dynamically.  This array can be large (~1MB) so don't
	 * waste boot loader space.
	 */
	pid_doms = kmalloc(sizeof(pid_doms[0]) * PIDSEL_DOMAINS,
			   M_PROC, M_WAITOK | M_ZERO);

	/*
	 * Avoid unnecessary stalls due to pid_doms[] values all being
	 * the same.  Make sure that the allocation of pid 1 and pid 2
	 * succeeds.
	 */
	for (i = 0; i < PIDSEL_DOMAINS; ++i)
		pid_doms[i] = (int8_t)i - (int8_t)(PIDDOM_DELAY + 1);

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];
		LIST_INIT(&prg->allproc);
		LIST_INIT(&prg->allsess);
		LIST_INIT(&prg->allpgrp);
		lwkt_token_init(&prg->proc_token, "allproc");
	}
}
void
procinsertinit(struct proc *p)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(p->p_pid)].allproc,
			 p, p_list);
}

void
pgrpinsertinit(struct pgrp *pg)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(pg->pg_id)].allpgrp,
			 pg, pg_list);
}

void
sessinsertinit(struct session *sess)
{
	LIST_INSERT_HEAD(&procglob[ALLPROC_HASH(sess->s_sid)].allsess,
			 sess, s_list);
}
/*
 * Process hold/release support functions.  Called via the PHOLD(),
 * PRELE(), and PSTALL() macros.
 *
 * p->p_lock is a simple hold count with a waiting interlock.  No wakeup()
 * is issued unless someone is actually waiting for the process.
 *
 * Most holds are short-term, allowing a process scan or other similar
 * operation to access a proc structure without it getting ripped out from
 * under us.  procfs and process-list sysctl ops also use the hold function
 * interlocked with various p_flags to keep the vmspace intact when reading
 * or writing a user process's address space.
 *
 * There are two situations where a hold count can be longer.  Exiting lwps
 * hold the process until the lwp is reaped, and the parent will hold the
 * child during vfork()/exec() sequences while the child is marked P_PPWAIT.
 *
 * The kernel waits for the hold count to drop to 0 (or 1 in some cases) at
 * various critical points in the fork/exec and exit paths before proceeding.
 */
#define PLOCK_ZOMB	0x20000000
#define PLOCK_WAITING	0x40000000
#define PLOCK_MASK	0x1FFFFFFF
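
/*
 * Illustrative hold-count sketch (not part of the original file):
 * PHOLD()/PRELE() bracket any window where code dereferences a proc
 * it does not otherwise own, and PSTALL() lets the exit/reap paths
 * wait for such transient holds to drain:
 *
 *	PHOLD(p);			// bump p->p_lock hold count
 *	... safe to dereference (p) ...
 *	PRELE(p);			// drop; may wake a PSTALL()er
 */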
void
pstall(struct proc *p, const char *wmesg, int count)
{
	int o;
	int n;

	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_MASK) <= count)
			break;
		n = o | PLOCK_WAITING;
		tsleep_interlock(&p->p_lock, 0);

		/*
		 * If someone is trying to single-step the process during
		 * an exec or an exit they can deadlock us because procfs
		 * sleeps with the process held.
		 */
		if (p->p_stops) {
			if (p->p_flags & P_INEXEC) {
				wakeup(&p->p_stype);
			} else if (p->p_flags & P_POSTEXIT) {
				spin_lock(&p->p_spin);
				p->p_stops = 0;
				p->p_step = 0;
				spin_unlock(&p->p_spin);
				wakeup(&p->p_step);
			}
		}

		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			tsleep(&p->p_lock, PINTERLOCKED, wmesg, 0);
		}
	}
}
void
phold(struct proc *p)
{
	atomic_add_int(&p->p_lock, 1);
}
/*
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prele(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 1, 0))
		return;

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~PLOCK_WAITING;
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Hold and flag serialized for zombie reaping purposes.
 *
 * This function will fail if it has to block, returning non-zero with
 * neither the flag set or the hold count bumped.  Note that we must block
 * without holding a ref, meaning that the caller must ensure that (p)
 * remains valid through some other interlock (typically on its parent
 * process's p_token).
 *
 * Zero is returned on success.  The hold count will be incremented and
 * the serialization flag acquired.  Note that serialization is only against
 * other pholdzomb() calls, not against phold() calls.
 */
int
pholdzomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, 0, PLOCK_ZOMB | 1))
		return (0);

	/*
	 * Slow path
	 */
	for (;;) {
		o = p->p_lock;
		cpu_ccfence();
		if ((o & PLOCK_ZOMB) == 0) {
			n = (o + 1) | PLOCK_ZOMB;
			if (atomic_cmpset_int(&p->p_lock, o, n))
				return (0);
		} else {
			KKASSERT((o & PLOCK_MASK) > 0);
			n = o | PLOCK_WAITING;
			tsleep_interlock(&p->p_lock, 0);
			if (atomic_cmpset_int(&p->p_lock, o, n)) {
				tsleep(&p->p_lock, PINTERLOCKED, "phldz", 0);
				/* (p) can be ripped out at this point */
				return (1);
			}
		}
	}
}
/*
 * Release PLOCK_ZOMB and the hold count, waking up any waiters.
 *
 * WARNING!  On last release (p) can become instantly invalid due to
 *	     MP races.
 */
void
prelezomb(struct proc *p)
{
	int o;
	int n;

	/*
	 * Fast path
	 */
	if (atomic_cmpset_int(&p->p_lock, PLOCK_ZOMB | 1, 0))
		return;

	/*
	 * Slow path
	 */
	KKASSERT(p->p_lock & PLOCK_ZOMB);
	for (;;) {
		o = p->p_lock;
		KKASSERT((o & PLOCK_MASK) > 0);
		cpu_ccfence();
		n = (o - 1) & ~(PLOCK_ZOMB | PLOCK_WAITING);
		if (atomic_cmpset_int(&p->p_lock, o, n)) {
			if (o & PLOCK_WAITING)
				wakeup(&p->p_lock);
			break;
		}
	}
}
/*
 * Is p an inferior of the current process?
 *
 * No requirements.
 */
int
inferior(struct proc *p)
{
	struct proc *p2;

	PHOLD(p);
	lwkt_gettoken_shared(&p->p_token);
	while (p != curproc) {
		if (p->p_pid == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (0);
		}
		p2 = p->p_pptr;
		PHOLD(p2);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		lwkt_gettoken_shared(&p2->p_token);
		p = p2;
	}
	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return (1);
}
/*
 * Locate a process by number.  The returned process will be referenced and
 * must be released with PRELE().
 *
 * No requirements.
 */
struct proc *
pfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
/*
 * Locate a process by number.  The returned process is NOT referenced.
 * The result will not be stable and is typically only used to validate
 * against a process that the caller has in-hand.
 *
 * No requirements.
 */
struct proc *
pfindn(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid)
		return (p);

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat == SZOMB)
			continue;
		if (p->p_pid == pid) {
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
/*
 * Locate a process on the zombie list.  Return a process or NULL.
 * The returned process will be referenced and the caller must release
 * it with PRELE().
 *
 * No other requirements.
 */
struct proc *
zpfind(pid_t pid)
{
	struct proc *p = curproc;
	procglob_t *prg;
	int n;

	/*
	 * Shortcut the current process
	 */
	if (p && p->p_pid == pid) {
		PHOLD(p);
		return (p);
	}

	/*
	 * Otherwise find it in the hash table.
	 */
	n = ALLPROC_HASH(pid);
	prg = &procglob[n];

	lwkt_gettoken_shared(&prg->proc_token);
	LIST_FOREACH(p, &prg->allproc, p_list) {
		if (p->p_stat != SZOMB)
			continue;
		if (p->p_pid == pid) {
			PHOLD(p);
			lwkt_reltoken(&prg->proc_token);
			return (p);
		}
	}
	lwkt_reltoken(&prg->proc_token);

	return (NULL);
}
void
pgref(struct pgrp *pgrp)
{
	refcount_acquire(&pgrp->pg_refs);
}
void
pgrel(struct pgrp *pgrp)
{
	procglob_t *prg;
	int count;
	int n;

	n = PGRP_HASH(pgrp->pg_id);
	prg = &procglob[n];

	for (;;) {
		count = pgrp->pg_refs;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&pgrp->pg_refs, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&pgrp->pg_refs, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition, the prg->proc_token is held.
	 */
	LIST_REMOVE(pgrp, pg_list);
	if (pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[pgrp->pg_id % PIDSEL_DOMAINS] = (uint8_t)time_second;

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pgid.
	 */
	funsetownlst(&pgrp->pg_sigiolst);

	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp) {
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	}
	lwkt_reltoken(&prg->proc_token);

	sess_rele(pgrp->pg_session);
	kfree(pgrp, M_PGRP);
}
/*
 * Locate a process group by number.  The returned process group will be
 * referenced w/pgref() and must be released with pgrel() (or assigned
 * somewhere if you wish to keep the reference).
 *
 * No requirements.
 */
struct pgrp *
pgfind(pid_t pgid)
{
	struct pgrp *pgrp;
	procglob_t *prg;
	int n;

	n = PGRP_HASH(pgid);
	prg = &procglob[n];
	lwkt_gettoken_shared(&prg->proc_token);

	LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
		if (pgrp->pg_id == pgid) {
			refcount_acquire(&pgrp->pg_refs);
			lwkt_reltoken(&prg->proc_token);
			return (pgrp);
		}
	}
	lwkt_reltoken(&prg->proc_token);
	return (NULL);
}
/*
 * Move p to a new or existing process group (and session)
 *
 * No requirements.
 */
int
enterpgrp(struct proc *p, pid_t pgid, int mksess)
{
	struct pgrp *pgrp;
	struct pgrp *opgrp;
	int error;

	pgrp = pgfind(pgid);

	KASSERT(pgrp == NULL || !mksess,
		("enterpgrp: setsid into non-empty pgrp"));
	KASSERT(!SESS_LEADER(p),
		("enterpgrp: session leader attempted setpgrp"));

	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		procglob_t *prg;
		int n;

		/*
		 * new process group
		 */
		KASSERT(p->p_pid == pgid,
			("enterpgrp: new pgrp and pid != pgid"));
		pgrp = kmalloc(sizeof(struct pgrp), M_PGRP, M_WAITOK | M_ZERO);
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		pgrp->pg_jobc = 0;
		SLIST_INIT(&pgrp->pg_sigiolst);
		lwkt_token_init(&pgrp->pg_token, "pgrp_token");
		refcount_init(&pgrp->pg_refs, 1);
		lockinit(&pgrp->pg_lock, "pgwt", 0, 0);

		n = PGRP_HASH(pgid);
		prg = &procglob[n];
		lwkt_gettoken(&prg->proc_token);

		if ((np = pfindn(savepid)) == NULL || np != p) {
			lwkt_reltoken(&prg->proc_token);
			error = ESRCH;
			kfree(pgrp, M_PGRP);
			goto fatal;
		}

		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			sess = kmalloc(sizeof(struct session), M_SESSION,
				       M_WAITOK | M_ZERO);
			lwkt_gettoken(&p->p_token);
			sess->s_leader = p;
			sess->s_sid = p->p_pid;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			bcopy(p->p_session->s_login, sess->s_login,
			      sizeof(sess->s_login));
			pgrp->pg_session = sess;
			KASSERT(p == curproc,
				("enterpgrp: mksession and p != curproc"));
			p->p_flags &= ~P_CONTROLT;
			LIST_INSERT_HEAD(&prg->allsess, sess, s_list);
			lwkt_reltoken(&p->p_token);
		} else {
			lwkt_gettoken(&p->p_token);
			pgrp->pg_session = p->p_session;
			sess_hold(pgrp->pg_session);
			lwkt_reltoken(&p->p_token);
		}
		LIST_INSERT_HEAD(&prg->allpgrp, pgrp, pg_list);

		lwkt_reltoken(&prg->proc_token);
	} else if (pgrp == p->p_pgrp) {
		pgrel(pgrp);
		goto done;
	} /* else pgfind() referenced the pgrp */

	lwkt_gettoken(&pgrp->pg_token);
	lwkt_gettoken(&p->p_token);

	/*
	 * Replace p->p_pgrp, handling any races that occur.
	 */
	while ((opgrp = p->p_pgrp) != NULL) {
		pgref(opgrp);
		lwkt_gettoken(&opgrp->pg_token);
		if (opgrp != p->p_pgrp) {
			lwkt_reltoken(&opgrp->pg_token);
			pgrel(opgrp);
			continue;
		}
		LIST_REMOVE(p, p_pglist);
		break;
	}
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	if (opgrp) {
		fixjobc(p, opgrp, 0);
		lwkt_reltoken(&opgrp->pg_token);
		pgrel(opgrp);		/* manual pgref */
		pgrel(opgrp);		/* p->p_pgrp ref */
	}
	lwkt_reltoken(&p->p_token);
	lwkt_reltoken(&pgrp->pg_token);
done:
	error = 0;
fatal:
	return (error);
}
/*
 * Remove process from process group
 *
 * No requirements.
 */
int
leavepgrp(struct proc *p)
{
	struct pgrp *pg = p->p_pgrp;

	lwkt_gettoken(&p->p_token);
	while ((pg = p->p_pgrp) != NULL) {
		pgref(pg);
		lwkt_gettoken(&pg->pg_token);
		if (p->p_pgrp != pg) {
			lwkt_reltoken(&pg->pg_token);
			pgrel(pg);
			continue;
		}
		p->p_pgrp = NULL;
		LIST_REMOVE(p, p_pglist);
		lwkt_reltoken(&pg->pg_token);
		pgrel(pg);	/* manual pgref */
		pgrel(pg);	/* p->p_pgrp ref */
		break;
	}
	lwkt_reltoken(&p->p_token);

	return (0);
}
/*
 * Adjust the ref count on a session structure.  When the ref count falls to
 * zero the tty is disassociated from the session and the session structure
 * is freed.  Note that tty association is not itself ref-counted.
 *
 * No requirements.
 */
void
sess_hold(struct session *sp)
{
	atomic_add_int(&sp->s_count, 1);
}
void
sess_rele(struct session *sess)
{
	procglob_t *prg;
	struct tty *tp;
	int count;
	int n;

	n = SESS_HASH(sess->s_sid);
	prg = &procglob[n];

	for (;;) {
		count = sess->s_count;
		cpu_ccfence();
		KKASSERT(count > 0);
		if (count == 1) {
			lwkt_gettoken(&tty_token);
			lwkt_gettoken(&prg->proc_token);
			if (atomic_cmpset_int(&sess->s_count, 1, 0))
				break;
			lwkt_reltoken(&prg->proc_token);
			lwkt_reltoken(&tty_token);
			/* retry */
		} else {
			if (atomic_cmpset_int(&sess->s_count, count, count - 1))
				return;
			/* retry */
		}
	}

	/*
	 * Successful 1->0 transition and tty_token is held.
	 */
	LIST_REMOVE(sess, s_list);
	if (pid_doms[sess->s_sid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[sess->s_sid % PIDSEL_DOMAINS] = (uint8_t)time_second;

	if (sess->s_ttyp && sess->s_ttyp->t_session) {
#ifdef TTY_DO_FULL_CLOSE
		/* FULL CLOSE, see ttyclearsession() */
		KKASSERT(sess->s_ttyp->t_session == sess);
		sess->s_ttyp->t_session = NULL;
#else
		/* HALF CLOSE, see ttyclearsession() */
		if (sess->s_ttyp->t_session == sess)
			sess->s_ttyp->t_session = NULL;
#endif
	}
	if ((tp = sess->s_ttyp) != NULL) {
		sess->s_ttyp = NULL;
		ttyunhold(tp);
	}
	lwkt_reltoken(&prg->proc_token);
	lwkt_reltoken(&tty_token);

	kfree(sess, M_SESSION);
}
/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * No requirements.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession;
	struct proc *np;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	lwkt_gettoken(&p->p_token);	/* p_children scan */
	lwkt_gettoken(&pgrp->pg_token);

	mysession = pgrp->pg_session;
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(np, &p->p_children, p_sibling) {
		PHOLD(np);
		lwkt_gettoken(&np->p_token);
		if ((hispgrp = np->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    np->p_stat != SZOMB) {
			pgref(hispgrp);
			lwkt_gettoken(&hispgrp->pg_token);
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
			lwkt_reltoken(&hispgrp->pg_token);
			pgrel(hispgrp);
		}
		lwkt_reltoken(&np->p_token);
		PRELE(np);
	}
	KKASSERT(pgrp->pg_refs > 0);
	lwkt_reltoken(&pgrp->pg_token);
	lwkt_reltoken(&p->p_token);
}
/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 *
 * The caller must hold pg_token.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				ksignal(p, SIGHUP);
				ksignal(p, SIGCONT);
			}
			return;
		}
	}
}
/*
 * Add a new process to the allproc list and the PID hash.  This
 * also assigns a pid to the new process.
 *
 * No requirements.
 */
void
proc_add_allproc(struct proc *p)
{
	int random_offset;

	if ((random_offset = randompid) != 0) {
		read_random(&random_offset, sizeof(random_offset));
		random_offset = (random_offset & 0x7FFFFFFF) % randompid;
	}
	proc_makepid(p, random_offset);
}
/*
 * Calculate a new process pid.  This function is integrated into
 * proc_add_allproc() to guarantee that the new pid is not reused before
 * the new process can be added to the allproc list.
 *
 * p_pid is assigned and the process is added to the allproc hash table
 *
 * WARNING! We need to allocate PIDs sequentially during early boot.
 *	    In particular, init needs to have a pid of 1.
 */
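/*
 * Worked example (illustrative): with ALLPROC_HSIZE = 256, a base
 * candidate of 300 hashes to 300 & 255 = 44.  If pid 300 is taken,
 * the inner loop below tries 300 + 256 = 556, and 556 & 255 is still
 * 44, so the search never leaves the hash chain (and token) it
 * started on.
 */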
static void
proc_makepid(struct proc *p, int random_offset)
{
	static pid_t nextpid = 1;	/* heuristic, allowed to race */
	procglob_t *prg;
	struct pgrp *pg;
	struct proc *ps;
	struct session *sess;
	pid_t base;
	int8_t delta8;
	int retries;
	int n;

	/*
	 * Select the next pid base candidate.
	 *
	 * Check for wraparound, do not allow a pid < 100.
	 */
	retries = 0;
loop:
	base = atomic_fetchadd_int(&nextpid, 1) + random_offset;
	if (base <= 0 || base >= PID_MAX) {
		base = base % PID_MAX;
		if (base < 0)
			base = 100;
		if (base < 100)
			base += 100;
		nextpid = base;		/* reset (SMP race ok) */
	}

	/*
	 * Do not allow a base pid to be selected from a domain that has
	 * recently seen a pid/pgid/sessid reap.  Sleep a little if we looped
	 * through all available domains.
	 *
	 * WARNING: We want the early pids to be allocated linearly,
	 *	    particularly pid 1 and pid 2.
	 */
	if (++retries >= PIDSEL_DOMAINS)
		tsleep(&nextpid, 0, "makepid", 1);
	if (base >= 100) {
		delta8 = (int8_t)time_second -
			 (int8_t)pid_doms[base % PIDSEL_DOMAINS];
		if (delta8 >= 0 && delta8 <= PIDDOM_DELAY) {
			++pid_domain_skips;
			goto loop;
		}
	}

	/*
	 * Calculate a hash index and find an unused process id within
	 * the table, looping if we cannot find one.
	 *
	 * The inner loop increments by ALLPROC_HSIZE which keeps the
	 * PID at the same pid_doms[] index as well as the same hash index.
	 */
	n = ALLPROC_HASH(base);
	prg = &procglob[n];
	lwkt_gettoken(&prg->proc_token);

restart1:
	LIST_FOREACH(ps, &prg->allproc, p_list) {
		if (ps->p_pid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto loop;
			}
			++pid_inner_skips;
			goto restart1;
		}
	}
restart2:
	LIST_FOREACH(pg, &prg->allpgrp, pg_list) {
		if (pg->pg_id == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto loop;
			}
			++pid_inner_skips;
			goto restart2;
		}
	}
restart3:
	LIST_FOREACH(sess, &prg->allsess, s_list) {
		if (sess->s_sid == base) {
			base += ALLPROC_HSIZE;
			if (base >= PID_MAX) {
				lwkt_reltoken(&prg->proc_token);
				goto loop;
			}
			++pid_inner_skips;
			goto restart3;
		}
	}

	/*
	 * Assign the pid and insert the process.
	 */
	p->p_pid = base;
	LIST_INSERT_HEAD(&prg->allproc, p, p_list);
	lwkt_reltoken(&prg->proc_token);
}
/*
 * Called from exit1 to place the process into a zombie state.
 * The process is removed from the pid hash and p_stat is set
 * to SZOMB.  Normal pfind[n]() calls will not find it any more.
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_move_allproc_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];
	PSTALL(p, "reap1", 0);
	lwkt_gettoken(&prg->proc_token);

	PSTALL(p, "reap1a", 0);
	p->p_stat = SZOMB;

	lwkt_reltoken(&prg->proc_token);
	dsched_exit_proc(p);
}
/*
 * This routine is called from kern_wait() and will remove the process
 * from the zombie list and the sibling list.  This routine will block
 * if someone has a lock on the process (p_lock).
 *
 * Caller must hold p->p_token.  We are required to wait until p_lock
 * becomes zero before we can manipulate the list, allowing allproc
 * scans to guarantee consistency during a list scan.
 */
void
proc_remove_zombie(struct proc *p)
{
	procglob_t *prg;
	int n;

	n = ALLPROC_HASH(p->p_pid);
	prg = &procglob[n];

	PSTALL(p, "reap2", 0);
	lwkt_gettoken(&prg->proc_token);
	PSTALL(p, "reap2a", 0);
	LIST_REMOVE(p, p_list);		/* remove from master list */
	LIST_REMOVE(p, p_sibling);	/* and from sibling list */
	p->p_pptr = NULL;

	if (pid_doms[p->p_pid % PIDSEL_DOMAINS] != (uint8_t)time_second)
		pid_doms[p->p_pid % PIDSEL_DOMAINS] = (uint8_t)time_second;
	lwkt_reltoken(&prg->proc_token);
}
/*
 * Handle various requirements prior to returning to usermode.  Called from
 * platform trap and system call code.
 */
void
lwpuserret(struct lwp *lp)
{
	struct proc *p = lp->lwp_proc;

	if (lp->lwp_mpflags & LWP_MP_VNLRU) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
		allocvnode_gc();
	}
	if (lp->lwp_mpflags & LWP_MP_WEXIT) {
		lwkt_gettoken(&p->p_token);
		lwp_exit(0, NULL);
		lwkt_reltoken(&p->p_token);	/* NOT REACHED */
	}
}
/*
 * Kernel threads run from user processes can also accumulate deferred
 * actions which need to be acted upon.  Callers include:
 *
 * nfsd - Can allocate lots of vnodes
 */
void
lwpkthreaddeferred(void)
{
	struct lwp *lp = curthread->td_lwp;

	if (lp) {
		if (lp->lwp_mpflags & LWP_MP_VNLRU) {
			atomic_clear_int(&lp->lwp_mpflags, LWP_MP_VNLRU);
			allocvnode_gc();
		}
	}
}
void
proc_usermap(struct proc *p, int invfork)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	upmap = kmalloc(roundup2(sizeof(*upmap), PAGE_SIZE), M_PROC,
			M_WAITOK | M_ZERO);
	if (p->p_upmap == NULL) {
		upmap->header[0].type = UKPTYPE_VERSION;
		upmap->header[0].offset = offsetof(struct sys_upmap, version);
		upmap->header[1].type = UPTYPE_RUNTICKS;
		upmap->header[1].offset = offsetof(struct sys_upmap, runticks);
		upmap->header[2].type = UPTYPE_FORKID;
		upmap->header[2].offset = offsetof(struct sys_upmap, forkid);
		upmap->header[3].type = UPTYPE_PID;
		upmap->header[3].offset = offsetof(struct sys_upmap, pid);
		upmap->header[4].type = UPTYPE_PROC_TITLE;
		upmap->header[4].offset = offsetof(struct sys_upmap, proc_title);
		upmap->header[5].type = UPTYPE_INVFORK;
		upmap->header[5].offset = offsetof(struct sys_upmap, invfork);

		upmap->version = UPMAP_VERSION;
		upmap->pid = p->p_pid;
		upmap->forkid = p->p_forkid;
		upmap->invfork = invfork;
		p->p_upmap = upmap;
	} else {
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
void
proc_userunmap(struct proc *p)
{
	struct sys_upmap *upmap;

	lwkt_gettoken(&p->p_token);
	if ((upmap = p->p_upmap) != NULL) {
		p->p_upmap = NULL;
		kfree(upmap, M_PROC);
	}
	lwkt_reltoken(&p->p_token);
}
/*
 * Scan all processes on the allproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 * Zombie procs are skipped.
 *
 * The callback is made with the process held and proc_token held.
 *
 * We limit the scan to the number of processes as-of the start of
 * the scan so as not to get caught up in an endless loop if new processes
 * are created more quickly than we can scan the old ones.  Add a little
 * slop to try to catch edge cases since nprocs can race.
 *
 * No requirements.
 */
void
allproc_scan(int (*callback)(struct proc *, void *), void *data, int segmented)
{
	int limit = nprocs + ncpus;
	struct proc *p;
	int r;
	int ns;
	int ne;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
			if (--limit <= 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
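
/*
 * Illustrative callback sketch (not from the original file): counting
 * non-zombie processes via allproc_scan().  Returning -1 from the
 * callback would terminate the scan early.
 *
 *	static int
 *	count_cb(struct proc *p, void *data)
 *	{
 *		++*(int *)data;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	allproc_scan(count_cb, &count, 0);
 */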
/*
 * Scan all lwps of processes on the allproc list.  The lwp is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process and lwp both held, and proc_token
 * held.
 *
 * No requirements.
 */
void
alllwp_scan(int (*callback)(struct lwp *, void *), void *data, int segmented)
{
	struct proc *p;
	struct lwp *lp;
	int r = 0;
	int ns;
	int ne;
	int n;

	if (segmented) {
		int id = mycpu->gd_cpuid;
		ns = id * ALLPROC_HSIZE / ncpus;
		ne = (id + 1) * ALLPROC_HSIZE / ncpus;
	} else {
		ns = 0;
		ne = ALLPROC_HSIZE;
	}

	for (n = ns; n < ne; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat == SZOMB)
				continue;
			PHOLD(p);
			lwkt_gettoken(&p->p_token);
			FOREACH_LWP_IN_PROC(lp, p) {
				LWPHOLD(lp);
				r = callback(lp, data);
				LWPRELE(lp);
			}
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Asked to exit early
		 */
		if (r < 0)
			break;
	}
}
/*
 * Scan all processes on the zombproc list.  The process is automatically
 * held for the callback.  A return value of -1 terminates the loop.
 *
 * The callback is made with the process held and proc_token held.
 *
 * No requirements.
 */
void
zombproc_scan(int (*callback)(struct proc *, void *), void *data)
{
	struct proc *p;
	int r;
	int n;

	/*
	 * prg->proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_FIRST(&prg->allproc) == NULL)
			continue;
		lwkt_gettoken(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			if (p->p_stat != SZOMB)
				continue;
			PHOLD(p);
			r = callback(p, data);
			PRELE(p);
			if (r < 0)
				break;
		}
		lwkt_reltoken(&prg->proc_token);

		/*
		 * Check if asked to stop early
		 */
		if (p)
			break;
	}
}
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(pgrpdump, pgrpdump)
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i < ALLPROC_HSIZE; ++i) {
		procglob_t *prg = &procglob[i];

		if (LIST_EMPTY(&prg->allpgrp))
			continue;
		kprintf("\tindx %d\n", i);
		LIST_FOREACH(pgrp, &prg->allpgrp, pg_list) {
			kprintf("\tpgrp %p, pgid %ld, sess %p, "
				"sesscnt %d, mem %p\n",
				(void *)pgrp, (long)pgrp->pg_id,
				(void *)pgrp->pg_session,
				pgrp->pg_session->s_count,
				(void *)LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				kprintf("\t\tpid %ld addr %p pgrp %p\n",
					(long)p->p_pid, (void *)p,
					(void *)p->p_pgrp);
			}
		}
	}
}
#endif /* DDB */
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc(struct proc *p, struct sysctl_req *req, int flags)
{
	struct kinfo_proc ki;
	struct lwp *lp;
	int skp = 0, had_output = 0;
	int error;

	bzero(&ki, sizeof(ki));
	lwkt_gettoken_shared(&p->p_token);
	fill_kinfo_proc(p, &ki);
	if ((flags & KERN_PROC_FLAG_LWP) == 0)
		skp = 1;
	error = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		LWPHOLD(lp);
		fill_kinfo_lwp(lp, &ki.kp_lwp);
		LWPRELE(lp);
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
		had_output = 1;
		if (error)
			break;
		if (skp)
			break;
	}
	lwkt_reltoken(&p->p_token);
	/* We need to output at least the proc, even if there is no lwp. */
	if (had_output == 0) {
		error = SYSCTL_OUT(req, &ki, sizeof(ki));
	}
	return (error);
}
/*
 * The caller must hold proc_token.
 */
static int
sysctl_out_proc_kthread(struct thread *td, struct sysctl_req *req)
{
	struct kinfo_proc ki;
	int error;

	fill_kinfo_proc_kthread(td, &ki);
	error = SYSCTL_OUT(req, &ki, sizeof(ki));
	if (error)
		return error;
	return (0);
}
static int
sysctl_kern_proc(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	int oid = oidp->oid_number;
	u_int namelen = arg2;
	struct proc *p;
	struct thread *td;
	struct thread *marker;
	int flags = 0;
	int error = 0;
	int n;
	int origcpu;
	struct ucred *cr1 = curproc->p_ucred;
	struct ucred *crcache = NULL;

	flags = oid & KERN_PROC_FLAGMASK;
	oid &= ~KERN_PROC_FLAGMASK;

	if ((oid == KERN_PROC_ALL && namelen != 0) ||
	    (oid != KERN_PROC_ALL && namelen != 1)) {
		return (EINVAL);
	}

	/*
	 * proc_token protects the allproc list and PHOLD() prevents the
	 * process from being removed from the allproc list or the zombproc
	 * list.
	 */
	if (oid == KERN_PROC_PID) {
		p = pfind((pid_t)name[0]);
		if (p) {
			crcache = pcredcache(crcache, p);
			if (PRISON_CHECK(cr1, crcache))
				error = sysctl_out_proc(p, req, flags);
			PRELE(p);
		}
		goto post_threads;
	}
	p = NULL;

	if (!req->oldptr) {
		/* overestimate by 5 procs */
		error = SYSCTL_OUT(req, 0, sizeof (struct kinfo_proc) * 5);
		if (error)
			goto post_threads;
	}

	for (n = 0; n < ALLPROC_HSIZE; ++n) {
		procglob_t *prg = &procglob[n];

		if (LIST_EMPTY(&prg->allproc))
			continue;
		lwkt_gettoken_shared(&prg->proc_token);
		LIST_FOREACH(p, &prg->allproc, p_list) {
			/*
			 * Show a user only their processes.
			 */
			if (ps_showallprocs == 0) {
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    p_trespass(cr1, crcache)) {
					continue;
				}
			}

			/*
			 * Skip embryonic processes.
			 */
			if (p->p_stat == SIDL)
				continue;
			/*
			 * TODO - make more efficient (see notes below).
			 */
			switch (oid) {
			case KERN_PROC_PGRP:
				/* could do this by traversing pgrp */
				if (p->p_pgrp == NULL ||
				    p->p_pgrp->pg_id != (pid_t)name[0])
					continue;
				break;

			case KERN_PROC_TTY:
				if ((p->p_flags & P_CONTROLT) == 0 ||
				    p->p_session == NULL ||
				    p->p_session->s_ttyp == NULL ||
				    dev2udev(p->p_session->s_ttyp->t_dev) !=
					(udev_t)name[0])
					continue;
				break;

			case KERN_PROC_UID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_uid != (uid_t)name[0]) {
					continue;
				}
				break;

			case KERN_PROC_RUID:
				crcache = pcredcache(crcache, p);
				if (crcache == NULL ||
				    crcache->cr_ruid != (uid_t)name[0]) {
					continue;
				}
				break;
			}

			crcache = pcredcache(crcache, p);
			if (!PRISON_CHECK(cr1, crcache))
				continue;
			PHOLD(p);
			error = sysctl_out_proc(p, req, flags);
			PRELE(p);
			if (error) {
				lwkt_reltoken(&prg->proc_token);
				goto post_threads;
			}
		}
		lwkt_reltoken(&prg->proc_token);
	}

	/*
	 * Iterate over all active cpus and scan their thread list.  Start
	 * with the next logical cpu and end with our original cpu.  We
	 * migrate our own thread to each target cpu in order to safely scan
	 * its thread list.  In the last loop we migrate back to our original
	 * cpu.
	 */
	origcpu = mycpu->gd_cpuid;
	if (!ps_showallthreads || jailed(cr1))
		goto post_threads;

	marker = kmalloc(sizeof(struct thread), M_TEMP, M_WAITOK|M_ZERO);
	marker->td_flags = TDF_MARKER;
	error = 0;

	for (n = 1; n <= ncpus; ++n) {
		globaldata_t rgd;
		int nid;

		nid = (origcpu + n) % ncpus;
		if (CPUMASK_TESTBIT(smp_active_mask, nid) == 0)
			continue;
		rgd = globaldata_find(nid);
		lwkt_setcpu_self(rgd);

		crit_enter();
		TAILQ_INSERT_TAIL(&rgd->gd_tdallq, marker, td_allq);

		while ((td = TAILQ_PREV(marker, lwkt_queue, td_allq)) != NULL) {
			TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
			TAILQ_INSERT_BEFORE(td, marker, td_allq);
			if (td->td_flags & TDF_MARKER)
				continue;
			if (td->td_proc)
				continue;

			switch (oid) {
			case KERN_PROC_PGRP:
			case KERN_PROC_TTY:
			case KERN_PROC_UID:
			case KERN_PROC_RUID:
				continue;
			default:
				break;
			}

			lwkt_hold(td);
			crit_exit();
			error = sysctl_out_proc_kthread(td, req);
			lwkt_rele(td);
			crit_enter();
			if (error)
				break;
		}
		TAILQ_REMOVE(&rgd->gd_tdallq, marker, td_allq);
		crit_exit();

		if (error)
			break;
	}

	/*
	 * Userland scheduler expects us to return on the same cpu we
	 * started on.
	 */
	if (mycpu->gd_cpuid != origcpu)
		lwkt_setcpu_self(globaldata_find(origcpu));

	kfree(marker, M_TEMP);

post_threads:
	if (crcache)
		crfree(crcache);
	return (error);
}
/*
 * This sysctl allows a process to retrieve the argument list or process
 * title for another process without groping around in the address space
 * of the other process.  It also allows a process to set its own "process
 * title" to a string of its own choice.
 *
 * No requirements.
 */
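/*
 * Illustrative userland usage (a sketch, not from the original file):
 * fetching another process's argument list via this sysctl:
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_ARGS, pid };
 *	char buf[4096];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 4, buf, &len, NULL, 0) == 0) {
 *		// buf holds len bytes of NUL-separated arguments
 *	}
 */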
static int
sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	struct pargs *opa;
	struct pargs *pa;
	int error = 0;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken(&p->p_token);

	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->newptr && curproc != p) {
		error = EPERM;
		goto done;
	}
	if (req->oldptr) {
		if (p->p_upmap != NULL && p->p_upmap->proc_title[0]) {
			/*
			 * Args set via writable user process mmap.
			 * We must calculate the string length manually
			 * because the user data can change at any time.
			 */
			char *base;
			int n;

			base = p->p_upmap->proc_title;
			for (n = 0; n < UPMAP_MAXPROCTITLE - 1; ++n) {
				if (base[n] == 0)
					break;
			}
			error = SYSCTL_OUT(req, base, n);
			if (error == 0)
				error = SYSCTL_OUT(req, "", 1);
		} else if ((pa = p->p_args) != NULL) {
			/*
			 * Args set by setproctitle() sysctl.
			 */
			refcount_acquire(&pa->ar_ref);
			error = SYSCTL_OUT(req, pa->ar_args, pa->ar_length);
			if (refcount_release(&pa->ar_ref))
				kfree(pa, M_PARGS);
		}
	}
	if (req->newptr == NULL)
		goto done;

	if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) {
		goto done;
	}

	pa = kmalloc(sizeof(struct pargs) + req->newlen, M_PARGS, M_WAITOK);
	refcount_init(&pa->ar_ref, 1);
	pa->ar_length = req->newlen;
	error = SYSCTL_IN(req, pa->ar_args, req->newlen);
	if (error) {
		kfree(pa, M_PARGS);
		goto done;
	}

	/*
	 * Replace p_args with the new pa.  p_args may have previously
	 * been NULL.
	 */
	opa = p->p_args;
	p->p_args = pa;

	if (opa) {
		KKASSERT(opa->ar_ref > 0);
		if (refcount_release(&opa->ar_ref)) {
			kfree(opa, M_PARGS);
		}
	}
done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
static int
sysctl_kern_proc_cwd(SYSCTL_HANDLER_ARGS)
{
	int *name = (int*) arg1;
	u_int namelen = arg2;
	struct proc *p;
	int error = 0;
	char *fullpath, *freepath;
	struct ucred *cr1 = curproc->p_ucred;

	if (namelen != 1)
		return (EINVAL);

	p = pfind((pid_t)name[0]);
	if (p == NULL)
		goto done;
	lwkt_gettoken_shared(&p->p_token);

	/*
	 * If we are not allowed to see other args, we certainly shouldn't
	 * get the cwd either.  Also check the usual trespassing.
	 */
	if ((!ps_argsopen) && p_trespass(cr1, p->p_ucred))
		goto done;

	if (req->oldptr && p->p_fd != NULL && p->p_fd->fd_ncdir.ncp) {
		struct nchandle nch;

		cache_copy(&p->p_fd->fd_ncdir, &nch);
		error = cache_fullpath(p, &nch, NULL,
				       &fullpath, &freepath, 0);
		cache_drop(&nch);
		if (error)
			goto done;
		error = SYSCTL_OUT(req, fullpath, strlen(fullpath) + 1);
		kfree(freepath, M_TEMP);
	}

done:
	if (p) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	return (error);
}
/*
 * This sysctl allows a process to retrieve the path of the executable for
 * itself or another process.
 */
static int
sysctl_kern_proc_pathname(SYSCTL_HANDLER_ARGS)
{
	pid_t *pidp = (pid_t *)arg1;
	unsigned int arglen = arg2;
	struct proc *p;
	char *retbuf, *freebuf;
	int error = 0;
	struct nchandle nch;

	if (arglen != 1)
		return (EINVAL);
	if (*pidp == -1) {	/* -1 means this process */
		p = curproc;
	} else {
		p = pfind(*pidp);
		if (p == NULL)
			return (ESRCH);
	}

	cache_copy(&p->p_textnch, &nch);
	error = cache_fullpath(p, &nch, NULL, &retbuf, &freebuf, 0);
	cache_drop(&nch);
	if (error)
		goto done;
	error = SYSCTL_OUT(req, retbuf, strlen(retbuf) + 1);
	kfree(freebuf, M_TEMP);
done:
	if (p != curproc)
		PRELE(p);
	return (error);
}
static int
sysctl_kern_proc_sigtramp(SYSCTL_HANDLER_ARGS)
{
	/*int *name = (int *)arg1;*/
	u_int namelen = arg2;
	struct kinfo_sigtramp kst;
	const struct sysentvec *sv;
	int error;

	if (namelen > 1)
		return (EINVAL);
	/* ignore pid if passed in (freebsd compatibility) */

	sv = curproc->p_sysent;
	bzero(&kst, sizeof(kst));
	if (sv->sv_szsigcode) {
		intptr_t sigbase;

		sigbase = trunc_page64((intptr_t)PS_STRINGS -
				       *sv->sv_szsigcode);
		sigbase -= SZSIGCODE_EXTRA_BYTES;

		kst.ksigtramp_start = (void *)sigbase;
		kst.ksigtramp_end = (void *)(sigbase + *sv->sv_szsigcode);
	}
	error = SYSCTL_OUT(req, &kst, sizeof(kst));

	return (error);
}
SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD, 0, "Process table");

SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc, "S,proc", "Return entire process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PGRP, pgrp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_TTY, tty,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_UID, uid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_RUID, ruid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_ALL | KERN_PROC_FLAG_LWP), all_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PGRP | KERN_PROC_FLAG_LWP), pgrp_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_TTY | KERN_PROC_FLAG_LWP), tty_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_UID | KERN_PROC_FLAG_LWP), uid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_RUID | KERN_PROC_FLAG_LWP), ruid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_FLAG_LWP), pid_lwp,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc, "Process table");

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args,
	CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_args, "Process argument list");
SYSCTL_NODE(_kern_proc, KERN_PROC_CWD, cwd,
	CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_NOLOCK,
	sysctl_kern_proc_cwd, "Process current working directory");
static SYSCTL_NODE(_kern_proc, KERN_PROC_PATHNAME, pathname,
	CTLFLAG_RD | CTLFLAG_NOLOCK,
	sysctl_kern_proc_pathname, "Process executable path");

SYSCTL_PROC(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp,
	CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_NOLOCK,
	0, 0, sysctl_kern_proc_sigtramp, "S,sigtramp",
	"Return sigtramp address range");