/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Simon 'corecode' Schubert <corecode@fs.ei.tum.de>
 * and Thomas E. Spanjaard <tgen@netphreax.net>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_kinfo.c,v 1.17 2008/01/07 23:41:55 dillon Exp $
 */

/*
 * This is a source file used by both the kernel and libkvm.
 */

#define _KERNEL_STRUCTURES

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/rtprio.h>
#include <sys/jail.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <vm/vm_map.h>
#include <sys/kinfo.h>
#include <sys/globaldata.h>
#include <sys/systm.h>
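
/*
 * When this file is built into libkvm it is compiled without _KERNEL;
 * _KERNEL_STRUCTURES is defined above so that the kernel-internal layouts
 * of struct proc, struct lwp and struct thread used by the fill routines
 * below are visible to the userland build as well.
 */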

/*
 * Fill in a struct kinfo_proc.
 *
 * NOTE! We may be asked to fill in kinfo_proc for a zombied process, and
 * the process may be in the middle of being deallocated.  Check all pointers
 * for NULL.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{
	struct session *sess;
	struct pgrp *pgrp;

	pgrp = p->p_pgrp;
	sess = pgrp ? pgrp->pg_session : NULL;

	bzero(kp, sizeof(*kp));

	kp->kp_paddr = (uintptr_t)p;
	kp->kp_fd = (uintptr_t)p->p_fd;

	kp->kp_flags = p->p_flag;
	kp->kp_stat = p->p_stat;
	kp->kp_lock = p->p_lock;
	kp->kp_acflag = p->p_acflag;
	kp->kp_traceflag = p->p_traceflag;
	kp->kp_siglist = p->p_siglist;
	if (p->p_sigacts) {
		kp->kp_sigignore = p->p_sigignore;	/* p_sigacts-> */
		kp->kp_sigcatch = p->p_sigcatch;	/* p_sigacts-> */
		kp->kp_sigflag = p->p_sigacts->ps_flag;
	}
	kp->kp_start = p->p_start;
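	/*
	 * The "p_sigacts->" annotations above note that p_sigignore and
	 * p_sigcatch are shorthands which resolve through p->p_sigacts,
	 * hence the NULL check on p_sigacts.
	 */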

	strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;

	if (p->p_ucred) {
		kp->kp_uid = p->p_ucred->cr_uid;
		kp->kp_ngroups = p->p_ucred->cr_ngroups;
		if (p->p_ucred->cr_groups) {
			bcopy(p->p_ucred->cr_groups, kp->kp_groups,
			      NGROUPS * sizeof(kp->kp_groups[0]));
		}
		kp->kp_ruid = p->p_ucred->cr_ruid;
		kp->kp_svuid = p->p_ucred->cr_svuid;
		kp->kp_rgid = p->p_ucred->cr_rgid;
		kp->kp_svgid = p->p_ucred->cr_svgid;
	}

	kp->kp_pid = p->p_pid;
	if (p->p_oppid != 0)
		kp->kp_ppid = p->p_oppid;
	else
		kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
	if (pgrp) {
		kp->kp_pgid = pgrp->pg_id;
		kp->kp_jobc = pgrp->pg_jobc;
	}
	if (sess) {
		kp->kp_sid = sess->s_sid;
		bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
		if (sess->s_ttyvp != NULL)
			kp->kp_auxflags |= KI_CTTY;
		if (SESS_LEADER(p))
			kp->kp_auxflags |= KI_SLEADER;
	}
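	/*
	 * Controlling terminal: export the tty device together with its
	 * foreground process group and session when the process has a
	 * controlling tty, otherwise report NOUDEV.
	 */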
	if (sess && (p->p_flag & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
		kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev);
		if (sess->s_ttyp->t_pgrp != NULL)
			kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
		else
			kp->kp_tpgid = -1;
		if (sess->s_ttyp->t_session != NULL)
			kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
		else
			kp->kp_tsid = -1;
	} else {
		kp->kp_tdev = NOUDEV;
	}
	kp->kp_exitstat = p->p_xstat;
	kp->kp_nthreads = p->p_nthreads;
	kp->kp_nice = p->p_nice;
	kp->kp_swtime = p->p_swtime;

	if (p->p_vmspace) {
		kp->kp_vm_map_size = p->p_vmspace->vm_map.size;
		kp->kp_vm_rssize = vmspace_resident_count(p->p_vmspace);
		kp->kp_vm_swrss = p->p_vmspace->vm_swrss;
		kp->kp_vm_tsize = p->p_vmspace->vm_tsize;
		kp->kp_vm_dsize = p->p_vmspace->vm_dsize;
		kp->kp_vm_ssize = p->p_vmspace->vm_ssize;
	}

	if (p->p_ucred && jailed(p->p_ucred))
		kp->kp_jailid = p->p_ucred->cr_prison->pr_id;

	kp->kp_ru = p->p_ru;
	kp->kp_cru = p->p_cru;
}
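
/*
 * For reference, a minimal (hypothetical) userland consumer of the structure
 * filled in above, going through libkvm; error handling and includes
 * (<kvm.h>, <fcntl.h>, <sys/sysctl.h>) are omitted for brevity:
 *
 *	kvm_t *kd = kvm_open(NULL, NULL, NULL, O_RDONLY, "example");
 *	int nprocs;
 *	struct kinfo_proc *kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
 *	for (int i = 0; i < nprocs; i++)
 *		printf("%d\t%s\n", (int)kp[i].kp_pid, kp[i].kp_comm);
 *	kvm_close(kd);
 */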

/*
 * Fill in a struct kinfo_lwp.
 */
void
fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl)
{
	bzero(kl, sizeof(*kl));

	kl->kl_pid = lwp->lwp_proc->p_pid;
	kl->kl_tid = lwp->lwp_tid;

	kl->kl_flags = lwp->lwp_flag;
	kl->kl_stat = lwp->lwp_stat;
	kl->kl_lock = lwp->lwp_lock;
	kl->kl_tdflags = lwp->lwp_thread->td_flags;

	/*
	 * The process/lwp stat may not reflect whether the process is
	 * actually sleeping or not if the related thread was directly
	 * descheduled by LWKT.  Adjust the stat if the thread is not
	 * runnable and not waiting to be scheduled on a cpu by the
	 * user process scheduler.
	 */
	if (kl->kl_stat == LSRUN) {
		if ((kl->kl_tdflags & TDF_RUNQ) == 0 &&
		    (lwp->lwp_flag & LWP_ONRUNQ) == 0) {
			kl->kl_stat = LSSLEEP;
		}
	}
#ifdef SMP
	kl->kl_mpcount = lwp->lwp_thread->td_mpcount;
#else
	kl->kl_mpcount = 0;
#endif
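
	/*
	 * lwp_usdata is private to the userland scheduler; the .bsd4
	 * members read below assume the lwp is managed by usched_bsd4,
	 * which is what the XXX notes on those lines warn about.
	 */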
	kl->kl_prio = lwp->lwp_usdata.bsd4.priority;	/* XXX TGEN dangerous assumption */
	kl->kl_tdprio = lwp->lwp_thread->td_pri;
	kl->kl_rtprio = lwp->lwp_rtprio;

	kl->kl_uticks = lwp->lwp_thread->td_uticks;
	kl->kl_sticks = lwp->lwp_thread->td_sticks;
	kl->kl_iticks = lwp->lwp_thread->td_iticks;
	kl->kl_cpticks = lwp->lwp_cpticks;
	kl->kl_pctcpu = lwp->lwp_pctcpu;
	kl->kl_slptime = lwp->lwp_slptime;
	kl->kl_origcpu = lwp->lwp_usdata.bsd4.origcpu;	/* XXX TGEN same */
	kl->kl_estcpu = lwp->lwp_usdata.bsd4.estcpu;
	kl->kl_cpuid = lwp->lwp_thread->td_gd->gd_cpuid;

	kl->kl_ru = lwp->lwp_ru;

	kl->kl_siglist = lwp->lwp_siglist;
	kl->kl_sigmask = lwp->lwp_sigmask;

	kl->kl_wchan = (uintptr_t)lwp->lwp_thread->td_wchan;
	if (lwp->lwp_thread->td_wmesg) {
		strncpy(kl->kl_wmesg, lwp->lwp_thread->td_wmesg, WMESGLEN);
		kl->kl_wmesg[WMESGLEN] = 0;
	}
}
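
/*
 * A sketch of how the two routines above are meant to be combined for a
 * multi-threaded process: the per-process part is filled once and the
 * embedded kp_lwp once per lwp.  The iteration below is illustrative only:
 *
 *	struct kinfo_proc kp;
 *
 *	fill_kinfo_proc(p, &kp);
 *	FOREACH_LWP_IN_PROC(lwp, p) {
 *		fill_kinfo_lwp(lwp, &kp.kp_lwp);
 *		(emit one kinfo_proc record per lwp)
 *	}
 */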

/*
 * Fill in a struct kinfo_proc for kernel threads (i.e. those without proc).
 */
void
fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp)
{
	bzero(kp, sizeof(*kp));

	/*
	 * Fill in fake proc information and semi-fake lwp info.
	 */
	kp->kp_tdev = NOUDEV;
	strncpy(kp->kp_comm, td->td_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;
	kp->kp_flags = P_SYSTEM;
	kp->kp_stat = SACTIVE;

	kp->kp_lwp.kl_pid = -1;
	kp->kp_lwp.kl_tid = -1;
	kp->kp_lwp.kl_tdflags = td->td_flags;
#ifdef SMP
	kp->kp_lwp.kl_mpcount = td->td_mpcount;
#else
	kp->kp_lwp.kl_mpcount = 0;
#endif

	kp->kp_lwp.kl_tdprio = td->td_pri;
	kp->kp_lwp.kl_rtprio.type = RTP_PRIO_THREAD;
	kp->kp_lwp.kl_rtprio.prio = td->td_pri & TDPRI_MASK;

	kp->kp_lwp.kl_uticks = td->td_uticks;
	kp->kp_lwp.kl_sticks = td->td_sticks;
	kp->kp_lwp.kl_iticks = td->td_iticks;
	kp->kp_lwp.kl_cpuid = td->td_gd->gd_cpuid;

	kp->kp_lwp.kl_wchan = (uintptr_t)td->td_wchan;
	if (td->td_flags & TDF_RUNQ)
		kp->kp_lwp.kl_stat = LSRUN;
	else
		kp->kp_lwp.kl_stat = LSSLEEP;
	if (td->td_wmesg) {
		strncpy(kp->kp_lwp.kl_wmesg, td->td_wmesg, WMESGLEN);
		kp->kp_lwp.kl_wmesg[WMESGLEN] = 0;
	}
}
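
/*
 * Note: the kthread variant above is what allows pure LWKT kernel threads
 * (which have no struct proc or struct lwp) to be reported alongside real
 * processes; only the embedded kp_lwp carries real scheduling state, the
 * rest is the fake/semi-fake information noted above.
 */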