/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Simon 'corecode' Schubert <corecode@fs.ei.tum.de>
 * by Thomas E. Spanjaard <tgen@netphreax.net>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_kinfo.c,v 1.17 2008/01/07 23:41:55 dillon Exp $
 */

/*
 * This is a source file used by both the kernel and libkvm.
 */

#ifndef _KERNEL
#define _KERNEL_STRUCTURES
#endif

#include <sys/proc.h>
#include <vm/vm_map.h>
#include <sys/kinfo.h>
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/jail.h>
#include <sys/globaldata.h>
#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <string.h>

dev_t dev2udev(cdev_t dev);	/* kvm_proc.c */
#endif
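
/*
 * Illustrative note (reconstruction aid, not part of the original file):
 * the userland consumer is assumed to compile this file directly with
 * _KERNEL undefined, e.g. libkvm's kvm_proc.c pulling it in with an
 * #include of this .c file (the exact path is an assumption here).  In
 * that build _KERNEL_STRUCTURES above exposes the kernel-private layout
 * of struct proc, struct lwp and struct thread, and dev2udev() is
 * supplied by the including file.
 */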

/*
 * Fill in a struct kinfo_proc.
 *
 * NOTE! We may be asked to fill in kinfo_proc for a zombied process, and
 * the process may be in the middle of being deallocated.  Check all pointers
 * for NULL.
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{
	struct session *sess;
	struct pgrp *pgrp;

	pgrp = p->p_pgrp;
	sess = pgrp ? pgrp->pg_session : NULL;

	bzero(kp, sizeof(*kp));

	kp->kp_paddr = (uintptr_t)p;
	kp->kp_fd = (uintptr_t)p->p_fd;

	kp->kp_flags = p->p_flag;
	kp->kp_stat = p->p_stat;
	kp->kp_lock = p->p_lock;
	kp->kp_acflag = p->p_acflag;
	kp->kp_traceflag = p->p_traceflag;
	kp->kp_siglist = p->p_siglist;

	kp->kp_sigignore = p->p_sigignore;	/* p_sigacts-> */
	kp->kp_sigcatch = p->p_sigcatch;	/* p_sigacts-> */
	kp->kp_sigflag = p->p_sigacts->ps_flag;

	kp->kp_start = p->p_start;

	strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;

	if (p->p_ucred) {
		kp->kp_uid = p->p_ucred->cr_uid;
		kp->kp_ngroups = p->p_ucred->cr_ngroups;
		if (p->p_ucred->cr_groups) {
			bcopy(p->p_ucred->cr_groups, kp->kp_groups,
			      NGROUPS * sizeof(kp->kp_groups[0]));
		}
		kp->kp_ruid = p->p_ucred->cr_ruid;
		kp->kp_svuid = p->p_ucred->cr_svuid;
		kp->kp_rgid = p->p_ucred->cr_rgid;
		kp->kp_svgid = p->p_ucred->cr_svgid;
	}

	kp->kp_pid = p->p_pid;
	if (p->p_oppid != 0)
		kp->kp_ppid = p->p_oppid;
	else
		kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
	if (pgrp) {
		kp->kp_pgid = pgrp->pg_id;
		kp->kp_jobc = pgrp->pg_jobc;
	}
	if (sess) {
		kp->kp_sid = sess->s_sid;
		bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
		if (sess->s_ttyvp != NULL)
			kp->kp_auxflags |= KI_CTTY;
		if (sess->s_leader == p)
			kp->kp_auxflags |= KI_SLEADER;
	}
	if (sess && (p->p_flag & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
		kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev);
		if (sess->s_ttyp->t_pgrp != NULL)
			kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
		else
			kp->kp_tpgid = -1;
		if (sess->s_ttyp->t_session != NULL)
			kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
		else
			kp->kp_tsid = -1;
	} else {
		kp->kp_tdev = NOUDEV;
	}
	kp->kp_exitstat = p->p_xstat;
	kp->kp_nthreads = p->p_nthreads;
	kp->kp_nice = p->p_nice;
	kp->kp_swtime = p->p_swtime;

	if (p->p_vmspace) {
		kp->kp_vm_map_size = p->p_vmspace->vm_map.size;
		kp->kp_vm_rssize = vmspace_resident_count(p->p_vmspace);
		kp->kp_vm_swrss = p->p_vmspace->vm_swrss;
		kp->kp_vm_tsize = p->p_vmspace->vm_tsize;
		kp->kp_vm_dsize = p->p_vmspace->vm_dsize;
		kp->kp_vm_ssize = p->p_vmspace->vm_ssize;
	}

	if (p->p_ucred && jailed(p->p_ucred))
		kp->kp_jailid = p->p_ucred->cr_prison->pr_id;

	kp->kp_ru = p->p_ru;
	kp->kp_cru = p->p_cru;
}
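
/*
 * Illustrative usage sketch only, not part of the original file: one way a
 * caller such as a sysctl handler might hand a kinfo_proc to userland.  The
 * helper name is hypothetical and <sys/sysctl.h> is assumed to be included
 * by the real caller.
 */
#if 0
static int
example_copyout_proc(struct proc *p, struct sysctl_req *req)
{
	struct kinfo_proc ki;

	fill_kinfo_proc(p, &ki);	/* bzero()s ki before filling it */
	return (SYSCTL_OUT(req, &ki, sizeof(ki)));
}
#endif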

/*
 * Fill in a struct kinfo_lwp.
 */
void
fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl)
{
	bzero(kl, sizeof(*kl));

	kl->kl_pid = lwp->lwp_proc->p_pid;
	kl->kl_tid = lwp->lwp_tid;

	kl->kl_flags = lwp->lwp_flag;
	kl->kl_stat = lwp->lwp_stat;
	kl->kl_lock = lwp->lwp_lock;
	kl->kl_tdflags = lwp->lwp_thread->td_flags;

	/*
	 * The process/lwp stat may not reflect whether the process is
	 * actually sleeping or not if the related thread was directly
	 * descheduled by LWKT.  Adjust the stat if the thread is not
	 * runnable and not waiting to be scheduled on a cpu by the
	 * user process scheduler.
	 */
	if (kl->kl_stat == LSRUN) {
		if ((kl->kl_tdflags & TDF_RUNQ) == 0 &&
		    (lwp->lwp_flag & LWP_ONRUNQ) == 0) {
			kl->kl_stat = LSSLEEP;
		}
	}
#ifdef _KERNEL
	kl->kl_mpcount = lwp->lwp_thread->td_mpcount;
#else
	kl->kl_mpcount = 0;
#endif

	kl->kl_prio = lwp->lwp_usdata.bsd4.priority;	/* XXX TGEN dangerous assumption */
	kl->kl_tdprio = lwp->lwp_thread->td_pri;
	kl->kl_rtprio = lwp->lwp_rtprio;

	kl->kl_uticks = lwp->lwp_thread->td_uticks;
	kl->kl_sticks = lwp->lwp_thread->td_sticks;
	kl->kl_iticks = lwp->lwp_thread->td_iticks;
	kl->kl_cpticks = lwp->lwp_cpticks;
	kl->kl_pctcpu = lwp->lwp_pctcpu;
	kl->kl_slptime = lwp->lwp_slptime;
	kl->kl_origcpu = lwp->lwp_usdata.bsd4.origcpu;	/* XXX TGEN same */
	kl->kl_estcpu = lwp->lwp_usdata.bsd4.estcpu;
	kl->kl_cpuid = lwp->lwp_thread->td_gd->gd_cpuid;

	kl->kl_ru = lwp->lwp_ru;

	kl->kl_siglist = lwp->lwp_siglist;
	kl->kl_sigmask = lwp->lwp_sigmask;

	kl->kl_wchan = (uintptr_t)lwp->lwp_thread->td_wchan;
	if (lwp->lwp_thread->td_wmesg) {
		strncpy(kl->kl_wmesg, lwp->lwp_thread->td_wmesg, WMESGLEN);
		kl->kl_wmesg[WMESGLEN] = 0;
	}
}
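
/*
 * Illustrative usage sketch only, not part of the original file: filling
 * one kinfo_lwp per lwp of a process.  The helper name and fixed-size
 * output buffer are hypothetical; FOREACH_LWP_IN_PROC() is assumed to be
 * the lwp iterator provided by <sys/proc.h>.
 */
#if 0
static int
example_fill_all_lwps(struct proc *p, struct kinfo_lwp *klbuf, int klmax)
{
	struct lwp *lp;
	int n = 0;

	FOREACH_LWP_IN_PROC(lp, p) {
		if (n >= klmax)
			break;
		fill_kinfo_lwp(lp, &klbuf[n]);
		++n;
	}
	return (n);		/* number of entries filled */
}
#endif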

/*
 * Fill in a struct kinfo_proc for kernel threads (i.e. those without proc).
 */
void
fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp)
{
	bzero(kp, sizeof(*kp));

	/*
	 * Fill in fake proc information and semi-fake lwp info.
	 */
	kp->kp_tdev = NOUDEV;
	strncpy(kp->kp_comm, td->td_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;
	kp->kp_flags = P_SYSTEM;
	kp->kp_stat = SACTIVE;

	kp->kp_lwp.kl_pid = -1;
	kp->kp_lwp.kl_tid = -1;
	kp->kp_lwp.kl_tdflags = td->td_flags;
#ifdef _KERNEL
	kp->kp_lwp.kl_mpcount = td->td_mpcount;
#else
	kp->kp_lwp.kl_mpcount = 0;
#endif

	kp->kp_lwp.kl_tdprio = td->td_pri;
	kp->kp_lwp.kl_rtprio.type = RTP_PRIO_THREAD;
	kp->kp_lwp.kl_rtprio.prio = td->td_pri & TDPRI_MASK;

	kp->kp_lwp.kl_uticks = td->td_uticks;
	kp->kp_lwp.kl_sticks = td->td_sticks;
	kp->kp_lwp.kl_iticks = td->td_iticks;
	kp->kp_lwp.kl_cpuid = td->td_gd->gd_cpuid;

	kp->kp_lwp.kl_wchan = (uintptr_t)td->td_wchan;
	if (td->td_flags & TDF_RUNQ)
		kp->kp_lwp.kl_stat = LSRUN;
	else
		kp->kp_lwp.kl_stat = LSSLEEP;
	if (td->td_wmesg) {
		strncpy(kp->kp_lwp.kl_wmesg, td->td_wmesg, WMESGLEN);
		kp->kp_lwp.kl_wmesg[WMESGLEN] = 0;
	}
}
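
/*
 * Illustrative usage sketch only, not part of the original file: how a
 * caller walking all threads might choose between the two fill routines,
 * reporting pure kernel threads (td_proc == NULL) as fake procs.  The
 * helper name is hypothetical.
 */
#if 0
static void
example_fill_any_thread(struct thread *td, struct kinfo_proc *kp)
{
	if (td->td_proc != NULL)
		fill_kinfo_proc(td->td_proc, kp);
	else
		fill_kinfo_proc_kthread(td, kp);
}
#endif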