/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Simon 'corecode' Schubert <corecode@fs.ei.tum.de>
 * by Thomas E. Spanjaard <tgen@netphreax.net>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This is a source file used by both the kernel and libkvm.
 */
41 #define _KERNEL_STRUCTURES
45 #include <vm/vm_map.h>
46 #include <sys/kinfo.h>
50 #include <sys/mplock2.h>
51 #include <sys/globaldata.h>
53 #include <sys/systm.h>
/*
 * Convert a cdev_t to a userland dev_t.  This file is built into both the
 * kernel and libkvm; the libkvm implementation of this symbol lives in
 * kvm_proc.c (see trailing comment).
 */
dev_t dev2udev(cdev_t dev); /* kvm_proc.c */
/*
 * Fill in a struct kinfo_proc.
 *
 * NOTE! We may be asked to fill in kinfo_proc for a zombied process, and
 * the process may be in the middle of being deallocated.  Check all pointers
 * for NULL (the pgrp/sess derivation below guards against NULL).
 *
 * Caller must hold p->p_token
 */
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
	/*
	 * NOTE(review): the return type, opening brace, the local
	 * declarations (pgrp, sess, vm) and the "pgrp = ..." assignment
	 * that must precede this line appear to have been lost when this
	 * file was extracted -- confirm against the upstream source.
	 */
	sess = pgrp ? pgrp->pg_session : NULL;

	/* Start from a zeroed record so unfilled fields read as 0/NULL. */
	bzero(kp, sizeof(*kp));

	/* Kernel addresses exported as opaque integers (for kvm consumers). */
	kp->kp_paddr = (uintptr_t)p;
	kp->kp_fd = (uintptr_t)p->p_fd;

	/* Basic process state. */
	kp->kp_flags = p->p_flags;
	kp->kp_stat = p->p_stat;
	kp->kp_lock = p->p_lock;
	kp->kp_acflag = p->p_acflag;
	kp->kp_traceflag = p->p_traceflag;
	kp->kp_siglist = p->p_siglist;
	kp->kp_sigignore = p->p_sigignore;	/* p_sigacts-> */
	kp->kp_sigcatch = p->p_sigcatch;	/* p_sigacts-> */
	kp->kp_sigflag = p->p_sigacts->ps_flag;
	kp->kp_start = p->p_start;

	/* Command name; explicitly NUL-terminated after strncpy. */
	strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;

	/* Credentials (uid/gid and supplementary groups). */
	kp->kp_uid = p->p_ucred->cr_uid;
	kp->kp_ngroups = p->p_ucred->cr_ngroups;
	if (p->p_ucred->cr_groups) {
		bcopy(p->p_ucred->cr_groups, kp->kp_groups,
		      NGROUPS * sizeof(kp->kp_groups[0]));
	/*
	 * NOTE(review): the closing brace of the if above appears to have
	 * been dropped in extraction.
	 */
	kp->kp_ruid = p->p_ucred->cr_ruid;
	kp->kp_svuid = p->p_ucred->cr_svuid;
	kp->kp_rgid = p->p_ucred->cr_rgid;
	kp->kp_svgid = p->p_ucred->cr_svgid;

	/* Process/parent ids. */
	kp->kp_pid = p->p_pid;
	kp->kp_ppid = p->p_oppid;
	/*
	 * NOTE(review): kp_ppid is assigned twice in a row here; an
	 * if/else (presumably keyed on p->p_oppid != 0) looks to have been
	 * lost in extraction -- verify against upstream.
	 */
	kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
	/*
	 * Process group / session info.  NOTE(review): pgrp and sess are
	 * dereferenced without NULL checks below even though sess was
	 * guarded above; enclosing "if (pgrp)" / "if (sess)" blocks appear
	 * to have been dropped in extraction.
	 */
	kp->kp_pgid = pgrp->pg_id;
	kp->kp_jobc = pgrp->pg_jobc;
	kp->kp_sid = sess->s_sid;
	bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
	if (sess->s_ttyvp != NULL)
		kp->kp_auxflags |= KI_CTTY;
	if ((p->p_session != NULL) && SESS_LEADER(p))
		kp->kp_auxflags |= KI_SLEADER;
	/* Controlling terminal, if the process has one. */
	if (sess && (p->p_flags & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
		kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev);
		if (sess->s_ttyp->t_pgrp != NULL)
			kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
		if (sess->s_ttyp->t_session != NULL)
			kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
	/*
	 * NOTE(review): the closing brace / else branch separating the
	 * block above from the NOUDEV assignment below appears to have
	 * been lost in extraction (NOUDEV = no controlling terminal).
	 */
		kp->kp_tdev = NOUDEV;
	kp->kp_exitstat = p->p_xstat;
	kp->kp_nthreads = p->p_nthreads;
	kp->kp_nice = p->p_nice;
	kp->kp_swtime = p->p_swtime;

	/* VM statistics; p_vmspace may be NULL for a zombied process. */
	if ((vm = p->p_vmspace) != NULL) {
		kp->kp_vm_map_size = vm->vm_map.size;
		kp->kp_vm_rssize = vmspace_resident_count(vm);
		/*kp->kp_vm_prssize = vmspace_president_count(vm);*/
		kp->kp_vm_swrss = vm->vm_swrss;
		kp->kp_vm_tsize = vm->vm_tsize;
		kp->kp_vm_dsize = vm->vm_dsize;
		kp->kp_vm_ssize = vm->vm_ssize;
	/*
	 * NOTE(review): the closing brace of the if above appears missing
	 * (extraction artifact).
	 */
	/* Jail id when the process runs under a prison. */
	if (p->p_ucred && jailed(p->p_ucred))
		kp->kp_jailid = p->p_ucred->cr_prison->pr_id;

	/* Accumulated child rusage. */
	kp->kp_cru = p->p_cru;
/*
 * Fill in a struct kinfo_lwp from an lwp.
 */
fill_kinfo_lwp(struct lwp *lwp, struct kinfo_lwp *kl)
	/*
	 * NOTE(review): the return type and opening brace appear to have
	 * been lost when this file was extracted.
	 */
	/* Start from a zeroed record so unfilled fields read as 0/NULL. */
	bzero(kl, sizeof(*kl));

	/* Identity: owning process pid and thread id. */
	kl->kl_pid = lwp->lwp_proc->p_pid;
	kl->kl_tid = lwp->lwp_tid;

	/* Basic lwp state, plus the LWKT thread flags. */
	kl->kl_flags = lwp->lwp_flags;
	kl->kl_stat = lwp->lwp_stat;
	kl->kl_lock = lwp->lwp_lock;
	kl->kl_tdflags = lwp->lwp_thread->td_flags;

	/*
	 * The process/lwp stat may not reflect whether the process is
	 * actually sleeping or not if the related thread was directly
	 * descheduled by LWKT.  Adjust the stat if the thread is not
	 * runnable and not waiting to be scheduled on a cpu by the
	 * user process scheduler.
	 */
	if (kl->kl_stat == LSRUN) {
		if ((kl->kl_tdflags & TDF_RUNQ) == 0 &&
		    (lwp->lwp_mpflags & LWP_MP_ONRUNQ) == 0) {
			kl->kl_stat = LSSLEEP;
	/*
	 * NOTE(review): the two closing braces for the nested ifs above,
	 * and probably an #ifdef SMP/#else/#endif pair around the
	 * get_mplock_count() call below, appear lost in extraction.
	 */
	kl->kl_mpcount = get_mplock_count(lwp->lwp_thread);

	/* Priorities: userland scheduler, LWKT, and realtime. */
	kl->kl_prio = lwp->lwp_usdata.bsd4.priority;	/* XXX TGEN dangerous assumption */
	kl->kl_tdprio = lwp->lwp_thread->td_pri;
	kl->kl_rtprio = lwp->lwp_rtprio;

	/* Tick accounting and cpu usage. */
	kl->kl_uticks = lwp->lwp_thread->td_uticks;
	kl->kl_sticks = lwp->lwp_thread->td_sticks;
	kl->kl_iticks = lwp->lwp_thread->td_iticks;
	kl->kl_cpticks = lwp->lwp_cpticks;
	/* A zombie's pctcpu is meaningless; report 0. */
	kl->kl_pctcpu = lwp->lwp_proc->p_stat == SZOMB ? 0 : lwp->lwp_pctcpu;
	kl->kl_slptime = lwp->lwp_slptime;
	kl->kl_origcpu = lwp->lwp_usdata.bsd4.batch;
	kl->kl_estcpu = lwp->lwp_usdata.bsd4.estcpu;
	kl->kl_cpuid = lwp->lwp_thread->td_gd->gd_cpuid;

	/* Resource usage for this lwp. */
	kl->kl_ru = lwp->lwp_ru;

	/* Pending signals and the signal mask. */
	kl->kl_siglist = lwp->lwp_siglist;
	kl->kl_sigmask = lwp->lwp_sigmask;

	/* Sleep channel and message; kl_wmesg explicitly NUL-terminated. */
	kl->kl_wchan = (uintptr_t)lwp->lwp_thread->td_wchan;
	if (lwp->lwp_thread->td_wmesg) {
		strncpy(kl->kl_wmesg, lwp->lwp_thread->td_wmesg, WMESGLEN);
		kl->kl_wmesg[WMESGLEN] = 0;
	/*
	 * NOTE(review): the closing brace of the if above appears missing
	 * (extraction artifact).  strlcpy guarantees NUL termination.
	 */
	strlcpy(kl->kl_comm, lwp->lwp_thread->td_comm, sizeof(kl->kl_comm));
/*
 * Fill in a struct kinfo_proc for kernel threads (i.e. those without proc).
 *
 * NOTE(review): the end of this function is not visible in this chunk; the
 * return type, opening brace and closing brace appear lost in extraction.
 */
fill_kinfo_proc_kthread(struct thread *td, struct kinfo_proc *kp)
	/* Start from a zeroed record so unfilled fields read as 0/NULL. */
	bzero(kp, sizeof(*kp));

	/*
	 * Fill in fake proc information and semi-fake lwp info.
	 */
	kp->kp_tdev = NOUDEV;	/* kernel threads have no controlling tty */
	/* Thread name as the command; explicitly NUL-terminated. */
	strncpy(kp->kp_comm, td->td_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;
	kp->kp_flags = P_SYSTEM;
	/* The per-cpu idle thread is not reported as active. */
	if (td != &td->td_gd->gd_idlethread)
		kp->kp_stat = SACTIVE;
	/*
	 * NOTE(review): an else branch (idle-thread state) appears to have
	 * been dropped here in extraction -- confirm against upstream.
	 */
	kp->kp_ktaddr = (uintptr_t)td;
	/* No real proc/lwp behind this thread: pid/tid are -1. */
	kp->kp_lwp.kl_pid = -1;
	kp->kp_lwp.kl_tid = -1;
	kp->kp_lwp.kl_tdflags = td->td_flags;
	kp->kp_lwp.kl_mpcount = get_mplock_count(td);
	/*
	 * NOTE(review): kl_mpcount assigned twice in a row -- an
	 * #ifdef SMP/#else/#endif pair appears lost in extraction.
	 */
	kp->kp_lwp.kl_mpcount = 0;

	/* LWKT priority doubles as the fake rtprio. */
	kp->kp_lwp.kl_tdprio = td->td_pri;
	kp->kp_lwp.kl_rtprio.type = RTP_PRIO_THREAD;
	kp->kp_lwp.kl_rtprio.prio = td->td_pri;

	/* Tick accounting and the cpu the thread belongs to. */
	kp->kp_lwp.kl_uticks = td->td_uticks;
	kp->kp_lwp.kl_sticks = td->td_sticks;
	kp->kp_lwp.kl_iticks = td->td_iticks;
	kp->kp_lwp.kl_cpuid = td->td_gd->gd_cpuid;

	/* Sleep channel; run/sleep state derived from TDF_RUNQ. */
	kp->kp_lwp.kl_wchan = (uintptr_t)td->td_wchan;
	if (td->td_flags & TDF_RUNQ)
		kp->kp_lwp.kl_stat = LSRUN;
	/*
	 * NOTE(review): the "else" keyword before the LSSLEEP assignment
	 * below appears lost in extraction -- as written both assignments
	 * would run; confirm against upstream.
	 */
		kp->kp_lwp.kl_stat = LSSLEEP;
	/*
	 * NOTE(review): an "if (td->td_wmesg)" guard around the strncpy
	 * below may have been dropped in extraction.  kl_wmesg is
	 * explicitly NUL-terminated; strlcpy terminates kl_comm itself.
	 */
	strncpy(kp->kp_lwp.kl_wmesg, td->td_wmesg, WMESGLEN);
	kp->kp_lwp.kl_wmesg[WMESGLEN] = 0;
	strlcpy(kp->kp_lwp.kl_comm, td->td_comm, sizeof(kp->kp_lwp.kl_comm));