/*	$NetBSD: kvm_proc.c,v 1.83 2009/05/16 11:56:47 yamt Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#else
__RCSID("$NetBSD: kvm_proc.c,v 1.83 2009/05/16 11:56:47 yamt Exp $");
#endif
#endif /* LIBC_SCCS and not lint */
/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
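/*
 * Illustrative sketch (not part of this file): a minimal ps-like consumer
 * of this module.  It opens the live kernel, pulls the whole process table
 * through kvm_getproc2() and prints one line per process.  Error handling
 * is abbreviated, and opening /dev/mem read-only may require privileges.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *	#include <err.h>
 *
 *	int
 *	main(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		struct kinfo_proc2 *kp;
 *		char **argv;
 *		int i, nproc;
 *		kvm_t *kd;
 *
 *		kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *		if (kd == NULL)
 *			errx(1, "kvm_openfiles: %s", errbuf);
 *		kp = kvm_getproc2(kd, KERN_PROC_ALL, 0, sizeof(*kp), &nproc);
 *		if (kp == NULL)
 *			errx(1, "kvm_getproc2: %s", kvm_geterr(kd));
 *		for (i = 0; i < nproc; i++) {
 *			argv = kvm_getargv2(kd, &kp[i], 0);
 *			printf("%d\t%s\n", kp[i].p_pid,
 *			    (argv != NULL && argv[0] != NULL) ?
 *			    argv[0] : kp[i].p_comm);
 *		}
 *		(void)kvm_close(kd);
 *		return (0);
 *	}
 */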
#include <sys/param.h>
#include <sys/user.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/resourcevar.h>
#include <sys/mutex.h>
#include <sys/specificdata.h>

#include <errno.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_amap.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"
/*
 * Common info from kinfo_proc and kinfo_proc2 used by helper routines.
 */
struct miniproc {
	struct vmspace	*p_vmspace;
	char		p_stat;
	struct proc	*p_paddr;
	pid_t		p_pid;
};

/*
 * Convert from struct proc and kinfo_proc{,2} to miniproc.
 */
#define	PTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = NULL; \
		(p)->p_vmspace = (kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);

#define	KPTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->kp_proc.p_stat; \
		(p)->p_pid = (kp)->kp_proc.p_pid; \
		(p)->p_paddr = (kp)->kp_eproc.e_paddr; \
		(p)->p_vmspace = (kp)->kp_proc.p_vmspace; \
	} while (/*CONSTCOND*/0);

#define	KP2TOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = (void *)(long)(kp)->p_paddr; \
		(p)->p_vmspace = (void *)(long)(kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);
/*
 * NetBSD uses kauth(9) to manage credentials, which are stored in
 * kauth_cred_t, a kernel-only opaque type.  This is an embedded version
 * which is *INTERNAL* to kvm(3) so dumps can be read properly.
 *
 * Whenever NetBSD starts exporting credentials to userland consistently
 * (using 'struct uucred', or something) this will have to be updated again.
 */
struct kvm_kauth_cred {
	u_int cr_refcnt;		/* reference count */
	uint8_t cr_pad[CACHE_LINE_SIZE - sizeof(u_int)];
	uid_t cr_uid;			/* user id */
	uid_t cr_euid;			/* effective user id */
	uid_t cr_svuid;			/* saved effective user id */
	gid_t cr_gid;			/* group id */
	gid_t cr_egid;			/* effective group id */
	gid_t cr_svgid;			/* saved effective group id */
	u_int cr_ngroups;		/* number of groups */
	gid_t cr_groups[NGROUPS];	/* group memberships */
	specificdata_reference cr_sd;	/* specific data */
};

#define	KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (obj), sizeof(*obj)) != sizeof(*obj))
/* XXX: What uses these two functions? */
char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long,
		    u_long *));
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

static char	*_kvm_ureadm __P((kvm_t *, const struct miniproc *, u_long,
		    u_long *));
static ssize_t	kvm_ureadm __P((kvm_t *, const struct miniproc *, u_long,
		    char *, size_t));

static char	**kvm_argv __P((kvm_t *, const struct miniproc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
static char	**kvm_doargv __P((kvm_t *, const struct miniproc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static char	**kvm_doargv2 __P((kvm_t *, pid_t, int, int));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct miniproc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
static char *
_kvm_ureadm(kd, p, va, cnt)
	kvm_t *kd;
	const struct miniproc *p;
	u_long va;
	u_long *cnt;
{
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;

	if (kd->swapspc == NULL) {
		kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == NULL)
			return (NULL);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	for (;;) {
		if (KREAD(kd, addr, &vme))
			return (NULL);

		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (NULL);
	}

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return (NULL);

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return (NULL);

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
	/* sanity-check slot number */
	if (slot > amap.am_nslot)
		return (NULL);

	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return (NULL);

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return (NULL);

	addr = (u_long)anon.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return (NULL);

		if (_kvm_pread(kd, kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr) != kd->nbpg)
			return (NULL);
	} else {
		if (kd->swfd < 0 ||
		    _kvm_pread(kd, kd->swfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return (NULL);
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}
char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (_kvm_ureadm(kd, &mp, va, cnt));
}
/*
 * Convert credentials located at kernel space address 'cred' and store
 * them in the appropriate members of 'eproc'.
 */
static int
_kvm_convertcred(kvm_t *kd, u_long cred, struct eproc *eproc)
{
	struct kvm_kauth_cred kauthcred;
	struct ki_pcred *pc = &eproc->e_pcred;
	struct ki_ucred *uc = &eproc->e_ucred;

	if (KREAD(kd, cred, &kauthcred) != 0)
		return (-1);

	/* inlined version of kauth_cred_to_pcred, see kauth(9). */
	pc->p_ruid = kauthcred.cr_uid;
	pc->p_svuid = kauthcred.cr_svuid;
	pc->p_rgid = kauthcred.cr_gid;
	pc->p_svgid = kauthcred.cr_svgid;
	pc->p_refcnt = kauthcred.cr_refcnt;
	pc->p_pad = NULL;

	/* inlined version of kauth_cred_to_ucred(), see kauth(9). */
	uc->cr_ref = kauthcred.cr_refcnt;
	uc->cr_uid = kauthcred.cr_euid;
	uc->cr_gid = kauthcred.cr_egid;
	uc->cr_ngroups = (uint32_t)MIN(kauthcred.cr_ngroups,
	    sizeof(uc->cr_groups) / sizeof(uc->cr_groups[0]));
	memcpy(uc->cr_groups, kauthcred.cr_groups,
	    uc->cr_ngroups * sizeof(uc->cr_groups[0]));

	return (0);
}
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	int cnt = 0;
	int nlwps;
	struct kinfo_lwp *kl;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (_kvm_convertcred(kd, (u_long)proc.p_cred, &eproc) != 0) {
			_kvm_err(kd, kd->program,
			    "can't read proc credentials at %p", p);
			return (-1);
		}

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_lflag & PL_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = (uint32_t)tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = (uint32_t)NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		eproc.e_sid = sess.s_sid;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		/*
		 * Fill in the old-style proc.p_wmesg by copying the wmesg
		 * from the first available LWP.
		 */
		kl = kvm_getlwps(kd, proc.p_pid,
		    (u_long)PTRTOUINT64(eproc.e_paddr),
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl) {
			if (nlwps > 0) {
				strcpy(eproc.e_wmesg, kl[0].l_wmesg);
			}
		}
		(void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
		    sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_lflag & PL_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		memcpy(&bp->kp_proc, &proc, sizeof(proc));
		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}
/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt,
	    maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}
struct kinfo_proc2 *
kvm_getproc2(kd, op, arg, esize, cnt)
	kvm_t *kd;
	int op, arg;
	size_t esize;
	int *cnt;
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
again:
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = (int) (size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			if (errno == ENOMEM) {
				goto again;
			}
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int) (size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			struct timeval tv;

			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);

			if (kl == NULL) {
				_kvm_syserr(kd, NULL,
				    "kvm_getlwps() failed on process %u\n",
				    kp->kp_proc.p_pid);
				if (nlwps == 0)
					return NULL;
				else
					continue;
			}

			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
			kp2p->p_ru = 0;
#else
			kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
#endif

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = 0;
			bintime2timeval(&kp->kp_proc.p_rtime, &tv);
			kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
			kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
			kp2p->p_cpticks = kl[0].l_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigpend.sp_set,
			    sizeof(ki_sigset_t));
			memset(&kp2p->p_sigmask, 0,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kl[0].l_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_priority;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = kp->kp_proc.p_xstat;
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
			kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size;
			/* Adjust mapped size */
			kp2p->p_vm_msize =
			    (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
			    kp->kp_eproc.e_vm.vm_issize +
			    kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}
struct kinfo_lwp *
kvm_getlwps(kd, pid, paddr, esize, cnt)
	kvm_t *kd;
	int pid;
	u_long paddr;
	size_t esize;
	int *cnt;
{
	size_t size;
	int mib[5], nlwps;
	ssize_t st;
	struct kinfo_lwp *kl;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_LWP;
		mib[2] = pid;
		mib[3] = (int)esize;
		mib[4] = 0;
again:
		st = sysctl(mib, 5, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		mib[4] = (int) (size / esize);
		KVM_ALLOC(kd, lwpbase, size);
		st = sysctl(mib, 5, kd->lwpbase, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			case ENOMEM:
				goto again;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		nlwps = (int) (size / esize);
	} else {
		/* grovel through the memory image */
		struct proc p;
		struct lwp l;
		u_long laddr;
		void *back;
		int i;

		st = kvm_read(kd, paddr, &p, sizeof(p));
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}

		nlwps = p.p_nlwps;
		size = nlwps * sizeof(*kd->lwpbase);
		KVM_ALLOC(kd, lwpbase, size);
		laddr = (u_long)PTRTOUINT64(p.p_lwps.lh_first);
		for (i = 0; (i < nlwps) && (laddr != 0); i++) {
			st = kvm_read(kd, laddr, &l, sizeof(l));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl = &kd->lwpbase[i];
			kl->l_laddr = laddr;
			kl->l_forw = PTRTOUINT64(l.l_runq.tqe_next);
			laddr = (u_long)PTRTOUINT64(l.l_runq.tqe_prev);
			st = kvm_read(kd, laddr, &back, sizeof(back));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl->l_back = PTRTOUINT64(back);
			kl->l_addr = PTRTOUINT64(l.l_addr);
			kl->l_lid = l.l_lid;
			kl->l_flag = l.l_flag;
			kl->l_swtime = l.l_swtime;
			kl->l_slptime = l.l_slptime;
			kl->l_schedflags = 0; /* XXX */
			kl->l_holdcnt = 0;
			kl->l_priority = l.l_priority;
			kl->l_usrpri = l.l_priority;
			kl->l_stat = l.l_stat;
			kl->l_wchan = PTRTOUINT64(l.l_wchan);
			if (l.l_wmesg)
				(void)kvm_read(kd, (u_long)l.l_wmesg,
				    kl->l_wmesg, (size_t)WMESGLEN);
			kl->l_cpuid = KI_NOCPU;
			laddr = (u_long)PTRTOUINT64(l.l_sibling.le_next);
		}
	}

	*cnt = nlwps;
	return (kd->lwpbase);
}
struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		KVM_ALLOC(kd, procbase, size);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%lu total, %lu chunks)",
			    (u_long)size, (u_long)sizeof(struct kinfo_proc));
			return (NULL);
		}
		nprocs = (int) (size / sizeof(struct kinfo_proc));
	} else {
		struct nlist nl[4], *p;

		(void)memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				continue;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (NULL);
		}
		size = nprocs * sizeof(*kd->procbase);
		KVM_ALLOC(kd, procbase, size);
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
		if (nprocs < 0)
			return (NULL);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = realloc(p, n);

	if (np == NULL)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}
/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	const struct miniproc *p;
	u_long addr;
	int narg;
	int maxcnt;
{
	char *np, *cp, *ep, *ap;
	u_long oaddr = (u_long)~0L;
	u_long len;
	size_t cc;
	char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}
	if (kd->argspc == NULL) {
		kd->argspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argspc == NULL)
			return (NULL);
		kd->argspc_len = kd->nbpg;
	}
	if (kd->argbuf == NULL) {
		kd->argbuf = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argbuf == NULL)
			return (NULL);
	}
	cc = sizeof(char *) * narg;
	if (kvm_ureadm(kd, p, addr, (void *)kd->argv, cc) != cc)
		return (NULL);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != NULL) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_ureadm(kd, p, addr, kd->argbuf,
			    (size_t)kd->nbpg) != kd->nbpg)
				return (NULL);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + (size_t)addr;
		cc = kd->nbpg - (size_t)addr;
		if (maxcnt > 0 && cc > (size_t)(maxcnt - len))
			cc = (size_t)(maxcnt - len);
		ep = memchr(cp, '\0', cc);
		if (ep != NULL)
			cc = ep - cp + 1;
		if (len + cc > kd->argspc_len) {
			ptrdiff_t off;
			char **pp;
			char *op = kd->argspc;

			kd->argspc_len *= 2;
			kd->argspc = _kvm_realloc(kd, kd->argspc,
			    kd->argspc_len);
			if (kd->argspc == NULL)
				return (NULL);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != NULL) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == NULL) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = NULL;
	return (kd->argv);
}
static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{

	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{

	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}
/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct miniproc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, &kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}
static char **
kvm_doargv(kd, p, nchr, info)
	kvm_t *kd;
	const struct miniproc *p;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB)
		return (NULL);
	cnt = (int)kvm_ureadm(kd, p, kd->usrstack - sizeof(arginfo),
	    (void *)&arginfo, sizeof(arginfo));
	if (cnt != sizeof(arginfo))
		return (NULL);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (NULL);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != NULL && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)p->p_paddr, p))
		ap = NULL;
	return (ap);
}
/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_e));
}
static char **
kvm_doargv2(kd, pid, type, nchr)
	kvm_t *kd;
	pid_t pid;
	int type;
	int nchr;
{
	size_t bufs;
	int narg, mib[4];
	size_t newargspc_len;
	char **ap, *bp, *endp;

	/*
	 * Check that there aren't an unreasonable number of arguments.
	 */
	if (nchr > ARG_MAX)
		return (NULL);

	if (nchr == 0)
		nchr = ARG_MAX;

	/* Get number of strings in argv */
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type == KERN_PROC_ARGV ? KERN_PROC_NARGV : KERN_PROC_NENV;
	bufs = sizeof(narg);
	if (sysctl(mib, 4, &narg, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}

	newargspc_len = MIN(nchr, ARG_MAX);
	KVM_ALLOC(kd, argspc, newargspc_len);
	memset(kd->argspc, 0, (size_t)kd->argspc_len);	/* XXX necessary? */

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type;
	bufs = kd->argspc_len;
	if (sysctl(mib, 4, kd->argspc, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	bp = kd->argspc;
	bp[kd->argspc_len-1] = '\0';	/* make sure the string ends with nul */
	ap = kd->argv;
	endp = bp + MIN(nchr, bufs);

	while (bp < endp) {
		*ap++ = bp;
		/*
		 * XXX: don't need following anymore, or stick check
		 * for max argc in above while loop?
		 */
		if (ap >= kd->argv + kd->argc) {
			kd->argc *= 2;
			kd->argv = _kvm_realloc(kd, kd->argv,
			    kd->argc * sizeof(*kd->argv));
			ap = kd->argv;
		}
		bp += strlen(bp) + 1;
	}
	*ap = NULL;

	return (kd->argv);
}
char **
kvm_getargv2(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc2 *kp;
	int nchr;
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ARGV, nchr));
}

char **
kvm_getenvv2(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc2 *kp;
	int nchr;
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ENV, nchr));
}
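/*
 * Illustrative sketch (not part of this file): fetching the environment
 * of a single process with the kinfo_proc2 interface, assuming "kd" is an
 * open kvm descriptor and "pid" holds the target process id.  The vectors
 * returned by kvm_getargv2()/kvm_getenvv2() point into storage owned by
 * "kd" and are reused by the next call, so copy them if they must persist.
 *
 *	struct kinfo_proc2 *kp;
 *	char **envv;
 *	int cnt;
 *
 *	kp = kvm_getproc2(kd, KERN_PROC_PID, pid, sizeof(*kp), &cnt);
 *	if (kp != NULL && cnt == 1) {
 *		envv = kvm_getenvv2(kd, kp, 0);
 *		for (; envv != NULL && *envv != NULL; envv++)
 *			printf("%s\n", *envv);
 *	}
 */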
/*
 * Read from user space.  The user context is given by p.
 */
static ssize_t
kvm_ureadm(kd, p, uva, buf, len)
	kvm_t *kd;
	const struct miniproc *p;
	u_long uva;
	char *buf;
	size_t len;
{
	char *cp;

	cp = buf;
	while (len > 0) {
		size_t cc;
		char *dp;
		u_long cnt;

		dp = _kvm_ureadm(kd, p, uva, &cnt);
		if (dp == NULL) {
			_kvm_err(kd, 0, "invalid address (%lx)", uva);
			return (0);
		}
		cc = (size_t)MIN(cnt, len);
		memcpy(cp, dp, cc);
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	const struct proc *p;
	u_long uva;
	char *buf;
	size_t len;
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (kvm_ureadm(kd, &mp, uva, buf, len));
}