lib/libkvm/kvm.c
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm.c	8.2 (Berkeley) 2/13/94";
#endif
#endif /* LIBC_SCCS and not lint */
#include <sys/param.h>
#include <sys/fnv_hash.h>

#define	_WANT_VNET

#include <sys/user.h>
#include <sys/linker.h>
#include <sys/pcpu.h>
#include <sys/stat.h>

#include <net/vnet.h>

#include <errno.h>	/* errno is used below; added so the file is self-contained */
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "kvm_private.h"
SET_DECLARE(kvm_arch, struct kvm_arch);

/* from src/lib/libc/gen/nlist.c */
int __fdnlist(int, struct nlist *);

static int
kvm_fdnlist(kvm_t *kd, struct kvm_nlist *list)
{
	kvaddr_t addr;
	int error, nfail;

	if (kd->resolve_symbol == NULL) {
		struct nlist *nl;
		int count, i;

		for (count = 0; list[count].n_name != NULL &&
		    list[count].n_name[0] != '\0'; count++)
			;
		nl = calloc(count + 1, sizeof(*nl));
		for (i = 0; i < count; i++)
			nl[i].n_name = list[i].n_name;
		nfail = __fdnlist(kd->nlfd, nl);
		for (i = 0; i < count; i++) {
			list[i].n_type = nl[i].n_type;
			list[i].n_value = nl[i].n_value;
		}
		free(nl);
		return (nfail);
	}

	nfail = 0;
	while (list->n_name != NULL && list->n_name[0] != '\0') {
		error = kd->resolve_symbol(list->n_name, &addr);
		if (error != 0) {
			nfail++;
			list->n_value = 0;
			list->n_type = 0;
		} else {
			list->n_value = addr;
			list->n_type = N_DATA | N_EXT;
		}
		list++;
	}
	return (nfail);
}
char *
kvm_geterr(kvm_t *kd)
{

	return (kd->errbuf);
}

#include <stdarg.h>

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fputc('\n', stderr);
	} else
		(void)vsnprintf(kd->errbuf,
		    sizeof(kd->errbuf), fmt, ap);

	va_end(ap);
}
void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	if (program != NULL) {
		(void)fprintf(stderr, "%s: ", program);
		(void)vfprintf(stderr, fmt, ap);
		(void)fprintf(stderr, ": %s\n", strerror(errno));
	} else {
		char *cp = kd->errbuf;

		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
		n = strlen(cp);
		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
		    strerror(errno));
	}
	va_end(ap);
}
void *
_kvm_malloc(kvm_t *kd, size_t n)
{
	void *p;

	if ((p = calloc(n, sizeof(char))) == NULL)
		_kvm_err(kd, kd->program, "can't allocate %zu bytes: %s",
		    n, strerror(errno));
	return (p);
}
static int
_kvm_read_kernel_ehdr(kvm_t *kd)
{
	Elf *elf;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		_kvm_err(kd, kd->program, "Unsupported libelf");
		return (-1);
	}
	elf = elf_begin(kd->nlfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "kernel is not an ELF file");
		return (-1);
	}
	if (gelf_getehdr(elf, &kd->nlehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		elf_end(elf);
		return (-1);
	}
	elf_end(elf);

	switch (kd->nlehdr.e_ident[EI_DATA]) {
	case ELFDATA2LSB:
	case ELFDATA2MSB:
		return (0);
	default:
		_kvm_err(kd, kd->program,
		    "unsupported ELF data encoding for kernel");
		return (-1);
	}
}
int
_kvm_probe_elf_kernel(kvm_t *kd, int class, int machine)
{

	return (kd->nlehdr.e_ident[EI_CLASS] == class &&
	    kd->nlehdr.e_type == ET_EXEC &&
	    kd->nlehdr.e_machine == machine);
}

int
_kvm_is_minidump(kvm_t *kd)
{
	char minihdr[8];

	if (kd->rawdump)
		return (0);
	if (pread(kd->pmfd, &minihdr, 8, 0) == 8 &&
	    memcmp(&minihdr, "minidump", 8) == 0)
		return (1);
	return (0);
}
/*
 * The powerpc backend has a hack to strip a leading kerneldump
 * header from the core before treating it as an ELF header.
 *
 * We can add that here if we can get a change to libelf to support
 * an initial offset into the file.  Alternatively we could patch
 * savecore to extract cores from a regular file instead.
 */
int
_kvm_read_core_phdrs(kvm_t *kd, size_t *phnump, GElf_Phdr **phdrp)
{
	GElf_Ehdr ehdr;
	GElf_Phdr *phdr;
	Elf *elf;
	size_t i, phnum;

	elf = elf_begin(kd->pmfd, ELF_C_READ, NULL);
	if (elf == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		return (-1);
	}
	if (elf_kind(elf) != ELF_K_ELF) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getclass(elf) != kd->nlehdr.e_ident[EI_CLASS]) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (gelf_getehdr(elf, &ehdr) == NULL) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}
	if (ehdr.e_type != ET_CORE) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}
	if (ehdr.e_machine != kd->nlehdr.e_machine) {
		_kvm_err(kd, kd->program, "invalid core");
		goto bad;
	}

	if (elf_getphdrnum(elf, &phnum) == -1) {
		_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
		goto bad;
	}

	phdr = calloc(phnum, sizeof(*phdr));
	if (phdr == NULL) {
		_kvm_err(kd, kd->program, "failed to allocate phdrs");
		goto bad;
	}

	for (i = 0; i < phnum; i++) {
		if (gelf_getphdr(elf, i, &phdr[i]) == NULL) {
			_kvm_err(kd, kd->program, "%s", elf_errmsg(0));
			goto bad;
		}
	}
	elf_end(elf);
	*phnump = phnum;
	*phdrp = phdr;
	return (0);

bad:
	elf_end(elf);
	return (-1);
}
static void
_kvm_hpt_insert(struct hpt *hpt, uint64_t pa, off_t off)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	hpte = malloc(sizeof(*hpte));
	hpte->pa = pa;
	hpte->off = off;
	hpte->next = hpt->hpt_head[fnv];
	hpt->hpt_head[fnv] = hpte;
}

void
_kvm_hpt_init(kvm_t *kd, struct hpt *hpt, void *base, size_t len, off_t off,
    int page_size, int word_size)
{
	uint64_t bits, idx, pa;
	uint64_t *base64;
	uint32_t *base32;

	base64 = base;
	base32 = base;
	for (idx = 0; idx < len / word_size; idx++) {
		if (word_size == sizeof(uint64_t))
			bits = _kvm64toh(kd, base64[idx]);
		else
			bits = _kvm32toh(kd, base32[idx]);
		pa = idx * word_size * NBBY * page_size;
		for (; bits != 0; bits >>= 1, pa += page_size) {
			if ((bits & 1) == 0)
				continue;
			_kvm_hpt_insert(hpt, pa, off);
			off += page_size;
		}
	}
}

off_t
_kvm_hpt_find(struct hpt *hpt, uint64_t pa)
{
	struct hpte *hpte;
	uint32_t fnv = FNV1_32_INIT;

	fnv = fnv_32_buf(&pa, sizeof(pa), fnv);
	fnv &= (HPT_SIZE - 1);
	for (hpte = hpt->hpt_head[fnv]; hpte != NULL; hpte = hpte->next) {
		if (pa == hpte->pa)
			return (hpte->off);
	}
	return (-1);
}

void
_kvm_hpt_free(struct hpt *hpt)
{
	struct hpte *hpte, *next;
	int i;

	for (i = 0; i < HPT_SIZE; i++) {
		for (hpte = hpt->hpt_head[i]; hpte != NULL; hpte = next) {
			next = hpte->next;
			free(hpte);
		}
	}
}
static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
	struct kvm_arch **parch;
	struct stat st;

	kd->vmfd = -1;
	kd->pmfd = -1;
	kd->nlfd = -1;
	kd->vmst = NULL;
	kd->procbase = NULL;
	kd->argspc = NULL;
	kd->argv = NULL;

	if (uf == NULL)
		uf = getbootfile();
	else if (strlen(uf) >= MAXPATHLEN) {
		_kvm_err(kd, kd->program, "exec file name too long");
		goto failed;
	}
	if (flag & ~O_RDWR) {
		_kvm_err(kd, kd->program, "bad flags arg");
		goto failed;
	}
	if (mf == NULL)
		mf = _PATH_MEM;

	if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (fstat(kd->pmfd, &st) < 0) {
		_kvm_syserr(kd, kd->program, "%s", mf);
		goto failed;
	}
	if (S_ISREG(st.st_mode) && st.st_size <= 0) {
		errno = EINVAL;
		_kvm_syserr(kd, kd->program, "empty file");
		goto failed;
	}
	if (S_ISCHR(st.st_mode)) {
		/*
		 * If this is a character special device, then check that
		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
		 * make it work for either /dev/mem or /dev/kmem -- in either
		 * case you're working with a live kernel.)
		 */
		if (strcmp(mf, _PATH_DEVNULL) == 0) {
			kd->vmfd = open(_PATH_DEVNULL, O_RDONLY | O_CLOEXEC);
			return (kd);
		} else if (strcmp(mf, _PATH_MEM) == 0) {
			if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC)) <
			    0) {
				_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
				goto failed;
			}
			return (kd);
		}
	}

	/*
	 * This is a crash dump.
	 * Open the namelist fd and determine the architecture.
	 */
	if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
		_kvm_syserr(kd, kd->program, "%s", uf);
		goto failed;
	}
	if (_kvm_read_kernel_ehdr(kd) < 0)
		goto failed;
	if (strncmp(mf, _PATH_FWMEM, strlen(_PATH_FWMEM)) == 0)
		kd->rawdump = 1;
	SET_FOREACH(parch, kvm_arch) {
		if ((*parch)->ka_probe(kd)) {
			kd->arch = *parch;
			break;
		}
	}
	if (kd->arch == NULL) {
		_kvm_err(kd, kd->program, "unsupported architecture");
		goto failed;
	}

	/*
	 * Non-native kernels require a symbol resolver.
	 */
	if (!kd->arch->ka_native(kd) && kd->resolve_symbol == NULL) {
		_kvm_err(kd, kd->program,
		    "non-native kernel requires a symbol resolver");
		goto failed;
	}

	/*
	 * Initialize the virtual address translation machinery.
	 */
	if (kd->arch->ka_initvtop(kd) < 0)
		goto failed;
	return (kd);
failed:
	/*
	 * Copy out the error if doing sane error semantics.
	 */
	if (errout != NULL)
		strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
	(void)kvm_close(kd);
	return (0);
}
kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf __unused, int flag,
    char *errout)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (0);
	}
	return (_kvm_open(kd, uf, mf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf __unused, int flag,
    const char *errstr)
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errstr != NULL)
			(void)fprintf(stderr, "%s: %s\n",
			    errstr, strerror(errno));
		return (0);
	}
	kd->program = errstr;
	return (_kvm_open(kd, uf, mf, flag, NULL));
}

kvm_t *
kvm_open2(const char *uf, const char *mf, int flag, char *errout,
    int (*resolver)(const char *, kvaddr_t *))
{
	kvm_t *kd;

	if ((kd = calloc(1, sizeof(*kd))) == NULL) {
		if (errout != NULL)
			(void)strlcpy(errout, strerror(errno),
			    _POSIX2_LINE_MAX);
		return (0);
	}
	kd->resolve_symbol = resolver;
	return (_kvm_open(kd, uf, mf, flag, errout));
}
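
/*
 * Example (editor's illustrative sketch, not part of the library): open the
 * running kernel with kvm_open2() and report failure through the
 * caller-supplied error buffer.  Assumes <fcntl.h>, <kvm.h>, <limits.h>,
 * <stdio.h> and <stdlib.h>; variable names are arbitrary.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	kd = kvm_open2(NULL, NULL, O_RDONLY, errbuf, NULL);
 *	if (kd == NULL) {
 *		fprintf(stderr, "kvm_open2: %s\n", errbuf);
 *		exit(1);
 *	}
 *	(use kd with kvm_nlist2(), kvm_read2(), ...)
 *	kvm_close(kd);
 */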
int
kvm_close(kvm_t *kd)
{
	int error = 0;

	if (kd->vmst != NULL)
		kd->arch->ka_freevtop(kd);
	if (kd->pmfd >= 0)
		error |= close(kd->pmfd);
	if (kd->vmfd >= 0)
		error |= close(kd->vmfd);
	if (kd->nlfd >= 0)
		error |= close(kd->nlfd);
	if (kd->procbase != 0)
		free((void *)kd->procbase);
	if (kd->argbuf != 0)
		free((void *) kd->argbuf);
	if (kd->argspc != 0)
		free((void *) kd->argspc);
	if (kd->argv != 0)
		free((void *)kd->argv);
	free((void *)kd);

	return (0);
}
/*
 * Walk the list of unresolved symbols, generate a new list and prefix the
 * symbol names, try again, and merge back what we could resolve.
 */
static int
kvm_fdnlist_prefix(kvm_t *kd, struct kvm_nlist *nl, int missing,
    const char *prefix, kvaddr_t (*validate_fn)(kvm_t *, kvaddr_t))
{
	struct kvm_nlist *n, *np, *p;
	char *cp, *ce;
	const char *ccp;
	size_t len;
	int slen, unresolved;

	/*
	 * Calculate the space we need to malloc for nlist and names.
	 * We are going to store the name twice for later lookups: once
	 * with the prefix and once the unmodified name delimited by \0.
	 */
	len = 0;
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		len += sizeof(struct kvm_nlist) + strlen(prefix) +
		    2 * (strlen(p->n_name) + 1);
		unresolved++;
	}
	if (unresolved == 0)
		return (unresolved);
	/* Add space for the terminating nlist entry. */
	len += sizeof(struct kvm_nlist);
	unresolved++;

	/* Alloc one chunk for (nlist, [names]) and setup pointers. */
	n = np = malloc(len);
	if (n == NULL)
		return (missing);
	bzero(n, len);
	cp = ce = (char *)np;
	cp += unresolved * sizeof(struct kvm_nlist);
	ce += len;

	/* Generate shortened nlist with special prefix. */
	unresolved = 0;
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;
		*np = *p;
		/* Save the new\0orig. name so we can later match it again. */
		slen = snprintf(cp, ce - cp, "%s%s%c%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
		    (p->n_name + 1) : p->n_name, '\0', p->n_name);
		if (slen < 0 || slen >= ce - cp)
			continue;
		np->n_name = cp;
		cp += slen + 1;
		np++;
		unresolved++;
	}

	/* Do lookup on the reduced list. */
	np = n;
	unresolved = kvm_fdnlist(kd, np);

	/* Check if we could resolve further symbols and update the list. */
	if (unresolved >= 0 && unresolved < missing) {
		/* Find the first freshly resolved entry. */
		for (; np->n_name && np->n_name[0]; np++)
			if (np->n_type != N_UNDF)
				break;
		/*
		 * The lists are both in the same order,
		 * so we can walk them in parallel.
		 */
		for (p = nl; np->n_name && np->n_name[0] &&
		    p->n_name && p->n_name[0]; ++p) {
			if (p->n_type != N_UNDF)
				continue;
			/* Skip expanded name and compare to orig. one. */
			ccp = np->n_name + strlen(np->n_name) + 1;
			if (strcmp(ccp, p->n_name) != 0)
				continue;
			/* Update nlist with new, translated results. */
			p->n_type = np->n_type;
			if (validate_fn)
				p->n_value = (*validate_fn)(kd, np->n_value);
			else
				p->n_value = np->n_value;
			missing--;
			/* Find next freshly resolved entry. */
			for (np++; np->n_name && np->n_name[0]; np++)
				if (np->n_type != N_UNDF)
					break;
		}
	}
	/* We could assert missing = unresolved here. */

	free(n);
	return (unresolved);
}
int
_kvm_nlist(kvm_t *kd, struct kvm_nlist *nl, int initialize)
{
	struct kvm_nlist *p;
	int nvalid;
	struct kld_sym_lookup lookup;
	int error;
	const char *prefix = "";
	char symname[1024]; /* XXX-BZ symbol name length limit? */
	int tried_vnet, tried_dpcpu;

	/*
	 * If we can't use the kld symbol lookup, revert to the
	 * slow library call.
	 */
	if (!ISALIVE(kd)) {
		error = kvm_fdnlist(kd, nl);
		if (error <= 0)			/* Hard error or success. */
			return (error);

		if (_kvm_vnet_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    VNET_SYMPREFIX, _kvm_vnet_validaddr);

		if (error > 0 && _kvm_dpcpu_initialized(kd, initialize))
			error = kvm_fdnlist_prefix(kd, nl, error,
			    DPCPU_SYMPREFIX, _kvm_dpcpu_validaddr);

		return (error);
	}

	/*
	 * We can use the kld lookup syscall.  Go through each nlist entry
	 * and look it up with a kldsym(2) syscall.
	 */
	nvalid = 0;
	tried_vnet = 0;
	tried_dpcpu = 0;
again:
	for (p = nl; p->n_name && p->n_name[0]; ++p) {
		if (p->n_type != N_UNDF)
			continue;

		lookup.version = sizeof(lookup);
		lookup.symvalue = 0;
		lookup.symsize = 0;

		error = snprintf(symname, sizeof(symname), "%s%s", prefix,
		    (prefix[0] != '\0' && p->n_name[0] == '_') ?
		    (p->n_name + 1) : p->n_name);
		if (error < 0 || error >= (int)sizeof(symname))
			continue;
		lookup.symname = symname;
		if (lookup.symname[0] == '_')
			lookup.symname++;

		if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
			p->n_type = N_TEXT;
			if (_kvm_vnet_initialized(kd, initialize) &&
			    strcmp(prefix, VNET_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_vnet_validaddr(kd, lookup.symvalue);
			else if (_kvm_dpcpu_initialized(kd, initialize) &&
			    strcmp(prefix, DPCPU_SYMPREFIX) == 0)
				p->n_value =
				    _kvm_dpcpu_validaddr(kd, lookup.symvalue);
			else
				p->n_value = lookup.symvalue;
			++nvalid;
			/* lookup.symsize */
		}
	}

	/*
	 * Check the number of entries that weren't found. If they exist,
	 * try again with a prefix for virtualized or DPCPU symbol names.
	 */
	error = ((p - nl) - nvalid);
	if (error && _kvm_vnet_initialized(kd, initialize) && !tried_vnet) {
		tried_vnet = 1;
		prefix = VNET_SYMPREFIX;
		goto again;
	}
	if (error && _kvm_dpcpu_initialized(kd, initialize) && !tried_dpcpu) {
		tried_dpcpu = 1;
		prefix = DPCPU_SYMPREFIX;
		goto again;
	}

	/*
	 * Return the number of entries that weren't found. If they exist,
	 * also fill internal error buffer.
	 */
	error = ((p - nl) - nvalid);
	if (error)
		_kvm_syserr(kd, kd->program, "kvm_nlist");
	return (error);
}
int
kvm_nlist2(kvm_t *kd, struct kvm_nlist *nl)
{

	/*
	 * If called via the public interface, permit initialization of
	 * further virtualized modules on demand.
	 */
	return (_kvm_nlist(kd, nl, 1));
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
	struct kvm_nlist *kl;
	int count, i, nfail;

	/*
	 * Avoid reporting truncated addresses by failing for non-native
	 * cores.
	 */
	if (!kvm_native(kd)) {
		_kvm_err(kd, kd->program, "kvm_nlist of non-native vmcore");
		return (-1);
	}

	for (count = 0; nl[count].n_name != NULL && nl[count].n_name[0] != '\0';
	    count++)
		;
	if (count == 0)
		return (0);
	kl = calloc(count + 1, sizeof(*kl));
	for (i = 0; i < count; i++)
		kl[i].n_name = nl[i].n_name;
	nfail = kvm_nlist2(kd, kl);
	for (i = 0; i < count; i++) {
		nl[i].n_type = kl[i].n_type;
		nl[i].n_other = 0;
		nl[i].n_desc = 0;
		nl[i].n_value = kl[i].n_value;
	}
	free(kl);
	return (nfail);
}
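
/*
 * Example (editor's illustrative sketch): resolve a kernel symbol with
 * kvm_nlist2().  The symbol name "_hz" and the use of errx() from <err.h>
 * are assumptions for illustration; "kd" is an open handle as in the
 * kvm_open2() sketch above.
 *
 *	struct kvm_nlist nl[] = {
 *		{ .n_name = "_hz" },
 *		{ .n_name = NULL },
 *	};
 *
 *	if (kvm_nlist2(kd, nl) != 0)
 *		errx(1, "kvm_nlist2: %s", kvm_geterr(kd));
 *
 * On success nl[0].n_value holds the kernel virtual address of "hz".
 */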
ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{

	return (kvm_read2(kd, kva, buf, len));
}

ssize_t
kvm_read2(kvm_t *kd, kvaddr_t kva, void *buf, size_t len)
{
	int cc;
	ssize_t cr;
	off_t pa;
	char *cp;

	if (ISALIVE(kd)) {
		/*
		 * We're using /dev/kmem.  Just read straight from the
		 * device and let the active kernel do the address translation.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (0x%jx)",
			    (uintmax_t)kva);
			return (-1);
		}
		cr = read(kd->vmfd, buf, len);
		if (cr < 0) {
			_kvm_syserr(kd, 0, "kvm_read");
			return (-1);
		} else if (cr < (ssize_t)len)
			_kvm_err(kd, kd->program, "short read");
		return (cr);
	}

	cp = buf;
	while (len > 0) {
		cc = kd->arch->ka_kvatop(kd, kva, &pa);
		if (cc == 0)
			return (-1);
		if (cc > (ssize_t)len)
			cc = len;
		errno = 0;
		if (lseek(kd->pmfd, pa, 0) == -1 && errno != 0) {
			_kvm_syserr(kd, 0, _PATH_MEM);
			break;
		}
		cr = read(kd->pmfd, cp, cc);
		if (cr < 0) {
			_kvm_syserr(kd, kd->program, "kvm_read");
			break;
		}
		/*
		 * If ka_kvatop returns a bogus value or our core file is
		 * truncated, we might wind up seeking beyond the end of the
		 * core file in which case the read will return 0 (EOF).
		 */
		if (cr == 0)
			break;
		cp += cr;
		kva += cr;
		len -= cr;
	}

	return (cp - (char *)buf);
}
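
/*
 * Example (editor's illustrative sketch): copy a kernel "int" out of a live
 * kernel or a crash dump with kvm_read2(), using the address resolved in the
 * kvm_nlist2() sketch above.  "hz" and the errx() error handling are
 * illustrative assumptions.
 *
 *	int hz;
 *
 *	if (kvm_read2(kd, nl[0].n_value, &hz, sizeof(hz)) !=
 *	    (ssize_t)sizeof(hz))
 *		errx(1, "kvm_read2: %s", kvm_geterr(kd));
 */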
ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
	int cc;

	if (ISALIVE(kd)) {
		/*
		 * Just like kvm_read, only we write.
		 */
		errno = 0;
		if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", kva);
			return (-1);
		}
		cc = write(kd->vmfd, buf, len);
		if (cc < 0) {
			_kvm_syserr(kd, 0, "kvm_write");
			return (-1);
		} else if ((size_t)cc < len)
			_kvm_err(kd, kd->program, "short write");
		return (cc);
	} else {
		_kvm_err(kd, kd->program,
		    "kvm_write not implemented for dead kernels");
		return (-1);
	}
	/* NOTREACHED */
}

int
kvm_native(kvm_t *kd)
{

	if (ISALIVE(kd))
		return (1);
	return (kd->arch->ka_native(kd));
}