/*-
 * Copyright (c) 1989, 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kvm.c	8.2 (Berkeley) 2/13/94
 * $FreeBSD: src/lib/libkvm/kvm.c,v 1.12.2.3 2002/09/13 14:53:43 nectar Exp $
 * $DragonFly: src/lib/libkvm/kvm.c,v 1.12 2008/02/13 00:12:46 corecode Exp $
 */

#include <sys/user.h>	/* MUST BE FIRST */
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/linker.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <machine/vmparam.h>

#include <ctype.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <paths.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>

#include "kvm_private.h"

/* from src/lib/libc/gen/nlist.c */
int __fdnlist (int, struct nlist *);

char *
kvm_geterr(kvm_t *kd)
{
        return (kd->errbuf);
}

/*
 * Report an error using printf style arguments.  "program" is kd->program
 * on hard errors, and 0 on soft errors, so that under sun error emulation,
 * only hard errors are printed out (otherwise, programs like gdb will
 * generate tons of error messages when trying to access bogus pointers).
 */
void
_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        if (program != NULL) {
                (void)fprintf(stderr, "%s: ", program);
                (void)vfprintf(stderr, fmt, ap);
                (void)fputc('\n', stderr);
        } else
                (void)vsnprintf(kd->errbuf,
                    sizeof(kd->errbuf), (char *)fmt, ap);

        va_end(ap);
}
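
/*
 * Editorial illustration (not part of the original source): the hard/soft
 * error convention described above, as used elsewhere in this file.
 * Passing kd->program marks a hard error printed to stderr; passing 0
 * records a soft error in kd->errbuf for later retrieval via kvm_geterr().
 *
 *      _kvm_err(kd, kd->program, "exec file name too long");  hard error
 *      _kvm_err(kd, 0, "invalid address (%x)", kva);           soft error
 */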

void
_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
{
        va_list ap;
        int n;

        va_start(ap, fmt);
        if (program != NULL) {
                (void)fprintf(stderr, "%s: ", program);
                (void)vfprintf(stderr, fmt, ap);
                (void)fprintf(stderr, ": %s\n", strerror(errno));
        } else {
                char *cp = kd->errbuf;

                (void)vsnprintf(cp, sizeof(kd->errbuf), (char *)fmt, ap);
                n = strlen(cp);
                (void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
                    strerror(errno));
        }
        va_end(ap);
}

void *
_kvm_malloc(kvm_t *kd, size_t n)
{
        void *p;

        if ((p = calloc(n, sizeof(char))) == NULL)
                _kvm_err(kd, kd->program, "can't allocate %u bytes: %s",
                         n, strerror(errno));
        return (p);
}

static kvm_t *
_kvm_open(kvm_t *kd, const char *uf, const char *mf, int flag, char *errout)
{
        struct stat st;

        kd->vmfd = -1;
        kd->pmfd = -1;
        kd->nlfd = -1;
        kd->vmst = 0;
        kd->procbase = NULL;
        kd->procend = NULL;
        kd->argspc = 0;
        kd->argv = 0;

        if (uf == 0)
                uf = getbootfile();
        else if (strlen(uf) >= MAXPATHLEN) {
                _kvm_err(kd, kd->program, "exec file name too long");
                goto failed;
        }
        if (flag & ~O_RDWR) {
                _kvm_err(kd, kd->program, "bad flags arg");
                goto failed;
        }
        if (mf == 0)
                mf = _PATH_MEM;

        if ((kd->pmfd = open(mf, flag, 0)) < 0) {
                _kvm_syserr(kd, kd->program, "%s", mf);
                goto failed;
        }
        if (fstat(kd->pmfd, &st) < 0) {
                _kvm_syserr(kd, kd->program, "%s", mf);
                goto failed;
        }
        if (S_ISREG(st.st_mode) && st.st_size <= 0) {
                errno = EINVAL;
                _kvm_syserr(kd, kd->program, "empty file");
                goto failed;
        }
        if (fcntl(kd->pmfd, F_SETFD, FD_CLOEXEC) < 0) {
                _kvm_syserr(kd, kd->program, "%s", mf);
                goto failed;
        }
        if (S_ISCHR(st.st_mode)) {
                /*
                 * If this is a character special device, then check that
                 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
                 * make it work for either /dev/mem or /dev/kmem -- in either
                 * case you're working with a live kernel.)
                 */
                if (strcmp(mf, _PATH_DEVNULL) == 0) {
                        kd->vmfd = open(_PATH_DEVNULL, O_RDONLY);
                } else {
                        if ((kd->vmfd = open(_PATH_KMEM, flag)) < 0) {
                                _kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
                                goto failed;
                        }
                        if (fcntl(kd->vmfd, F_SETFD, FD_CLOEXEC) < 0) {
                                _kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
                                goto failed;
                        }
                }
        } else {
                /*
                 * This is a crash dump.
                 * Initialize the virtual address translation machinery,
                 * but first setup the namelist fd.
                 */
                if ((kd->nlfd = open(uf, O_RDONLY, 0)) < 0) {
                        _kvm_syserr(kd, kd->program, "%s", uf);
                        goto failed;
                }
                if (fcntl(kd->nlfd, F_SETFD, FD_CLOEXEC) < 0) {
                        _kvm_syserr(kd, kd->program, "%s", uf);
                        goto failed;
                }
                if (_kvm_initvtop(kd) < 0)
                        goto failed;
        }
        return (kd);
failed:
        /*
         * Copy out the error if doing sane error semantics.
         */
        if (errout != 0)
                strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
        (void)kvm_close(kd);
        return (0);
}

kvm_t *
kvm_openfiles(const char *uf, const char *mf, const char *sf, int flag,
              char *errout)
{
        kvm_t *kd;

        if ((kd = malloc(sizeof(*kd))) == NULL) {
                (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
                return (0);
        }
        memset(kd, 0, sizeof(*kd));
        kd->program = 0;
        return (_kvm_open(kd, uf, mf, flag, errout));
}

kvm_t *
kvm_open(const char *uf, const char *mf, const char *sf, int flag,
         const char *errstr)
{
        kvm_t *kd;

        if ((kd = malloc(sizeof(*kd))) == NULL) {
                if (errstr != NULL)
                        (void)fprintf(stderr, "%s: %s\n",
                                      errstr, strerror(errno));
                return (0);
        }
        memset(kd, 0, sizeof(*kd));
        kd->program = errstr;
        return (_kvm_open(kd, uf, mf, flag, NULL));
}
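
/*
 * Usage sketch (editorial example, not part of the original source):
 * opening the live kernel with each interface.  kvm_openfiles() reports
 * errors into the caller-supplied buffer, while kvm_open() prints them to
 * stderr prefixed with the given program name.  The name "example" is
 * arbitrary.
 *
 *      char errbuf[_POSIX2_LINE_MAX];
 *      kvm_t *kd;
 *
 *      kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *      if (kd == NULL)
 *              fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
 *
 *      kd = kvm_open(NULL, NULL, NULL, O_RDONLY, "example");
 *      ...
 *      kvm_close(kd);
 */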

int
kvm_close(kvm_t *kd)
{
        int error = 0;

        if (kd->pmfd >= 0)
                error |= close(kd->pmfd);
        if (kd->vmfd >= 0)
                error |= close(kd->vmfd);
        if (kd->nlfd >= 0)
                error |= close(kd->nlfd);
        if (kd->vmst)
                _kvm_freevtop(kd);
        if (kd->procbase != NULL)
                free(kd->procbase);
        if (kd->argv != 0)
                free((void *)kd->argv);
        free((void *)kd);

        return (0);
}

int
kvm_nlist(kvm_t *kd, struct nlist *nl)
{
        struct nlist *p;
        int nvalid;
        struct kld_sym_lookup lookup;
        int error;

        /*
         * If we can't use the kld symbol lookup, revert to the
         * slow library call.
         */
        if (!ISALIVE(kd))
                return (__fdnlist(kd->nlfd, nl));

        /*
         * We can use the kld lookup syscall.  Go through each nlist entry
         * and look it up with a kldsym(2) syscall.
         */
        nvalid = 0;
        for (p = nl; p->n_name && p->n_name[0]; ++p) {
                lookup.version = sizeof(lookup);
                lookup.symname = p->n_name;
                lookup.symvalue = 0;
                lookup.symsize = 0;

                if (lookup.symname[0] == '_')
                        lookup.symname++;

                if (kldsym(0, KLDSYM_LOOKUP, &lookup) != -1) {
                        p->n_type = N_TEXT;
                        p->n_other = 0;
                        p->n_desc = 0;
                        p->n_value = lookup.symvalue;
                        ++nvalid;
                        /* lookup.symsize */
                }
        }
        /*
         * Return the number of entries that weren't found. If they exist,
         * also fill internal error buffer.
         */
        error = ((p - nl) - nvalid);
        if (error)
                _kvm_syserr(kd, kd->program, "kvm_nlist");
        return (error);
}
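
/*
 * Usage sketch (editorial example, not part of the original source):
 * the caller supplies an nlist array terminated by a NULL or empty name;
 * a return value of 0 means every symbol was resolved.  The symbol name
 * "_nprocs" is only an illustration.
 *
 *      struct nlist nl[] = { { "_nprocs" }, { NULL } };
 *
 *      if (kvm_nlist(kd, nl) != 0)
 *              errx(1, "kvm_nlist: symbol not found");
 */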

ssize_t
kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
{
        int cc;
        void *cp;

        if (ISALIVE(kd)) {
                /*
                 * We're using /dev/kmem.  Just read straight from the
                 * device and let the active kernel do the address translation.
                 */
                errno = 0;
                if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
                        _kvm_err(kd, 0, "invalid address (%x)", kva);
                        return (-1);
                }

                /*
                 * Try to pre-fault the user memory to reduce instances of
                 * races within the kernel.  XXX workaround for kernel bug
                 * where kernel does a sanity check, but user faults during
                 * the copy can block and race against another kernel entity
                 * unmapping the memory in question.
                 */
                bzero(buf, len);
                cc = read(kd->vmfd, buf, len);
                if (cc < 0) {
                        _kvm_syserr(kd, 0, "kvm_read");
                        return (-1);
                } else if (cc < len)
                        _kvm_err(kd, kd->program, "short read");
                return (cc);
        } else {
                cp = buf;
                while (len > 0) {
                        u_long pa;

                        cc = _kvm_kvatop(kd, kva, &pa);
                        if (cc == 0)
                                return (-1);
                        if (cc > len)
                                cc = len;
                        errno = 0;
                        if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
                                _kvm_syserr(kd, 0, _PATH_MEM);
                                break;
                        }
                        bzero(cp, cc);
                        cc = read(kd->pmfd, cp, cc);
                        if (cc < 0) {
                                _kvm_syserr(kd, kd->program, "kvm_read");
                                break;
                        }
                        /*
                         * If kvm_kvatop returns a bogus value or our core
                         * file is truncated, we might wind up seeking beyond
                         * the end of the core file in which case the read will
                         * return 0 (EOF).
                         */
                        if (cc == 0)
                                break;
                        cp = (char *)cp + cc;
                        kva += cc;
                        len -= cc;
                }
                return ((char *)cp - (char *)buf);
        }
        /* NOTREACHED */
}
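
/*
 * Usage sketch (editorial example, not part of the original source):
 * reading a kernel variable at an address previously resolved with
 * kvm_nlist().  A short or failed read is reported through the error
 * machinery above.
 *
 *      int nprocs;
 *
 *      if (kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs)) !=
 *          sizeof(nprocs))
 *              warnx("kvm_read failed");
 */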

char *
kvm_readstr(kvm_t *kd, u_long kva, char *buf, size_t *lenp)
{
        size_t len, cc, pos;
        char ch;
        int asize = -1;

        if (buf == NULL) {
                asize = len = 16;
                buf = malloc(len);
                if (buf == NULL) {
                        _kvm_syserr(kd, kd->program, "kvm_readstr");
                        return NULL;
                }
        } else {
                len = *lenp;
        }

        if (ISALIVE(kd)) {
                /*
                 * We're using /dev/kmem.  Just read straight from the
                 * device and let the active kernel do the address translation.
                 */
                errno = 0;
                if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
                        _kvm_err(kd, 0, "invalid address (%x)", kva);
                        return NULL;
                }

                for (pos = 0, ch = -1; ch != 0; pos++) {
                        cc = read(kd->vmfd, &ch, 1);
                        if ((ssize_t)cc < 0) {
                                _kvm_syserr(kd, 0, "kvm_readstr");
                                return NULL;
                        } else if (cc < 1)
                                _kvm_err(kd, kd->program, "short read");
                        if (pos == asize) {
                                buf = realloc(buf, asize *= 2);
                                if (buf == NULL) {
                                        _kvm_syserr(kd, kd->program, "kvm_readstr");
                                        return NULL;
                                }
                                len = asize;
                        }
                        if (pos < len)
                                buf[pos] = ch;
                }

                if (lenp != NULL)
                        *lenp = pos;
                if (pos > len)
                        return NULL;
                else
                        return buf;
        } else {
                size_t left = 0;
                for (pos = 0, ch = -1; ch != 0; pos++, left--, kva++) {
                        if (left == 0) {
                                u_long pa;

                                left = _kvm_kvatop(kd, kva, &pa);
                                if (left == 0)
                                        return NULL;
                                errno = 0;
                                if (lseek(kd->pmfd, (off_t)pa, 0) == -1 && errno != 0) {
                                        _kvm_syserr(kd, 0, _PATH_MEM);
                                        return NULL;
                                }
                        }
                        cc = read(kd->pmfd, &ch, 1);
                        if ((ssize_t)cc < 0) {
                                _kvm_syserr(kd, 0, "kvm_readstr");
                                return NULL;
                        } else if (cc < 1)
                                _kvm_err(kd, kd->program, "short read");
                        if (pos == asize) {
                                buf = realloc(buf, asize *= 2);
                                if (buf == NULL) {
                                        _kvm_syserr(kd, kd->program, "kvm_readstr");
                                        return NULL;
                                }
                                len = asize;
                        }
                        if (pos < len)
                                buf[pos] = ch;
                }

                if (lenp != NULL)
                        *lenp = pos;
                if (pos > len)
                        return NULL;
                else
                        return buf;
        }
        /* NOTREACHED */
}

ssize_t
kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
{
        int cc;

        if (ISALIVE(kd)) {
                /*
                 * Just like kvm_read, only we write.
                 */
                errno = 0;
                if (lseek(kd->vmfd, (off_t)kva, 0) == -1 && errno != 0) {
                        _kvm_err(kd, 0, "invalid address (%x)", kva);
                        return (-1);
                }
                cc = write(kd->vmfd, buf, len);
                if (cc < 0) {
                        _kvm_syserr(kd, 0, "kvm_write");
                        return (-1);
                } else if (cc < len)
                        _kvm_err(kd, kd->program, "short write");
                return (cc);
        } else {
                _kvm_err(kd, kd->program,
                    "kvm_write not implemented for dead kernels");
                return (-1);
        }
        /* NOTREACHED */
}
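
/*
 * Usage sketch (editorial example, not part of the original source):
 * kvm_write() only works on a live kernel, so the descriptor should have
 * been opened with O_RDWR; on a crash dump it fails as coded above.  The
 * target address is assumed to come from an earlier kvm_nlist() call.
 *
 *      int newval = 1;
 *
 *      kd = kvm_open(NULL, NULL, NULL, O_RDWR, "example");
 *      if (kd != NULL &&
 *          kvm_write(kd, nl[0].n_value, &newval, sizeof(newval)) !=
 *          sizeof(newval))
 *              warnx("kvm_write failed");
 */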