kernel/firewire: Remove #ifdef __DragonFly__ jungle.
[dragonfly.git] / lib / libkvm / kvm_sparc.c
blob1b1930ef9d8ee514ab7a18c42f62d315e480c195
1 /*-
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from software developed by the Computer Systems
6 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
7 * BG 91-66 and contributed to Berkeley.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
37 * @(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93
38 * $FreeBSD: src/lib/libkvm/kvm_sparc.c,v 1.3 1999/12/27 07:14:58 peter Exp $
42 * Sparc machine dependent routines for kvm. Hopefully, the forthcoming
43 * vm code will one day obsolete this module.
46 #include <sys/user.h> /* MUST BE FIRST */
47 #include <sys/param.h>
48 #include <sys/proc.h>
49 #include <sys/stat.h>
50 #include <unistd.h>
51 #include <nlist.h>
52 #include <kvm.h>
54 #include <vm/vm.h>
55 #include <vm/vm_param.h>
57 #include <limits.h>
59 #include "kvm_private.h"
#define NPMEG	128			/* number of PMEGs cached from the dump */

/* XXX from sparc/pmap.c */
#define MAXMEM	(128 * 1024 * 1024)	/* no more than 128 MB phys mem */
#define NPGBANK	16			/* 2^4 pages per bank (64K / bank) */
#define BSHIFT	4			/* log2(NPGBANK) */
#define BOFFSET	(NPGBANK - 1)		/* page-within-bank mask */
#define BTSIZE	(MAXMEM / NBPG / NPGBANK)	/* entries in pmap_stod */
/*
 * Translate a hardware (sparse) physical page number to the software
 * (dense, crashdump) page number via the pmap_stod bank table.
 */
#define HWTOSW(pmap_stod, pg) (pmap_stod[(pg) >> BSHIFT] | ((pg) & BOFFSET))
71 struct vmstate {
72 pmeg_t segmap[NKSEG];
73 int pmeg[NPMEG][NPTESG];
74 int pmap_stod[BTSIZE]; /* dense to sparse */
77 void
78 _kvm_freevtop(kvm_t *kd)
80 if (kd->vmst != 0)
81 free(kd->vmst);
/*
 * Initialize virtual-to-physical translation for a SPARC crash dump:
 * read the kernel segment map and PMEG tables from the tail of the
 * dump file, then fetch the pmap_stod translation table through
 * kvm_nlist()/kvm_read().
 *
 * Returns 0 on success, -1 on error (message reported via _kvm_err;
 * kd->vmst is left for _kvm_freevtop() to release).
 */
int
_kvm_initvtop(kvm_t *kd)
{
	int i;
	int off;
	struct vmstate *vm;
	struct stat st;
	struct nlist nlist[2];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);
	kd->vmst = vm;

	if (fstat(kd->pmfd, &st) < 0)
		return (-1);

	/*
	 * Read segment table.  It sits at the very end of the dump;
	 * require the full structure, not just a non-negative read.
	 */
	off = st.st_size - ctob(btoc(sizeof(vm->segmap)));
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->segmap, sizeof(vm->segmap)) !=
	    (ssize_t)sizeof(vm->segmap)) {
		_kvm_err(kd, kd->program, "cannot read segment map");
		return (-1);
	}

	/*
	 * Read PMEGs.  They immediately precede the segment map.
	 */
	off = st.st_size - ctob(btoc(sizeof(vm->pmeg)) +
	    btoc(sizeof(vm->segmap)));
	errno = 0;
	if ((lseek(kd->pmfd, (off_t)off, 0) == -1 && errno != 0) ||
	    read(kd->pmfd, (char *)vm->pmeg, sizeof(vm->pmeg)) !=
	    (ssize_t)sizeof(vm->pmeg)) {
		_kvm_err(kd, kd->program, "cannot read PMEG table");
		return (-1);
	}

	/*
	 * Make pmap_stod be an identity map so we can bootstrap it in.
	 * We assume it's in the first contiguous chunk of physical memory.
	 */
	for (i = 0; i < BTSIZE; ++i)
		vm->pmap_stod[i] = i << 4;

	/*
	 * It's okay to do this nlist separately from the one kvm_getprocs()
	 * does, since the only time we could gain anything by combining
	 * them is if we do a kvm_getprocs() on a dead kernel, which is
	 * not too common.
	 */
	nlist[0].n_name = "_pmap_stod";
	nlist[1].n_name = 0;
	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "pmap_stod: no such symbol");
		return (-1);
	}
	if (kvm_read(kd, (u_long)nlist[0].n_value,
	    (char *)vm->pmap_stod, sizeof(vm->pmap_stod))
	    != sizeof(vm->pmap_stod)) {
		_kvm_err(kd, kd->program, "cannot read pmap_stod");
		return (-1);
	}
	return (0);
}
/* Byte offset of va within its page; argument parenthesized for macro safety. */
#define VA_OFF(va) ((va) & (NBPG - 1))
/*
 * Translate a user virtual address to a physical address.
 * On success stores the physical address in *pa and returns the
 * number of bytes contiguously available there; returns 0 on failure.
 */
int
_kvm_uvatop(kvm_t *kd, const struct proc *p, u_long va, u_long *pa)
{
	int kva, pte;
	int off, frame;
	struct vmspace *vms = p->p_vmspace;

	if ((u_long)vms < KERNBASE) {
		_kvm_err(kd, kd->program, "_kvm_uvatop: corrupt proc");
		return (0);
	}
	if (va >= KERNBASE)
		return (0);
	/*
	 * Get the PTE.  This takes two steps.  We read the
	 * base address of the table, then we index it.
	 * Note that the index pte table is indexed by
	 * virtual segment rather than physical segment.
	 */
	kva = (u_long)&vms->vm_pmap.pm_rpte[VA_VSEG(va)];
	if (kvm_read(kd, kva, (char *)&kva, 4) != 4 || kva == 0)
		goto invalid;
	kva += sizeof(vms->vm_pmap.pm_rpte[0]) * VA_VPG(va);
	if (kvm_read(kd, kva, (char *)&pte, 4) == 4 && (pte & PG_V)) {
		off = VA_OFF(va);
		/*
		 * /dev/mem adheres to the hardware model of physical memory
		 * (with holes in the address space), while crashdumps
		 * adhere to the contiguous software model.
		 */
		if (kvm_ishost(kd))
			frame = pte & PG_PFNUM;
		else
			frame = HWTOSW(kd->vmst->pmap_stod, pte & PG_PFNUM);
		*pa = (frame << PGSHIFT) | off;
		return (NBPG - off);
	}
invalid:
	/* %lx: va is u_long; passing a long for %x is undefined on LP64. */
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}
/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vmst.  Returns the result in *pa, and
 * returns the number of bytes that are contiguously available from
 * this physical address.  This routine is used only for crashdumps.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, u_long *pa)
{
	struct vmstate *vm;
	int s;
	int pte;
	int off;

	if (va >= KERNBASE) {
		vm = kd->vmst;
		/* Segment map covers only kernel space, hence - NUSEG. */
		s = vm->segmap[VA_VSEG(va) - NUSEG];
		pte = vm->pmeg[s][VA_VPG(va)];
		if ((pte & PG_V) != 0) {
			off = VA_OFF(va);
			*pa = (HWTOSW(vm->pmap_stod, pte & PG_PFNUM)
				<< PGSHIFT) | off;
			return (NBPG - off);
		}
	}
	/* %lx: va is u_long; passing a long for %x is undefined on LP64. */
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}