2 * Copyright (c) 2006 Peter Wemm
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * AMD64 machine dependent routines for kvm and minidumps.
30 #include <sys/user.h> /* MUST BE FIRST */
31 #include <sys/param.h>
35 #include <sys/fnv_hash.h>
44 #include <vm/vm_param.h>
46 #include <machine/elf.h>
47 #include <machine/cpufunc.h>
48 #include <machine/minidump.h>
52 #include "kvm_private.h"
/*
 * NOTE(review): this extraction is garbled — original line numbers are fused
 * into the text and many lines are missing. These fields are presumably
 * members of struct vmstate (the struct header is not visible here); later
 * code accesses them via kd->vmst->hpt_head[] — confirm against pristine
 * source.
 */
62 /* minidump must be the first item! */
64 int minidump
; /* 1 = minidump mode */
65 int pgtable
; /* pagetable mode */
/* Hash buckets for the sparse-page physical-address table; element type is
 * likely struct hpte * (declared as void * here) — verify. */
66 void *hpt_head
[HPT_SIZE
];
/*
 * hpt_insert: remember the dump-file offset 'off' of the page at physical
 * address 'pa', hashed into hpt_head[] by a 32-bit FNV-1 hash of the
 * address. HPT_SIZE must be a power of two (the "& (HPT_SIZE - 1)" below
 * relies on it).
 * NOTE(review): lines are missing from this extraction — the declaration of
 * 'hpte', the malloc failure check, and the assignments of pa/off into the
 * new entry are not visible. Only the hash, allocation, and head insertion
 * into bucket hpt_head[fnv] survive.
 */
76 hpt_insert(kvm_t
*kd
, vm_paddr_t pa
, int64_t off
)
79 uint32_t fnv
= FNV1_32_INIT
;
/* Hash the physical address and reduce to a bucket index. */
81 fnv
= fnv_32_buf(&pa
, sizeof(pa
), fnv
);
82 fnv
&= (HPT_SIZE
- 1);
/* NOTE(review): malloc return is not checked in the visible lines. */
83 hpte
= malloc(sizeof(*hpte
));
/* Link the new entry at the head of its hash chain. */
86 hpte
->next
= kd
->vmst
->hpt_head
[fnv
];
87 kd
->vmst
->hpt_head
[fnv
] = hpte
;
/*
 * hpt_find: look up the dump-file offset previously recorded by hpt_insert()
 * for physical address 'pa'. Walks the FNV-hashed chain for the bucket.
 * NOTE(review): the loop body (the pa comparison and the success/failure
 * returns) is missing from this extraction; presumably it returns the stored
 * offset on a match and a sentinel (likely -1) otherwise — confirm.
 */
91 hpt_find(kvm_t
*kd
, vm_paddr_t pa
)
94 uint32_t fnv
= FNV1_32_INIT
;
/* Same hash/bucket computation as hpt_insert(). */
96 fnv
= fnv_32_buf(&pa
, sizeof(pa
), fnv
);
97 fnv
&= (HPT_SIZE
- 1);
98 for (hpte
= kd
->vmst
->hpt_head
[fnv
]; hpte
!= NULL
; hpte
= hpte
->next
) {
/*
 * inithash: populate the sparse-page hash table from the dump's page bitmap.
 * 'base' points to 'len' bytes of bitmap; each set bit marks one PAGE_SIZE
 * page present in the dump, stored sequentially starting at file offset
 * 'off'.
 * NOTE(review): lines are missing from this extraction — the per-word
 * bit-scan setup (loading 'bits', finding 'bit') and the advance of 'off'
 * per inserted page are not visible. Presumably 'off' steps by PAGE_SIZE
 * for each set bit — confirm against pristine source.
 */
106 inithash(kvm_t
*kd
, uint64_t *base
, int len
, off_t off
)
/* Walk each 64-bit word of the bitmap. */
112 for (idx
= 0; idx
< len
/ sizeof(*base
); idx
++) {
/* Clear the bit just processed from the working copy of the word. */
116 bits
&= ~(1ul << bit
);
/* Physical address = (global bit index) * PAGE_SIZE. */
117 pa
= (idx
* sizeof(*base
) * NBBY
+ bit
) * PAGE_SIZE
;
118 hpt_insert(kd
, pa
, off
);
/*
 * _kvm_minidump_freevtop: release the per-dump vmstate attached to 'kd'.
 * NOTE(review): the actual free logic (bitmap, ptemap, hash entries, vm
 * itself) is missing from this extraction; only the vmstate fetch survives.
 */
126 _kvm_minidump_freevtop(kvm_t
*kd
)
128 struct vmstate
*vm
= kd
->vmst
;
/*
 * Forward declarations for the two header-format initializers used by
 * _kvm_minidump_initvtop(): version-1 and version-2 minidump headers.
 */
138 static int _kvm_minidump_init_hdr1(kvm_t
*kd
, struct vmstate
*vmst
,
139 struct minidumphdr1
*hdr
);
140 static int _kvm_minidump_init_hdr2(kvm_t
*kd
, struct vmstate
*vmst
,
141 struct minidumphdr2
*hdr
);
/*
 * _kvm_minidump_initvtop: set up virtual-to-physical translation state for a
 * minidump. Reads the dump header from offset 0 of the physical-memory file
 * descriptor (kd->pmfd), identifies the header format by magic + version,
 * and dispatches to the matching _kvm_minidump_init_hdrN() routine.
 * NOTE(review): lines are missing from this extraction — the union 'u'
 * declaration, the error-return paths, and the tail of the function are not
 * visible.
 */
144 _kvm_minidump_initvtop(kvm_t
*kd
)
146 struct vmstate
*vmst
;
149 struct minidumphdr1 hdr1
;
150 struct minidumphdr2 hdr2
;
/* Allocate and zero the vmstate before parsing the header. */
153 vmst
= _kvm_malloc(kd
, sizeof(*vmst
));
155 _kvm_err(kd
, kd
->program
, "cannot allocate vm");
159 bzero(vmst
, sizeof(*vmst
));
/* Read the raw header (a union of both header layouts) from the dump. */
162 if (pread(kd
->pmfd
, &u
, sizeof(u
), 0) != sizeof(u
)) {
163 _kvm_err(kd
, kd
->program
, "cannot read dump header");
/* Version-1 header? */
166 if (strncmp(MINIDUMP1_MAGIC
, u
.hdr1
.magic
, sizeof(u
.hdr1
.magic
)) == 0 &&
167 u
.hdr1
.version
== MINIDUMP1_VERSION
) {
168 error
= _kvm_minidump_init_hdr1(kd
, vmst
, &u
.hdr1
);
/* Version-2 header? NOTE(review): the magic is compared through u.hdr1 —
 * presumably the magic field sits at the same offset in both layouts;
 * confirm against the minidump header definitions. */
170 if (strncmp(MINIDUMP2_MAGIC
, u
.hdr1
.magic
, sizeof(u
.hdr1
.magic
)) == 0 &&
171 u
.hdr2
.version
== MINIDUMP2_VERSION
) {
172 error
= _kvm_minidump_init_hdr2(kd
, vmst
, &u
.hdr2
);
174 _kvm_err(kd
, kd
->program
, "not a minidump for this platform");
/*
 * _kvm_minidump_init_hdr1: finish vmstate setup for a version-1 minidump.
 * Layout after the one-page header: msgbuf (rounded to pages), page bitmap,
 * ptemap, then the sparse page data. Loads the bitmap and ptemap into
 * memory, copies address-space bounds out of the header, and builds the
 * sparse-page hash table.
 * NOTE(review): error-return lines and closing braces are missing from this
 * extraction.
 */
182 _kvm_minidump_init_hdr1(kvm_t
*kd
, struct vmstate
*vmst
,
183 struct minidumphdr1
*hdr
)
187 /* Skip header and msgbuf */
188 off
= PAGE_SIZE
+ round_page(hdr
->msgbufsize
);
/* Load the page-presence bitmap. */
190 vmst
->bitmap
= _kvm_malloc(kd
, hdr
->bitmapsize
);
191 if (vmst
->bitmap
== NULL
) {
192 _kvm_err(kd
, kd
->program
,
193 "cannot allocate %jd bytes for bitmap",
194 (intmax_t)hdr
->bitmapsize
);
197 if (pread(kd
->pmfd
, vmst
->bitmap
, hdr
->bitmapsize
, off
) !=
199 _kvm_err(kd
, kd
->program
,
200 "cannot read %jd bytes for page bitmap",
201 (intmax_t)hdr
->bitmapsize
);
/* NOTE(review): this advances past the bitmap using vmst->bitmapsize, but
 * in the visible text vmst->bitmapsize is only assigned further below (and
 * vmst was bzero'd by the caller); the hdr2 variant uses hdr->bitmapsize
 * here. Missing lines may assign it earlier — verify against pristine
 * source. */
204 off
+= round_page(vmst
->bitmapsize
);
/* Load the dumped page-table map. */
206 vmst
->ptemap
= _kvm_malloc(kd
, hdr
->ptesize
);
207 if (vmst
->ptemap
== NULL
) {
208 _kvm_err(kd
, kd
->program
,
209 "cannot allocate %jd bytes for ptemap",
210 (intmax_t)hdr
->ptesize
);
213 if (pread(kd
->pmfd
, vmst
->ptemap
, hdr
->ptesize
, off
) !=
215 _kvm_err(kd
, kd
->program
,
216 "cannot read %jd bytes for ptemap",
217 (intmax_t)hdr
->ptesize
);
/* Copy the address-space layout constants out of the header. */
222 vmst
->kernbase
= hdr
->kernbase
;
223 vmst
->dmapbase
= hdr
->dmapbase
;
224 vmst
->dmapend
= hdr
->dmapend
;
225 vmst
->bitmapsize
= hdr
->bitmapsize
;
228 /* build physical address hash table for sparse pages */
229 inithash(kd
, vmst
->bitmap
, hdr
->bitmapsize
, off
);
/*
 * _kvm_minidump_init_hdr2: finish vmstate setup for a version-2 minidump.
 * Mirrors _kvm_minidump_init_hdr1(): skip header + msgbuf, load the page
 * bitmap and ptemap, copy layout constants, and build the sparse-page hash
 * table. Unlike the hdr1 variant, no dmapend is copied in the visible text
 * and the bitmap skip uses hdr->bitmapsize directly.
 * NOTE(review): error-return lines and closing braces are missing from this
 * extraction.
 */
236 _kvm_minidump_init_hdr2(kvm_t
*kd
, struct vmstate
*vmst
,
237 struct minidumphdr2
*hdr
)
241 /* Skip header and msgbuf */
242 off
= PAGE_SIZE
+ round_page(hdr
->msgbufsize
);
/* Load the page-presence bitmap. */
244 vmst
->bitmap
= _kvm_malloc(kd
, hdr
->bitmapsize
);
245 if (vmst
->bitmap
== NULL
) {
246 _kvm_err(kd
, kd
->program
,
247 "cannot allocate %jd bytes for bitmap",
248 (intmax_t)hdr
->bitmapsize
);
251 if (pread(kd
->pmfd
, vmst
->bitmap
, hdr
->bitmapsize
, off
) !=
253 _kvm_err(kd
, kd
->program
,
254 "cannot read %jd bytes for page bitmap",
255 (intmax_t)hdr
->bitmapsize
);
/* Advance past the (page-rounded) bitmap to the ptemap. */
258 off
+= round_page(hdr
->bitmapsize
);
/* Load the dumped page-table map. */
260 vmst
->ptemap
= _kvm_malloc(kd
, hdr
->ptesize
);
261 if (vmst
->ptemap
== NULL
) {
262 _kvm_err(kd
, kd
->program
,
263 "cannot allocate %jd bytes for ptemap",
264 (intmax_t)hdr
->ptesize
);
267 if (pread(kd
->pmfd
, vmst
->ptemap
, hdr
->ptesize
, off
) !=
269 _kvm_err(kd
, kd
->program
,
270 "cannot read %jd bytes for ptemap",
271 (intmax_t)hdr
->ptesize
);
/* Copy the address-space layout constants out of the header. */
276 vmst
->kernbase
= hdr
->kernbase
;
277 vmst
->dmapbase
= hdr
->dmapbase
;
278 vmst
->bitmapsize
= hdr
->bitmapsize
;
281 /* build physical address hash table for sparse pages */
282 inithash(kd
, vmst
->bitmap
, hdr
->bitmapsize
, off
);
/*
 * _kvm_minidump_vatop: translate kernel virtual address 'va' to a dump-file
 * offset (via *pa) for a minidump. Handles two regions: addresses at or
 * above kernbase (walked through the dumped page tables, with two walk
 * styles selected by vm->pgtable) and the direct map [dmapbase, dmapend).
 * Returns the number of bytes valid from the translated offset to the end
 * of the page (PAGE_SIZE - offset) on success.
 * NOTE(review): many lines are missing from this extraction — local
 * declarations, the switch case labels, the *pa assignments, returns, and
 * the hpt_find() failure tests (the "no phys page" errors suggest an
 * ofs-sentinel check on the missing lines). Treat this walk as indicative,
 * not complete.
 */
288 _kvm_minidump_vatop(kvm_t
*kd
, u_long va
, off_t
*pa
)
/* Byte offset within the page. */
298 offset
= va
& (PAGE_SIZE
- 1);
300 if (va
>= vm
->kernbase
) {
/* Two page-table encodings, selected by how the dump was taken. */
301 switch (vm
->pgtable
) {
/* Old style: a flat array of PTEs indexed from kernbase. */
304 * Page tables are specifically dumped (old style)
306 pteindex
= (va
- vm
->kernbase
) >> PAGE_SHIFT
;
307 pte
= vm
->ptemap
[pteindex
];
308 if (((u_long
)pte
& X86_PG_V
) == 0) {
309 _kvm_err(kd
, kd
->program
,
310 "_kvm_vatop: pte not valid");
/* New style: ptemap holds PD entries; PT pages live in the sparse map
 * and are read from the dump file on demand. */
317 * Kernel page table pages are included in the
318 * sparse map. We only dump the contents of
319 * the PDs (zero-filling any empty entries).
321 * Index of PD entry in PDP & PDP in PML4E together.
323 * First shift by 30 (1GB) - gives us an index
324 * into PD entries. We do not PDP entries in the
325 * PML4E, so there are 512 * 512 PD entries possible.
327 pteindex
= (va
>> PDPSHIFT
) & (512 * 512 - 1);
328 pte
= vm
->ptemap
[pteindex
];
329 if ((pte
& X86_PG_V
) == 0) {
330 _kvm_err(kd
, kd
->program
,
331 "_kvm_vatop: pd not valid");
334 if (pte
& X86_PG_PS
) { /* 1GB pages */
/* NOTE(review): adds the 1GB page offset into 'pte'; the subsequent use
 * (presumably hpt_find + *pa assignment) is on missing lines. */
335 pte
+= va
& (1024 * 1024 * 1024 - 1);
/* Locate the PT page for this PD entry in the sparse dump. */
338 ofs
= hpt_find(kd
, pte
& PG_FRAME
);
340 _kvm_err(kd
, kd
->program
,
341 "_kvm_vatop: no phys page for pd");
346 * Index of PT entry in PD
348 pteindex
= (va
>> PDRSHIFT
) & 511;
349 if (pread(kd
->pmfd
, &pte
, sizeof(pte
),
350 ofs
+ pteindex
* sizeof(pte
)) != sizeof(pte
)) {
351 _kvm_err(kd
, kd
->program
,
352 "_kvm_vatop: pd lookup not valid");
355 if ((pte
& X86_PG_V
) == 0) {
356 _kvm_err(kd
, kd
->program
,
357 "_kvm_vatop: pt not valid");
360 if (pte
& X86_PG_PS
) { /* 2MB pages */
361 pte
+= va
& (2048 * 1024 - 1);
/* Read the final PTE from the PT page located in the sparse dump. */
364 ofs
= hpt_find(kd
, pte
& PG_FRAME
);
366 _kvm_err(kd
, kd
->program
,
367 "_kvm_vatop: no phys page for pt");
372 * Index of pte entry in PT
374 pteindex
= (va
>> PAGE_SHIFT
) & 511;
375 if (pread(kd
->pmfd
, &pte
, sizeof(pte
),
376 ofs
+ pteindex
* sizeof(pte
)) != sizeof(pte
)) {
377 _kvm_err(kd
, kd
->program
,
378 "_kvm_vatop: pte lookup not valid");
/* Unknown vm->pgtable value. */
389 _kvm_err(kd
, kd
->program
,
390 "_kvm_vatop: bad pgtable mode ");
/* Look up the translated physical page ('a') in the sparse dump. */
393 ofs
= hpt_find(kd
, a
);
395 _kvm_err(kd
, kd
->program
, "_kvm_vatop: physical address 0x%lx not in minidump", a
);
399 return (PAGE_SIZE
- offset
);
/* Direct-mapped region: physical = va - dmapbase, page aligned. */
400 } else if (va
>= vm
->dmapbase
&& va
< vm
->dmapend
) {
401 a
= (va
- vm
->dmapbase
) & ~PAGE_MASK
;
402 ofs
= hpt_find(kd
, a
);
404 _kvm_err(kd
, kd
->program
, "_kvm_vatop: direct map address 0x%lx not in minidump", va
);
408 return (PAGE_SIZE
- offset
);
/* Below kernbase and outside the direct map: not in a minidump. */
410 _kvm_err(kd
, kd
->program
, "_kvm_vatop: virtual address 0x%lx not minidumped", va
);
415 _kvm_err(kd
, 0, "invalid address (0x%lx)", va
);
/*
 * _kvm_minidump_kvatop: public kernel-VA-to-offset entry point for minidump
 * mode. Rejects use against a live kernel (kvm_ishost) and otherwise
 * delegates to _kvm_minidump_vatop().
 * NOTE(review): the error-path return after _kvm_err is on a missing line.
 */
420 _kvm_minidump_kvatop(kvm_t
*kd
, u_long va
, off_t
*pa
)
422 if (kvm_ishost(kd
)) {
423 _kvm_err(kd
, 0, "kvm_vatop called in live kernel!");
427 return (_kvm_minidump_vatop(kd
, va
, pa
));