sys/platform/pc64/x86_64/minidump_machdep.c (dragonfly.git)

/*-
 * Copyright (c) 2006 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/amd64/amd64/minidump_machdep.c,v 1.10 2009/05/29 21:27:12 jamie Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/device.h>
#include <sys/globaldata.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
#include <machine/globaldata.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define SIZEOF_METADATA		(64*1024)

#define MD_ALIGN(x)	(((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x)	(((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))

extern uint64_t KPDPphys;
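
/*
 * vm_page_dump is a bitmap with one bit per physical page: bit N covers
 * physical address N * PAGE_SIZE.  dump_add_page()/dump_drop_page() below
 * set and clear bits, and minidumpsys() walks the bitmap to decide which
 * pages end up in the dump.
 */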
uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress;

CTASSERT(sizeof(*vm_page_dump) == 8);
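
/*
 * is_dumpable() reports whether a physical address falls inside one of the
 * dump_avail[] ranges, i.e. whether it is backed by real, dumpable RAM.
 */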
static int
is_dumpable(vm_paddr_t pa)
{
	int i;

	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}
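
/* Convert a page count to megabytes (256 4KB pages per MB), rounding up. */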
#define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8)

static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dev_ddump(di->priv, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}
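
/*
 * blk_write() queues either a kernel-virtual buffer (ptr != NULL) or a
 * physical range (pa != 0) for output.  Physical pages are temporarily
 * mapped with pmap_kenter_temporary() and accumulated into a fragment of
 * up to MAXDUMPPGS pages that blk_flush() pushes to the dump device;
 * virtual buffers are written directly.  The console is polled so the
 * operator can abort the dump with CTRL-C.
 */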
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, i, c;

	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		kprintf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		kprintf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if (pa != 0 && (((uintptr_t)ptr) % PAGE_SIZE) != 0) {
		kprintf("address not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL) {
		/* If we're doing a virtual dump, flush any pre-existing pa pages */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = (MAXDUMPPGS * PAGE_SIZE) - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 24) {
			kprintf(" %ld", PG2MB(progress >> PAGE_SHIFT));
			counter &= (1<<24) - 1;
		}
		if (ptr) {
			error = dev_ddump(di->priv, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			for (i = 0; i < len; i += PAGE_SIZE)
				dump_va = pmap_kenter_temporary(pa + i, (i + fragsz) >> PAGE_SHIFT);
			fragsz += len;
			pa += len;
			sz -= len;
			if (fragsz == (MAXDUMPPGS * PAGE_SIZE)) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			kprintf(" (CTRL-C to abort) ");
	}

	return (0);
}

/* A fake page table page, to avoid having to handle both 4K and 2M pages */
static pt_entry_t fakept[NPTEPG];
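
/*
 * minidumpsys() writes a minidump to the configured dump device.  The
 * on-disk layout produced below is: kernel dump header (kdh), one page
 * holding the minidump header (mdhdr), the kernel message buffer, the
 * vm_page_dump bitmap, one page of PTEs for every 2MB of kernel address
 * space, the selected physical pages in bitmap order, and a trailing copy
 * of the kernel dump header.
 */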
void
minidumpsys(struct dumperinfo *di)
{
	uint64_t dumpsize;
	uint32_t ptesize;
	vm_offset_t va;
	vm_offset_t kern_end;
	int error;
	uint64_t bits;
	uint64_t *pdp, *pd, *pt, pa;
	int i, j, k, bit;
	struct minidumphdr mdhdr;
	struct mdglobaldata *md;

	counter = 0;

	/*
	 * Walk page table pages, set bits in vm_page_dump.
	 *
	 * NOTE: kernel_vm_end can actually be below KERNBASE.
	 *	 Just use KvaEnd.  Also note that loops which go
	 *	 all the way to the end of the address space might
	 *	 overflow the loop variable.
	 */
	ptesize = 0;

	md = (struct mdglobaldata *)globaldata_find(0);

	kern_end = KvaEnd;
	if (kern_end < (vm_offset_t)&(md[ncpus]))
		kern_end = (vm_offset_t)&(md[ncpus]);
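
	/*
	 * First pass: step through the kernel map in 2MB (NBPDR) chunks.
	 * Every chunk will contribute one page of PTEs to the dump; for
	 * valid mappings the referenced physical pages are marked in the
	 * vm_page_dump bitmap so they are written out later.
	 */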
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < kern_end; va += NBPDR) {
		/*
		 * The loop probably overflows a 64-bit int due to NBPDR.
		 */
		if (va < VM_MIN_KERNEL_ADDRESS)
			break;

		/*
		 * We always write a page, even if it is zero. Each
		 * page written corresponds to 2MB of space
		 */
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		ptesize += PAGE_SIZE;
		if ((pdp[i] & PG_V) == 0)
			continue;
		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
		if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
			/* This is an entire 2M page. */
			pa = pd[j] & PG_PS_FRAME;
			for (k = 0; k < NPTEPG; k++) {
				if (is_dumpable(pa))
					dump_add_page(pa);
				pa += PAGE_SIZE;
			}
			continue;
		}
		if ((pd[j] & PG_V) == PG_V) {
			/* set bit for each valid page in this 2MB block */
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			for (k = 0; k < NPTEPG; k++) {
				if ((pt[k] & PG_V) == PG_V) {
					pa = pt[k] & PG_FRAME;
					if (is_dumpable(pa))
						dump_add_page(pa);
				}
			}
		} else {
			/* nothing, we're going to dump a null page */
		}
	}
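
	/*
	 * The dump size below is the PTE pages counted above, plus the
	 * rounded-up message buffer and bitmap, plus one page per marked
	 * physical page, plus one extra page for the minidump header.
	 */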
	/* Calculate dump size. */
	dumpsize = ptesize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa)) {
				dumpsize += PAGE_SIZE;
			} else {
				dump_drop_page(pa);
			}
			bits &= ~(1ul << bit);
		}
	}
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = ENOSPC;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;
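
	/*
	 * The minidump header records the sizes of the msgbuf, bitmap and
	 * PTE sections and the kernel/DMAP address ranges, so that tools
	 * reading the dump can locate each section and translate addresses.
	 */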
	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.ptesize = ptesize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
		     dumpsize, di->blocksize);

	kprintf("Physical memory: %jd MB\n", (intmax_t)ptoa(physmem) / 1048576);
	kprintf("Dumping %jd MB:", (intmax_t)dumpsize >> 20);

	/* Dump leader */
	error = dev_ddump(di->priv, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&fakept, sizeof(fakept));
	bcopy(&mdhdr, &fakept, sizeof(mdhdr));
	error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
	if (error)
		goto fail;
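
	/*
	 * Second pass over the kernel map, mirroring the sizing pass above:
	 * emit one page of PTEs per 2MB chunk.  Real page table pages are
	 * written directly; 2MB mappings and unmapped chunks are represented
	 * by a fabricated page table built in fakept.
	 */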
	/* Dump kernel page table pages */
	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
	for (va = VM_MIN_KERNEL_ADDRESS; va < kern_end; va += NBPDR) {
		/*
		 * The loop probably overflows a 64-bit int due to NBPDR.
		 */
		if (va < VM_MIN_KERNEL_ADDRESS)
			break;

		/*
		 * We always write a page, even if it is zero
		 */
		i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
		if ((pdp[i] & PG_V) == 0) {
			bzero(fakept, sizeof(fakept));
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}
		pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME);
		j = ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
		if ((pd[j] & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
			/* This is a single 2M block. Generate a fake PTP */
			pa = pd[j] & PG_PS_FRAME;
			for (k = 0; k < NPTEPG; k++) {
				fakept[k] = (pa + (k * PAGE_SIZE)) | PG_V | PG_RW | PG_A | PG_M;
			}
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			continue;
		}
		if ((pd[j] & PG_V) == PG_V) {
			pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME);
			error = blk_write(di, (char *)pt, 0, PAGE_SIZE);
			if (error)
				goto fail;
		} else {
			bzero(fakept, sizeof(fakept));
			error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse fakept in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
		}
	}
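
	/*
	 * Finally write the marked physical pages themselves, in bitmap
	 * order, using the physical-address path of blk_write() so pages
	 * are batched through the temporary mapping.
	 */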
	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = bsfq(bits);
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dev_ddump(di->priv, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dev_ddump(di->priv, NULL, 0, 0, 0);
	kprintf("\nDump complete\n");
	return;

fail:
	if (error < 0)
		error = -error;

	if (error == ECANCELED)
		kprintf("\nDump aborted\n");
	else if (error == ENOSPC)
		kprintf("\nDump failed. Partition too small.\n");
	else
		kprintf("\n** DUMP FAILED (ERROR %d) **\n", error);
}
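
/*
 * Set or clear the vm_page_dump bit for a physical page.  The bitmap is
 * indexed in 64-bit words (pa >> PAGE_SHIFT gives the page number, the low
 * 6 bits select the bit within a word) and is updated atomically.
 */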
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}

void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}