fs: rename AT_* to VATTR_*
[unleashed/lotheac.git] / kernel / os / dumpsubr.c
blob7b26141d3efe4808de81709b54d2afbd2cba31a8
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2014, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
25 * Copyright 2018 Joyent, Inc.
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/vm.h>
32 #include <sys/proc.h>
33 #include <sys/file.h>
34 #include <sys/conf.h>
35 #include <sys/kmem.h>
36 #include <sys/mem.h>
37 #include <sys/mman.h>
38 #include <sys/vnode.h>
39 #include <sys/errno.h>
40 #include <sys/memlist.h>
41 #include <sys/dumphdr.h>
42 #include <sys/dumpadm.h>
43 #include <sys/ksyms.h>
44 #include <sys/compress.h>
45 #include <sys/stream.h>
46 #include <sys/strsun.h>
47 #include <sys/cmn_err.h>
48 #include <sys/bitmap.h>
49 #include <sys/modctl.h>
50 #include <sys/utsname.h>
51 #include <sys/systeminfo.h>
52 #include <sys/vmem.h>
53 #include <sys/log.h>
54 #include <sys/var.h>
55 #include <sys/debug.h>
56 #include <sys/sunddi.h>
57 #include <sys/fs_subr.h>
58 #include <sys/fs/snode.h>
59 #include <sys/ontrap.h>
60 #include <sys/panic.h>
61 #include <sys/dkio.h>
62 #include <sys/vtoc.h>
63 #include <sys/errorq.h>
64 #include <sys/fm/util.h>
65 #include <sys/fs/zfs.h>
67 #include <vm/hat.h>
68 #include <vm/as.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/seg.h>
72 #include <vm/seg_kmem.h>
73 #include <sys/clock_impl.h>
74 #include <sys/hold_page.h>
76 #define ONE_GIG (1024 * 1024 * 1024UL)
79 * exported vars
81 kmutex_t dump_lock; /* lock for dump configuration */
82 dumphdr_t *dumphdr; /* dump header */
83 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
84 vnode_t *dumpvp; /* dump device vnode pointer */
85 uoff_t dumpvp_size; /* size of dump device, in bytes */
86 char *dumppath; /* pathname of dump device */
87 int dump_timeout = 120; /* timeout for dumping pages */
88 int dump_timeleft; /* portion of dump_timeout remaining */
89 int dump_ioerr; /* dump i/o error */
90 char *dump_stack_scratch; /* scratch area for saving stack summary */
93 * Tunables for dump. These can be set via /etc/system.
95 * dump_metrics_on if set, metrics are collected in the kernel, passed
96 * to savecore via the dump file, and recorded by savecore in
97 * METRICS.txt.
100 /* tunables for pre-reserved heap */
101 uint_t dump_kmem_permap = 1024;
102 uint_t dump_kmem_pages = 0;
105 * Compression metrics are accumulated nano-second subtotals. The
106 * results are normalized by the number of pages dumped. A report is
107 * generated when dumpsys() completes and is saved in the dump image
108 * after the trailing dump header.
110 * Metrics are always collected. Set the variable dump_metrics_on to
111 * cause metrics to be saved in the crash file, where savecore will
112 * save it in the file METRICS.txt.
114 #define PERPAGES \
115 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
116 PERPAGE(compress) \
117 PERPAGE(write)
119 typedef struct perpage {
120 #define PERPAGE(x) hrtime_t x;
121 PERPAGES
122 #undef PERPAGE
123 } perpage_t;
126 * If dump_metrics_on is set to 1, the timing information is passed to
127 * savecore via the crash file, where it is appended to the file
128 * dump-dir/METRICS.txt.
130 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
132 #define HRSTART(v, m) v##ts.m = gethrtime()
133 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
135 static char dump_osimage_uuid[36 + 1];
137 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
138 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
139 ((ch) >= 'A' && (ch) <= 'F'))
142 * configuration vars for dumpsys
144 typedef struct dumpcfg {
145 char *page; /* buffer for page copy */
146 char *lzbuf; /* lzjb output */
148 char *cmap; /* array of input (map) buffers */
149 ulong_t *bitmap; /* bitmap for marking pages to dump */
150 pgcnt_t bitmapsize; /* size of bitmap */
151 pid_t *pids; /* list of process IDs at dump time */
154 * statistics
156 perpage_t perpage; /* per page metrics */
157 perpage_t perpagets; /* per page metrics (timestamps) */
158 pgcnt_t npages; /* subtotal of pages dumped */
159 pgcnt_t pages_mapped; /* subtotal of pages mapped */
160 pgcnt_t pages_used; /* subtotal of pages used per map */
161 size_t nwrite; /* subtotal of bytes written */
162 hrtime_t elapsed; /* elapsed time when completed */
163 hrtime_t iotime; /* time spent writing nwrite bytes */
164 hrtime_t iowait; /* time spent waiting for output */
165 hrtime_t iowaitts; /* iowait timestamp */
168 * I/O buffer
170 * There is one I/O buffer used by dumpvp_write and dumvp_flush. It
171 * is sized according to the optimum device transfer speed.
173 struct {
174 vnode_t *cdev_vp; /* VCHR open of the dump device */
175 len_t vp_limit; /* maximum write offset */
176 offset_t vp_off; /* current dump device offset */
177 char *cur; /* dump write pointer */
178 char *start; /* dump buffer address */
179 char *end; /* dump buffer end */
180 size_t size; /* size of dump buf in bytes */
181 size_t iosize; /* best transfer size for device */
182 } buf;
183 } dumpcfg_t;
185 static dumpcfg_t dumpcfg; /* config vars */
188 * The dump I/O buffer must be at least one page, at most xfer_size bytes,
189 * and should scale with physmem in between. The transfer size passed in
190 * will either represent a global default (maxphys) or the best size for the
191 * device. The size of the dump I/O buffer is limited by dumpbuf_limit (8MB
192 * by default) because the dump performance saturates beyond a certain size.
193 * The default is to select 1/4096 of the memory.
195 static int dumpbuf_fraction = 12; /* memory size scale factor */
196 static size_t dumpbuf_limit = 8 << 20; /* max I/O buf size */
198 static size_t
199 dumpbuf_iosize(size_t xfer_size)
201 size_t iosize = ptob(physmem >> dumpbuf_fraction);
203 if (iosize < PAGESIZE)
204 iosize = PAGESIZE;
205 else if (iosize > xfer_size)
206 iosize = xfer_size;
207 if (iosize > dumpbuf_limit)
208 iosize = dumpbuf_limit;
209 return (iosize & PAGEMASK);
/*
 * Resize the dump I/O buffer to match the best transfer size reported
 * for the dump device (dumpcfg.buf.iosize), never smaller than maxphys.
 * The buffer only ever grows; a smaller-or-equal target is a no-op.
 * Caller must hold dump_lock (asserted below).
 */
static void
dumpbuf_resize(void)
{
	char *old_buf = dumpcfg.buf.start;
	size_t old_size = dumpcfg.buf.size;
	char *new_buf;
	size_t new_size;

	ASSERT(MUTEX_HELD(&dump_lock));

	new_size = dumpbuf_iosize(MAX(dumpcfg.buf.iosize, maxphys));
	if (new_size <= old_size)
		return; /* no need to reallocate buffer */

	/*
	 * Allocate the new buffer first, then publish the new pointers
	 * before releasing the old buffer.
	 */
	new_buf = kmem_alloc(new_size, KM_SLEEP);
	dumpcfg.buf.size = new_size;
	dumpcfg.buf.start = new_buf;
	dumpcfg.buf.end = new_buf + new_size;
	kmem_free(old_buf, old_size);
}
237 * dump_update_clevel is called when dumpadm configures the dump device.
238 * Allocate the minimum configuration for now.
240 * When the dump file is configured we reserve a minimum amount of
241 * memory for use at crash time. But we reserve VA for all the memory
242 * we really want in order to do the fastest dump possible. The VA is
243 * backed by pages not being dumped, according to the bitmap. If
244 * there is insufficient spare memory, however, we fall back to the
245 * minimum.
247 * Live dump (savecore -L) always uses the minimum config.
249 static void
250 dump_update_clevel()
252 dumpcfg_t *old = &dumpcfg;
253 dumpcfg_t newcfg = *old;
254 dumpcfg_t *new = &newcfg;
256 ASSERT(MUTEX_HELD(&dump_lock));
259 * Free the previously allocated bufs and VM.
261 if (old->lzbuf)
262 kmem_free(old->lzbuf, PAGESIZE);
263 if (old->page)
264 kmem_free(old->page, PAGESIZE);
266 if (old->cmap)
267 /* VM space for mapping pages */
268 vmem_xfree(heap_arena, old->cmap, PAGESIZE);
271 * Allocate new data structures and buffers, and also figure the max
272 * desired size.
274 new->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
275 new->page = kmem_alloc(PAGESIZE, KM_SLEEP);
277 new->cmap = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
278 0, 0, NULL, NULL, VM_SLEEP);
281 * Reserve memory for kmem allocation calls made during crash dump. The
282 * hat layer allocates memory for each mapping created, and the I/O path
283 * allocates buffers and data structs.
285 * On larger systems, we easily exceed the lower amount, so we need some
286 * more space; the cut-over point is relatively arbitrary. If we run
287 * out, the only impact is that kmem state in the dump becomes
288 * inconsistent.
291 if (dump_kmem_pages == 0) {
292 if (physmem > (16 * ONE_GIG) / PAGESIZE)
293 dump_kmem_pages = 20;
294 else
295 dump_kmem_pages = 8;
298 kmem_dump_init(dump_kmem_permap + (dump_kmem_pages * PAGESIZE));
300 /* set new config pointers */
301 *old = *new;
305 * Define a struct memlist walker to optimize bitnum to pfn
306 * lookup. The walker maintains the state of the list traversal.
308 typedef struct dumpmlw {
309 struct memlist *mp; /* current memlist */
310 pgcnt_t basenum; /* bitnum base offset */
311 pgcnt_t mppages; /* current memlist size */
312 pgcnt_t mpleft; /* size to end of current memlist */
313 pfn_t mpaddr; /* first pfn in memlist */
314 } dumpmlw_t;
316 /* initialize the walker */
317 static inline void
318 dump_init_memlist_walker(dumpmlw_t *pw)
320 pw->mp = phys_install;
321 pw->basenum = 0;
322 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
323 pw->mpleft = pw->mppages;
324 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
328 * Lookup pfn given bitnum. The memlist can be quite long on some
329 * systems (e.g.: one per board). To optimize sequential lookups, the
330 * caller initializes and presents a memlist walker.
static pfn_t
dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
{
	/* make bitnum relative to the memlist where the walker stopped */
	bitnum -= pw->basenum;
	while (pw->mp != NULL) {
		if (bitnum < pw->mppages) {
			/* hit: remember pages left in this memlist */
			pw->mpleft = pw->mppages - bitnum;
			return (pw->mpaddr + bitnum);
		}
		/* advance past this memlist, updating the walker state */
		bitnum -= pw->mppages;
		pw->basenum += pw->mppages;
		pw->mp = pw->mp->ml_next;
		if (pw->mp != NULL) {
			pw->mppages = pw->mp->ml_size >> PAGESHIFT;
			pw->mpleft = pw->mppages;
			pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
		}
	}
	/* bitnum beyond the end of phys_install */
	return (PFN_INVALID);
}
353 static pgcnt_t
354 dump_pfn_to_bitnum(pfn_t pfn)
356 struct memlist *mp;
357 pgcnt_t bitnum = 0;
359 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
360 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
361 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
362 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
363 bitnum += mp->ml_size >> PAGESHIFT;
365 return ((pgcnt_t)-1);
/*
 * One-time (and incremental) initialization of the dump header, I/O
 * buffer, pid list, stack-summary scratch area, and page bitmap.
 * Safe to call repeatedly: the header block runs only on first call,
 * while the bitmap is reallocated whenever physical page count changes.
 * Caller must hold dump_lock.
 */
static void
dumphdr_init(void)
{
	pgcnt_t npages;

	ASSERT(MUTEX_HELD(&dump_lock));

	if (dumphdr == NULL) {
		/* first call: build the header and fixed allocations */
		dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
		dumphdr->dump_magic = DUMP_MAGIC;
		dumphdr->dump_version = DUMP_VERSION;
		dumphdr->dump_wordsize = DUMP_WORDSIZE;
		dumphdr->dump_pageshift = PAGESHIFT;
		dumphdr->dump_pagesize = PAGESIZE;
		dumphdr->dump_utsname = utsname;
		(void) strcpy(dumphdr->dump_platform, platform);
		dumpcfg.buf.size = dumpbuf_iosize(maxphys);
		dumpcfg.buf.start = kmem_alloc(dumpcfg.buf.size, KM_SLEEP);
		dumpcfg.buf.end = dumpcfg.buf.start + dumpcfg.buf.size;
		/* one slot per possible process (v.v_proc) */
		dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
		dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
		(void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
		    sizeof (dumphdr->dump_uuid));
	}

	npages = num_phys_pages();

	/* (re)allocate the bitmap if the physical page count changed */
	if (dumpcfg.bitmapsize != npages) {
		void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);

		if (dumpcfg.bitmap != NULL)
			kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
			    bitmapsize));
		dumpcfg.bitmap = map;
		dumpcfg.bitmapsize = npages;
	}
}
/*
 * Establish a new dump device.
 *
 * Validates that 'vp' is a plausible dump target, and unless
 * 'justchecking' is set, makes it the active dump device (replacing
 * any previous one via dumpfini()).  Returns 0 or an errno.
 * Caller must hold dump_lock.
 */
int
dumpinit(vnode_t *vp, char *name, int justchecking)
{
	vnode_t *cvp;
	vattr_t vattr;
	vnode_t *cdev_vp;
	int error = 0;

	ASSERT(MUTEX_HELD(&dump_lock));

	dumphdr_init();

	cvp = common_specvp(vp);
	if (cvp == dumpvp)
		return (0);	/* already the dump device */

	/*
	 * Determine whether this is a plausible dump device.  We want either:
	 * (1) a real device that's not mounted and has a cb_dump routine, or
	 * (2) a swapfile on some filesystem that has a vop_dump routine.
	 */
	if ((error = fop_open(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
		return (error);

	vattr.va_mask = VATTR_SIZE | VATTR_TYPE | VATTR_RDEV;
	if ((error = fop_getattr(cvp, &vattr, 0, kcred, NULL)) == 0) {
		if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
			if (devopsp[getmajor(vattr.va_rdev)]->
			    devo_cb_ops->cb_dump == nodev)
				error = ENOTSUP;
			else if (vfs_devismounted(vattr.va_rdev))
				error = EBUSY;
			/* a zvol currently used as swap cannot be the dump device */
			if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
			    ZFS_DRIVER) == 0 &&
			    IS_SWAPVP(common_specvp(cvp)))
				error = EBUSY;
		} else {
			if (cvp->v_op->vop_dump == fs_nosys ||
			    cvp->v_op->vop_dump == NULL ||
			    !IS_SWAPVP(cvp))
				error = ENOTSUP;
		}
	}

	/* must leave room for the message/ereport save areas */
	if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
		error = ENOSPC;

	if (error || justchecking) {
		(void) fop_close(cvp, FREAD | FWRITE, 1, 0,
		    kcred, NULL);
		return (error);
	}

	VN_HOLD(cvp);

	if (dumpvp != NULL)
		dumpfini();	/* unconfigure the old dump device */

	dumpvp = cvp;
	dumpvp_size = vattr.va_size & -DUMP_OFFSET;
	dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(dumppath, name);
	dumpcfg.buf.iosize = 0;

	/*
	 * If the dump device is a block device, attempt to open up the
	 * corresponding character device and determine its maximum transfer
	 * size. We use this information to potentially resize dump buffer
	 * to a larger and more optimal size for performing i/o to the dump
	 * device.
	 */
	if (cvp->v_type == VBLK &&
	    (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
		if (fop_open(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
			size_t blk_size;
			struct dk_cinfo dki;
			struct dk_minfo minf;

			if (fop_ioctl(cdev_vp, DKIOCGMEDIAINFO,
			    (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
			    == 0 && minf.dki_lbsize != 0)
				blk_size = minf.dki_lbsize;
			else
				blk_size = DEV_BSIZE;

			if (fop_ioctl(cdev_vp, DKIOCINFO, (intptr_t)&dki,
			    FKIOCTL, kcred, NULL, NULL) == 0) {
				dumpcfg.buf.iosize = dki.dki_maxtransfer * blk_size;
				dumpbuf_resize();
			}
			/*
			 * If we are working with a zvol then dumpify it
			 * if it's not being used as swap.
			 */
			if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
				if (IS_SWAPVP(common_specvp(cvp)))
					error = EBUSY;
				else if ((error = fop_ioctl(cdev_vp,
				    DKIOCDUMPINIT, (intptr_t)NULL, FKIOCTL,
				    kcred, NULL, NULL)) != 0)
					dumpfini();
			}

			(void) fop_close(cdev_vp, FREAD | FWRITE, 1, 0,
			    kcred, NULL);
		}

		VN_RELE(cdev_vp);
	}

	cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);

	dump_update_clevel();

	return (error);
}
/*
 * Unconfigure the current dump device: free the saved path, give zfs a
 * chance to tear down a zvol dump target (DKIOCDUMPFINI), then close
 * and release the dump vnode and clear the globals.
 * Caller must hold dump_lock; dumpvp and dumppath must be set.
 */
void
dumpfini(void)
{
	vattr_t vattr;
	boolean_t is_zfs = B_FALSE;
	vnode_t *cdev_vp;
	ASSERT(MUTEX_HELD(&dump_lock));

	kmem_free(dumppath, strlen(dumppath) + 1);

	/*
	 * Determine if we are using zvols for our dump device
	 */
	vattr.va_mask = VATTR_RDEV;
	if (fop_getattr(dumpvp, &vattr, 0, kcred, NULL) == 0) {
		is_zfs = (getmajor(vattr.va_rdev) ==
		    ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
	}

	/*
	 * If we have a zvol dump device then we call into zfs so
	 * that it may have a chance to cleanup.
	 */
	if (is_zfs &&
	    (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
		if (fop_open(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
			(void) fop_ioctl(cdev_vp, DKIOCDUMPFINI, (intptr_t)NULL,
			    FKIOCTL, kcred, NULL, NULL);
			(void) fop_close(cdev_vp, FREAD | FWRITE, 1, 0,
			    kcred, NULL);
		}
		VN_RELE(cdev_vp);
	}

	(void) fop_close(dumpvp, FREAD | FWRITE, 1, 0, kcred, NULL);

	VN_RELE(dumpvp);

	dumpvp = NULL;
	dumpvp_size = 0;
	dumppath = NULL;
}
/*
 * Flush the dump I/O buffer to the dump device, rounding the write up
 * to a page boundary.  At panic time the write goes through fop_dump();
 * for a live dump it goes through vn_rdwr() on the character device if
 * one was opened, else on dumpvp.  I/O and wait times are accumulated
 * into dumpcfg for the metrics report.  On overflow of vp_limit, sets
 * dump_ioerr = ENOSPC instead of writing.  Returns the new device
 * offset; always resets buf.cur to the start of the buffer.
 */
static offset_t
dumpvp_flush(void)
{
	size_t size = P2ROUNDUP(dumpcfg.buf.cur - dumpcfg.buf.start, PAGESIZE);
	hrtime_t iotime;
	int err;

	if (dumpcfg.buf.vp_off + size > dumpcfg.buf.vp_limit) {
		/* would run past the reserved tail of the device */
		dump_ioerr = ENOSPC;
		dumpcfg.buf.vp_off = dumpcfg.buf.vp_limit;
	} else if (size != 0) {
		iotime = gethrtime();
		/* time since the last flush completed counts as wait */
		dumpcfg.iowait += iotime - dumpcfg.iowaitts;
		if (panicstr)
			err = fop_dump(dumpvp, dumpcfg.buf.start,
			    lbtodb(dumpcfg.buf.vp_off), btod(size), NULL);
		else
			err = vn_rdwr(UIO_WRITE, dumpcfg.buf.cdev_vp != NULL ?
			    dumpcfg.buf.cdev_vp : dumpvp, dumpcfg.buf.start, size,
			    dumpcfg.buf.vp_off, UIO_SYSSPACE, 0, dumpcfg.buf.vp_limit,
			    kcred, 0);
		/* remember only the first I/O error */
		if (err && dump_ioerr == 0)
			dump_ioerr = err;
		dumpcfg.iowaitts = gethrtime();
		dumpcfg.iotime += dumpcfg.iowaitts - iotime;
		dumpcfg.nwrite += size;
		dumpcfg.buf.vp_off += size;
	}
	dumpcfg.buf.cur = dumpcfg.buf.start;
	dump_timeleft = dump_timeout;
	return (dumpcfg.buf.vp_off);
}
/* maximize write speed by keeping seek offset aligned with size */
/*
 * Buffered write to the dump device.  Data is accumulated in the dump
 * I/O buffer and flushed via dumpvp_flush() when full.  When the device
 * offset is not aligned to the (power-of-two) buffer size, a partial
 * flush is issued and the unaligned tail is slid back to the start of
 * the buffer so subsequent flushes stay size-aligned.
 */
void
dumpvp_write(const void *va, size_t size)
{
	size_t len, off, sz;

	while (size != 0) {
		len = MIN(size, dumpcfg.buf.end - dumpcfg.buf.cur);
		if (len == 0) {
			/* buffer full: flush, realigning if needed */
			off = P2PHASE(dumpcfg.buf.vp_off, dumpcfg.buf.size);
			if (off == 0 || !ISP2(dumpcfg.buf.size)) {
				(void) dumpvp_flush();
			} else {
				/* write only up to the alignment boundary,
				 * then move the remainder to the front */
				sz = dumpcfg.buf.size - off;
				dumpcfg.buf.cur = dumpcfg.buf.start + sz;
				(void) dumpvp_flush();
				ovbcopy(dumpcfg.buf.start + sz, dumpcfg.buf.start, off);
				dumpcfg.buf.cur += off;
			}
		} else {
			bcopy(va, dumpcfg.buf.cur, len);
			va = (char *)va + len;
			dumpcfg.buf.cur += len;
			size -= len;
		}
	}
}
/*
 * ksyms_snapshot() callback: stream a chunk of the kernel symbol table
 * into the dump I/O buffer.  The 'dst' argument is unused here.
 */
/*ARGSUSED*/
static void
dumpvp_ksyms_write(const void *src, void *dst, size_t size)
{
	dumpvp_write(src, size);
}
/*
 * Mark 'pfn' in the bitmap and dump its translation table entry.
 * Each call also appends an <as, va, pfn> record to the translation
 * map being written; dump_npages counts only first-time pages.
 */
void
dump_addpage(struct as *as, void *va, pfn_t pfn)
{
	mem_vtop_t mem_vtop;
	pgcnt_t bitnum;

	if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
		if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
			dumphdr->dump_npages++;
			BT_SET(dumpcfg.bitmap, bitnum);
		}
		dumphdr->dump_nvtop++;
		mem_vtop.m_as = as;
		mem_vtop.m_va = va;
		mem_vtop.m_pfn = pfn;
		dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
	}
	/* reset the watchdog so a long walk doesn't trip the dump timeout */
	dump_timeleft = dump_timeout;
}
661 * Mark 'pfn' in the bitmap
663 void
664 dump_page(pfn_t pfn)
666 pgcnt_t bitnum;
668 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
669 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
670 dumphdr->dump_npages++;
671 BT_SET(dumpcfg.bitmap, bitnum);
674 dump_timeleft = dump_timeout;
/*
 * Dump the <as, va, pfn> information for a given address space.
 * segop_dump() will call dump_addpage() for each page in the segment.
 */
static void
dump_as(struct as *as)
{
	struct seg *seg;

	AS_LOCK_ENTER(as, RW_READER);
	for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
		/* a segment pointing at a different as indicates corruption */
		if (seg->s_as != as)
			break;
		if (seg->s_ops == NULL)
			continue;
		segop_dump(seg);
	}
	AS_LOCK_EXIT(as);

	/* seg is non-NULL only if the loop broke out on a bad segment */
	if (seg != NULL)
		cmn_err(CE_WARN, "invalid segment %p in address space %p",
		    (void *)seg, (void *)as);
}
/*
 * Dump the address space of process 'pid'.  Returns 0 on success, -1
 * if the process could not be locked.  The kernel's own as (kas) is
 * skipped since it is dumped separately.  p_lock (held by sprlock) is
 * dropped around dump_as() and re-taken for sprunlock.
 */
static int
dump_process(pid_t pid)
{
	proc_t *p = sprlock(pid);

	if (p == NULL)
		return (-1);
	if (p->p_as != &kas) {
		mutex_exit(&p->p_lock);
		dump_as(p->p_as);
		mutex_enter(&p->p_lock);
	}

	sprunlock(p);

	return (0);
}
720 * The following functions (dump_summary(), dump_ereports(), and
721 * dump_messages()), write data to an uncompressed area within the
722 * crashdump. The layout of these is
724 * +------------------------------------------------------------+
725 * | compressed pages | summary | ereports | messages |
726 * +------------------------------------------------------------+
728 * With the advent of saving a compressed crash dump by default, we
729 * need to save a little more data to describe the failure mode in
730 * an uncompressed buffer available before savecore uncompresses
731 * the dump. Initially this is a copy of the stack trace. Additional
732 * summary information should be added here.
/*
 * Write the uncompressed summary area (stack trace of the panic) into
 * its reserved slot near the end of the dump device, so savecore can
 * describe the failure without first uncompressing the dump.  A
 * zero-magic record terminates the area.
 */
void
dump_summary(void)
{
	uoff_t dumpvp_start;
	summary_dump_t sd;

	if (dumpvp == NULL || dumphdr == NULL)
		return;

	dumpcfg.buf.cur = dumpcfg.buf.start;

	/* summary slot sits just below the ereport area */
	dumpcfg.buf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
	    DUMP_ERPTSIZE);
	dumpvp_start = dumpcfg.buf.vp_limit - DUMP_SUMMARYSIZE;
	dumpcfg.buf.vp_off = dumpvp_start;

	sd.sd_magic = SUMMARY_MAGIC;
	sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
	dumpvp_write(&sd, sizeof (sd));
	dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);

	sd.sd_magic = 0; /* indicate end of summary */
	dumpvp_write(&sd, sizeof (sd));
	(void) dumpvp_flush();
}
/*
 * Write pending FMA ereports into their reserved uncompressed area of
 * the dump device.  At panic time the errorq contents are dumped too.
 * A zeroed record terminates the area.  For a live dump the pages are
 * invalidated afterwards so stale data is not cached against dumpvp.
 */
void
dump_ereports(void)
{
	uoff_t dumpvp_start;
	erpt_dump_t ed;

	if (dumpvp == NULL || dumphdr == NULL)
		return;

	dumpcfg.buf.cur = dumpcfg.buf.start;
	dumpcfg.buf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
	dumpvp_start = dumpcfg.buf.vp_limit - DUMP_ERPTSIZE;
	dumpcfg.buf.vp_off = dumpvp_start;

	fm_ereport_dump();
	if (panicstr)
		errorq_dump();

	bzero(&ed, sizeof (ed)); /* indicate end of ereports */
	dumpvp_write(&ed, sizeof (ed));
	(void) dumpvp_flush();

	if (!panicstr) {
		(void) fop_putpage(dumpvp, dumpvp_start,
		    (size_t)(dumpcfg.buf.vp_off - dumpvp_start),
		    B_INVAL | B_FORCE, kcred, NULL);
	}
}
/*
 * Write the recent console messages (from the log_consq STREAMS queue)
 * into their reserved uncompressed area of the dump device, oldest
 * first.  The outer do/while walks the queue chain from the tail back
 * toward log_consq; a zero-magic record terminates the area.
 */
void
dump_messages(void)
{
	log_dump_t ld;
	mblk_t *mctl, *mdata;
	queue_t *q, *qlast;
	uoff_t dumpvp_start;

	if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
		return;

	dumpcfg.buf.cur = dumpcfg.buf.start;
	dumpcfg.buf.vp_limit = dumpvp_size - DUMP_OFFSET;
	dumpvp_start = dumpcfg.buf.vp_limit - DUMP_LOGSIZE;
	dumpcfg.buf.vp_off = dumpvp_start;

	qlast = NULL;
	do {
		/* find the queue just before the one handled last */
		for (q = log_consq; q->q_next != qlast; q = q->q_next)
			continue;
		for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
			dump_timeleft = dump_timeout;
			mdata = mctl->b_cont;
			ld.ld_magic = LOG_MAGIC;
			ld.ld_msgsize = MBLKL(mctl->b_cont);
			ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
			ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
			dumpvp_write(&ld, sizeof (ld));
			dumpvp_write(mctl->b_rptr, MBLKL(mctl));
			dumpvp_write(mdata->b_rptr, MBLKL(mdata));
		}
	} while ((qlast = q) != log_consq);

	ld.ld_magic = 0; /* indicate end of messages */
	dumpvp_write(&ld, sizeof (ld));
	(void) dumpvp_flush();
	if (!panicstr) {
		(void) fop_putpage(dumpvp, dumpvp_start,
		    (size_t)(dumpcfg.buf.vp_off - dumpvp_start),
		    B_INVAL | B_FORCE, kcred, NULL);
	}
}
834 * Copy pages, trapping ECC errors. Also, for robustness, trap data
835 * access in case something goes wrong in the hat layer and the
836 * mapping is broken.
/*
 * Copy one page word-by-word, trapping ECC errors.  Also, for
 * robustness, trap data access in case something goes wrong in the hat
 * layer and the mapping is broken.  A faulting word is replaced with a
 * recognizable 0x00badecc / 0x00badadd pattern and the copy resumes at
 * the next word ('w' and 'ueoff' are volatile so their values survive
 * the return through on_trap()).
 */
static void
dump_pagecopy(void *src, void *dst)
{
	long *wsrc = (long *)src;
	long *wdst = (long *)dst;
	const ulong_t ncopies = PAGESIZE / sizeof (long);
	volatile int w = 0;
	volatile int ueoff = -1;	/* byte offset of first UE, if any */
	on_trap_data_t otd;

	if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
		/* control re-enters here on every trapped access */
		if (ueoff == -1)
			ueoff = w * sizeof (long);
		/* report "bad ECC" or "bad address" */
#ifdef _LP64
		if (otd.ot_trap & OT_DATA_EC)
			wdst[w++] = 0x00badecc00badecc;
		else
			wdst[w++] = 0x00badadd00badadd;
#else
		if (otd.ot_trap & OT_DATA_EC)
			wdst[w++] = 0x00badecc;
		else
			wdst[w++] = 0x00badadd;
#endif
	}
	while (w < ncopies) {
		wdst[w] = wsrc[w];
		w++;
	}
	no_trap();
}
/*
 * Format the dump metrics report into 'buf' (at most 'size' bytes) as
 * comma-separated name,value lines; savecore stores this as
 * METRICS.txt.  Returns the number of bytes written; any unused tail
 * of the buffer is zeroed.  Values come from the accumulated dumpcfg
 * counters; elapsed/iotime are clamped to at least 1 to avoid
 * divide-by-zero.
 */
size_t
dumpsys_metrics(char *buf, size_t size)
{
	dumpcfg_t *cfg = &dumpcfg;
	int compress_ratio;
	int sec, iorate;
	char *e = buf + size;
	char *p = buf;

	sec = cfg->elapsed / (1000 * 1000 * 1000ULL);
	if (sec < 1)
		sec = 1;

	if (cfg->iotime < 1)
		cfg->iotime = 1;
	/* MB/s scaled by 100 so two decimals can be printed below */
	iorate = (cfg->nwrite * 100000ULL) / cfg->iotime;

	compress_ratio = 100LL * cfg->npages / btopr(cfg->nwrite + 1);

/* append to p, never writing past e; no-op once the buffer is full */
#define	P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)

	P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
	P("Master cpu_id,%d\n", CPU->cpu_id);
	P("dump_flags,0x%x\n", dumphdr->dump_flags);
	P("dump_ioerr,%d\n", dump_ioerr);

	P("Compression type,serial lzjb\n");
	P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
	    100);

	P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
	P("..total bytes,%lld\n", (u_longlong_t)cfg->nwrite);
	P("..total nsec,%lld\n", (u_longlong_t)cfg->iotime);
	P("dumpbuf.iosize,%ld\n", dumpcfg.buf.iosize);
	P("dumpbuf.size,%ld\n", dumpcfg.buf.size);

	P("Dump pages/sec,%llu\n", (u_longlong_t)cfg->npages / sec);
	P("Dump pages,%llu\n", (u_longlong_t)cfg->npages);
	P("Dump time,%d\n", sec);

	if (cfg->pages_mapped > 0)
		P("per-cent map utilization,%d\n", (int)((100 * cfg->pages_used)
		    / cfg->pages_mapped));

	P("\nPer-page metrics:\n");
	if (cfg->npages > 0) {
/* expand one line per PERPAGES stage (bitmap/map/unmap/compress/write) */
#define	PERPAGE(x) \
		P("%s nsec/page,%d\n", #x, (int)(cfg->perpage.x / cfg->npages));
		PERPAGES;
#undef PERPAGE

		P("I/O wait nsec/page,%llu\n", (u_longlong_t)(cfg->iowait /
		    cfg->npages));
	}
#undef P

	if (p < e)
		bzero(p, e - p);
	return (p - buf);
}
932 * Dump the system.
934 void
935 dumpsys(void)
937 dumpcfg_t *cfg = &dumpcfg;
938 uint_t percent_done; /* dump progress reported */
939 int sec_done;
940 hrtime_t start; /* start time */
941 pfn_t pfn;
942 pgcnt_t bitnum;
943 proc_t *p;
944 pid_t npids, pidx;
945 char *content;
946 char *buf;
947 size_t size;
948 dumpmlw_t mlw;
949 dumpcsize_t datatag;
950 dumpdatahdr_t datahdr;
952 if (dumpvp == NULL || dumphdr == NULL) {
953 uprintf("skipping system dump - no dump device configured\n");
954 return;
956 dumpcfg.buf.cur = dumpcfg.buf.start;
958 /* clear the sync variables */
959 cfg->npages = 0;
960 cfg->pages_mapped = 0;
961 cfg->pages_used = 0;
962 cfg->nwrite = 0;
963 cfg->elapsed = 0;
964 cfg->iotime = 0;
965 cfg->iowait = 0;
966 cfg->iowaitts = 0;
969 * Calculate the starting block for dump. If we're dumping on a
970 * swap device, start 1/5 of the way in; otherwise, start at the
971 * beginning. And never use the first page -- it may be a disk label.
973 if (dumpvp->v_flag & VISSWAP)
974 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
975 else
976 dumphdr->dump_start = DUMP_OFFSET;
978 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
979 dumphdr->dump_crashtime = gethrestime_sec();
980 dumphdr->dump_npages = 0;
981 dumphdr->dump_nvtop = 0;
982 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
983 dump_timeleft = dump_timeout;
985 if (panicstr) {
986 dumphdr->dump_flags &= ~DF_LIVE;
987 (void) fop_dumpctl(dumpvp, DUMP_FREE, NULL, NULL);
988 (void) fop_dumpctl(dumpvp, DUMP_ALLOC, NULL, NULL);
989 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
990 panicstr, panicargs);
993 if (dump_conflags & DUMP_ALL)
994 content = "all";
995 else if (dump_conflags & DUMP_CURPROC)
996 content = "kernel + curproc";
997 else
998 content = "kernel";
999 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
1000 dumphdr->dump_start, content);
1002 /* Make sure nodename is current */
1003 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
1006 * If this is a live dump, try to open a VCHR vnode for better
1007 * performance. We must take care to flush the buffer cache
1008 * first.
1010 if (!panicstr) {
1011 vnode_t *cdev_vp, *cmn_cdev_vp;
1013 ASSERT(dumpcfg.buf.cdev_vp == NULL);
1014 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
1015 if (cdev_vp != NULL) {
1016 cmn_cdev_vp = common_specvp(cdev_vp);
1017 if (fop_open(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
1018 == 0) {
1019 if (vn_has_cached_data(dumpvp))
1020 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
1021 B_INVAL | B_TRUNC, kcred);
1022 dumpcfg.buf.cdev_vp = cmn_cdev_vp;
1023 } else {
1024 VN_RELE(cdev_vp);
1030 * Store a hires timestamp so we can look it up during debugging.
1032 lbolt_debug_entry();
1035 * Leave room for the message and ereport save areas and terminal dump
1036 * header.
1038 dumpcfg.buf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
1039 DUMP_ERPTSIZE;
1042 * Write out the symbol table. It's no longer compressed,
1043 * so its 'size' and 'csize' are equal.
1045 dumpcfg.buf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
1046 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
1047 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
1050 * Write out the translation map.
1052 dumphdr->dump_map = dumpvp_flush();
1053 dump_as(&kas);
1054 dumphdr->dump_nvtop += dump_plat_addr();
1057 * call into hat, which may have unmapped pages that also need to
1058 * be in the dump
1060 hat_dump();
1062 if (dump_conflags & DUMP_ALL) {
1063 mutex_enter(&pidlock);
1065 for (npids = 0, p = practive; p != NULL; p = p->p_next)
1066 dumpcfg.pids[npids++] = p->p_pid;
1068 mutex_exit(&pidlock);
1070 for (pidx = 0; pidx < npids; pidx++)
1071 (void) dump_process(dumpcfg.pids[pidx]);
1073 dump_init_memlist_walker(&mlw);
1074 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1075 dump_timeleft = dump_timeout;
1076 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1078 * Some hypervisors do not have all pages available to
1079 * be accessed by the guest OS. Check for page
1080 * accessibility.
1082 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
1083 PLAT_HOLD_OK)
1084 continue;
1085 BT_SET(dumpcfg.bitmap, bitnum);
1087 dumphdr->dump_npages = dumpcfg.bitmapsize;
1088 dumphdr->dump_flags |= DF_ALL;
1090 } else if (dump_conflags & DUMP_CURPROC) {
1092 * Determine which pid is to be dumped. If we're panicking, we
1093 * dump the process associated with panic_thread (if any). If
1094 * this is a live dump, we dump the process associated with
1095 * curthread.
1097 npids = 0;
1098 if (panicstr) {
1099 if (panic_thread != NULL &&
1100 panic_thread->t_procp != NULL &&
1101 panic_thread->t_procp != &p0) {
1102 dumpcfg.pids[npids++] =
1103 panic_thread->t_procp->p_pid;
1105 } else {
1106 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
1109 if (npids && dump_process(dumpcfg.pids[0]) == 0)
1110 dumphdr->dump_flags |= DF_CURPROC;
1111 else
1112 dumphdr->dump_flags |= DF_KERNEL;
1114 } else {
1115 dumphdr->dump_flags |= DF_KERNEL;
1118 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
1121 * Write out the pfn table.
1123 dumphdr->dump_pfn = dumpvp_flush();
1124 dump_init_memlist_walker(&mlw);
1125 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1126 dump_timeleft = dump_timeout;
1127 if (!BT_TEST(dumpcfg.bitmap, bitnum))
1128 continue;
1129 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1130 ASSERT(pfn != PFN_INVALID);
1131 dumpvp_write(&pfn, sizeof (pfn_t));
1133 dump_plat_pfn();
1136 * Write out all the pages.
1137 * Map pages, copy them handling UEs, compress, and write them out.
1139 dumphdr->dump_data = dumpvp_flush();
1141 ASSERT(dumpcfg.page);
1142 bzero(&dumpcfg.perpage, sizeof (dumpcfg.perpage));
1144 start = gethrtime();
1145 cfg->iowaitts = start;
1147 if (panicstr)
1148 kmem_dump_begin();
1150 percent_done = 0;
1151 sec_done = 0;
1153 dump_init_memlist_walker(&mlw);
1154 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1155 dumpcsize_t csize;
1156 uint_t percent;
1157 int sec;
1159 dump_timeleft = dump_timeout;
1160 HRSTART(cfg->perpage, bitmap);
1161 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1162 HRSTOP(cfg->perpage, bitmap);
1163 continue;
1165 HRSTOP(cfg->perpage, bitmap);
1167 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1168 ASSERT(pfn != PFN_INVALID);
1170 HRSTART(cfg->perpage, map);
1171 hat_devload(kas.a_hat, dumpcfg.cmap, PAGESIZE, pfn, PROT_READ,
1172 HAT_LOAD_NOCONSIST);
1173 HRSTOP(cfg->perpage, map);
1175 dump_pagecopy(dumpcfg.cmap, dumpcfg.page);
1177 HRSTART(cfg->perpage, unmap);
1178 hat_unload(kas.a_hat, dumpcfg.cmap, PAGESIZE, HAT_UNLOAD);
1179 HRSTOP(cfg->perpage, unmap);
1181 HRSTART(dumpcfg.perpage, compress);
1182 csize = compress(dumpcfg.page, dumpcfg.lzbuf, PAGESIZE);
1183 HRSTOP(dumpcfg.perpage, compress);
1185 HRSTART(dumpcfg.perpage, write);
1186 dumpvp_write(&csize, sizeof (csize));
1187 dumpvp_write(dumpcfg.lzbuf, csize);
1188 HRSTOP(dumpcfg.perpage, write);
1190 if (dump_ioerr) {
1191 dumphdr->dump_flags &= ~DF_COMPLETE;
1192 dumphdr->dump_npages = cfg->npages;
1193 break;
1196 sec = (gethrtime() - start) / NANOSEC;
1197 percent = ++cfg->npages * 100LL / dumphdr->dump_npages;
1200 * Render a simple progress display on the system console to
1201 * make clear to the operator that the system has not hung.
1202 * Emit an update when dump progress has advanced by one
1203 * percent, or when no update has been drawn in the last
1204 * second.
1206 if (percent > percent_done || sec > sec_done) {
1207 percent_done = percent;
1208 sec_done = sec;
1210 uprintf("^\r%2d:%02d %3d%% done", sec / 60, sec % 60,
1211 percent_done);
1212 if (!panicstr)
1213 delay(1); /* let the output be sent */
1217 cfg->elapsed = gethrtime() - start;
1218 if (cfg->elapsed < 1)
1219 cfg->elapsed = 1;
1221 /* record actual pages dumped */
1222 dumphdr->dump_npages = cfg->npages;
1224 /* platform-specific data */
1225 dumphdr->dump_npages += dump_plat_data(dumpcfg.page);
1227 /* note any errors by clearing DF_COMPLETE */
1228 if (dump_ioerr || cfg->npages < dumphdr->dump_npages)
1229 dumphdr->dump_flags &= ~DF_COMPLETE;
1231 /* end of stream blocks */
1232 datatag = 0;
1233 dumpvp_write(&datatag, sizeof (datatag));
1235 bzero(&datahdr, sizeof (datahdr));
1237 /* buffer for metrics */
1238 buf = dumpcfg.page;
1239 size = MIN(PAGESIZE, DUMP_OFFSET - sizeof (dumphdr_t) -
1240 sizeof (dumpdatahdr_t));
1242 /* finish the kmem intercepts, collect kmem verbose info */
1243 if (panicstr) {
1244 datahdr.dump_metrics = kmem_dump_finish(buf, size);
1245 buf += datahdr.dump_metrics;
1246 size -= datahdr.dump_metrics;
1249 /* record in the header whether this is a fault-management panic */
1250 if (panicstr)
1251 dumphdr->dump_fm_panic = is_fm_panic();
1253 /* compression info in data header */
1254 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
1255 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
1256 datahdr.dump_maxcsize = PAGESIZE;
1257 datahdr.dump_maxrange = 1;
1258 datahdr.dump_nstreams = 1;
1259 datahdr.dump_clevel = 0;
1261 if (dump_metrics_on)
1262 datahdr.dump_metrics += dumpsys_metrics(buf, size);
1264 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
1267 * Write out the initial and terminal dump headers.
1269 dumpcfg.buf.vp_off = dumphdr->dump_start;
1270 dumpvp_write(dumphdr, sizeof (dumphdr_t));
1271 (void) dumpvp_flush();
1273 dumpcfg.buf.vp_limit = dumpvp_size;
1274 dumpcfg.buf.vp_off = dumpcfg.buf.vp_limit - DUMP_OFFSET;
1275 dumpvp_write(dumphdr, sizeof (dumphdr_t));
1276 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
1277 dumpvp_write(dumpcfg.page, datahdr.dump_metrics);
1279 (void) dumpvp_flush();
1281 uprintf("\r%3d%% done: %llu pages dumped, ",
1282 percent_done, (u_longlong_t)cfg->npages);
1284 if (dump_ioerr == 0) {
1285 uprintf("dump succeeded\n");
1286 } else {
1287 uprintf("dump failed: error %d\n", dump_ioerr);
1288 #ifdef DEBUG
1289 if (panicstr)
1290 debug_enter("dump failed");
1291 #endif
1295 * Write out all undelivered messages. This has to be the *last*
1296 * thing we do because the dump process itself emits messages.
1298 if (panicstr) {
1299 dump_summary();
1300 dump_ereports();
1301 dump_messages();
1304 ddi_sleep(2); /* let people see the 'done' message */
1305 dump_timeleft = 0;
1306 dump_ioerr = 0;
1308 /* restore settings after live dump completes */
1309 if (!panicstr) {
1310 /* release any VCHR open of the dump device */
1311 if (dumpcfg.buf.cdev_vp != NULL) {
1312 (void) fop_close(dumpcfg.buf.cdev_vp, FREAD | FWRITE, 1, 0,
1313 kcred, NULL);
1314 VN_RELE(dumpcfg.buf.cdev_vp);
1315 dumpcfg.buf.cdev_vp = NULL;
1321 * This function is called whenever the memory size, as represented
1322 * by the phys_install list, changes.
1324 void
1325 dump_resize()
1327 mutex_enter(&dump_lock);
1328 dumphdr_init();
1329 dumpbuf_resize();
1330 dump_update_clevel();
1331 mutex_exit(&dump_lock);
1335 * This function allows for dynamic resizing of a dump area. It assumes that
1336 * the underlying device has update its appropriate size(9P).
1339 dumpvp_resize()
1341 int error;
1342 vattr_t vattr;
1344 mutex_enter(&dump_lock);
1345 vattr.va_mask = VATTR_SIZE;
1346 if ((error = fop_getattr(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
1347 mutex_exit(&dump_lock);
1348 return (error);
1351 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
1352 mutex_exit(&dump_lock);
1353 return (ENOSPC);
1356 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1357 mutex_exit(&dump_lock);
1358 return (0);
1362 dump_set_uuid(const char *uuidstr)
1364 const char *ptr;
1365 int i;
1367 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
1368 return (EINVAL);
1370 /* uuid_parse is not common code so check manually */
1371 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
1372 switch (i) {
1373 case 8:
1374 case 13:
1375 case 18:
1376 case 23:
1377 if (*ptr != '-')
1378 return (EINVAL);
1379 break;
1381 default:
1382 if (!isxdigit(*ptr))
1383 return (EINVAL);
1384 break;
1388 if (dump_osimage_uuid[0] != '\0')
1389 return (EALREADY);
1391 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
1393 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
1394 dump_osimage_uuid);
1396 return (0);
1399 const char *
1400 dump_get_uuid(void)
1402 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");