uts: make emu10k non-verbose
[unleashed.git] / kernel / os / dumpsubr.c
blob4c24f08fec1b195804780de8c28db0a9dc3d7b21
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2014, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
25 * Copyright 2016 Joyent, Inc.
28 #include <sys/types.h>
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/vm.h>
32 #include <sys/proc.h>
33 #include <sys/file.h>
34 #include <sys/conf.h>
35 #include <sys/kmem.h>
36 #include <sys/mem.h>
37 #include <sys/mman.h>
38 #include <sys/vnode.h>
39 #include <sys/errno.h>
40 #include <sys/memlist.h>
41 #include <sys/dumphdr.h>
42 #include <sys/dumpadm.h>
43 #include <sys/ksyms.h>
44 #include <sys/compress.h>
45 #include <sys/stream.h>
46 #include <sys/strsun.h>
47 #include <sys/cmn_err.h>
48 #include <sys/bitmap.h>
49 #include <sys/modctl.h>
50 #include <sys/utsname.h>
51 #include <sys/systeminfo.h>
52 #include <sys/vmem.h>
53 #include <sys/log.h>
54 #include <sys/var.h>
55 #include <sys/debug.h>
56 #include <sys/sunddi.h>
57 #include <sys/fs_subr.h>
58 #include <sys/fs/snode.h>
59 #include <sys/ontrap.h>
60 #include <sys/panic.h>
61 #include <sys/dkio.h>
62 #include <sys/vtoc.h>
63 #include <sys/errorq.h>
64 #include <sys/fm/util.h>
65 #include <sys/fs/zfs.h>
67 #include <vm/hat.h>
68 #include <vm/as.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/seg.h>
72 #include <vm/seg_kmem.h>
73 #include <sys/clock_impl.h>
74 #include <sys/hold_page.h>
/*
 * NOTE(review): this text was recovered from a scrape; block-comment
 * delimiters and some blank lines were lost in extraction.  Only
 * comments are added here -- the declarations are unchanged.
 */
77 * exported vars
79 kmutex_t dump_lock; /* lock for dump configuration */
80 dumphdr_t *dumphdr; /* dump header */
81 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
82 vnode_t *dumpvp; /* dump device vnode pointer */
83 uoff_t dumpvp_size; /* size of dump device, in bytes */
84 char *dumppath; /* pathname of dump device */
85 int dump_timeout = 120; /* timeout for dumping pages */
86 int dump_timeleft; /* portion of dump_timeout remaining */
87 int dump_ioerr; /* dump i/o error */
88 char *dump_stack_scratch; /* scratch area for saving stack summary */
91 * Tunables for dump. These can be set via /etc/system.
93 * dump_metrics_on if set, metrics are collected in the kernel, passed
94 * to savecore via the dump file, and recorded by savecore in
95 * METRICS.txt.
98 /* tunables for pre-reserved heap */
/*
 * These two combine into the byte count handed to kmem_dump_init() by
 * dump_update_clevel(): dump_kmem_permap + dump_kmem_pages * PAGESIZE.
 */
99 uint_t dump_kmem_permap = 1024;
100 uint_t dump_kmem_pages = 8;
103 * Compression metrics are accumulated nano-second subtotals. The
104 * results are normalized by the number of pages dumped. A report is
105 * generated when dumpsys() completes and is saved in the dump image
106 * after the trailing dump header.
108 * Metrics are always collected. Set the variable dump_metrics_on to
109 * cause metrics to be saved in the crash file, where savecore will
110 * save it in the file METRICS.txt.
/*
 * PERPAGES enumerates the per-page timing categories; each PERPAGE(x)
 * expansion becomes one hrtime_t field of perpage_t below.
 */
112 #define PERPAGES \
113 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
114 PERPAGE(compress) \
115 PERPAGE(write)
117 typedef struct perpage {
118 #define PERPAGE(x) hrtime_t x;
119 PERPAGES
120 #undef PERPAGE
121 } perpage_t;
124 * If dump_metrics_on is set to 1, the timing information is passed to
125 * savecore via the crash file, where it is appended to the file
126 * dump-dir/METRICS.txt.
128 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
/*
 * HRSTART/HRSTOP bracket a timed section: the start stamp is kept in
 * the companion "<v>ts" perpage_t, the delta accumulates into v.m.
 */
130 #define HRSTART(v, m) v##ts.m = gethrtime()
131 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
/* 36-character OS image UUID plus terminating NUL */
133 static char dump_osimage_uuid[36 + 1];
/* kernel-local ctype-style helpers */
135 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
136 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
137 ((ch) >= 'A' && (ch) <= 'F'))
140 * configuration vars for dumpsys
/*
 * All dump state is gathered in this single struct; the one static
 * instance (dumpcfg, below) is shared by configuration-time code
 * (under dump_lock) and by dumpsys() at crash time.
 */
142 typedef struct dumpcfg {
143 char *page; /* buffer for page copy */
144 char *lzbuf; /* lzjb output */
146 char *cmap; /* array of input (map) buffers */
147 ulong_t *bitmap; /* bitmap for marking pages to dump */
148 pgcnt_t bitmapsize; /* size of bitmap */
149 pid_t *pids; /* list of process IDs at dump time */
152 * statistics
154 perpage_t perpage; /* per page metrics */
155 perpage_t perpagets; /* per page metrics (timestamps) */
156 pgcnt_t npages; /* subtotal of pages dumped */
157 pgcnt_t pages_mapped; /* subtotal of pages mapped */
158 pgcnt_t pages_used; /* subtotal of pages used per map */
159 size_t nwrite; /* subtotal of bytes written */
160 hrtime_t elapsed; /* elapsed time when completed */
161 hrtime_t iotime; /* time spent writing nwrite bytes */
162 hrtime_t iowait; /* time spent waiting for output */
163 hrtime_t iowaitts; /* iowait timestamp */
166 * I/O buffer
168 * There is one I/O buffer used by dumpvp_write and dumvp_flush. It
169 * is sized according to the optimum device transfer speed.
171 struct {
172 vnode_t *cdev_vp; /* VCHR open of the dump device */
173 len_t vp_limit; /* maximum write offset */
174 offset_t vp_off; /* current dump device offset */
175 char *cur; /* dump write pointer */
176 char *start; /* dump buffer address */
177 char *end; /* dump buffer end */
178 size_t size; /* size of dump buf in bytes */
179 size_t iosize; /* best transfer size for device */
180 } buf;
181 } dumpcfg_t;
183 static dumpcfg_t dumpcfg; /* config vars */
186 * The dump I/O buffer must be at least one page, at most xfer_size bytes,
187 * and should scale with physmem in between. The transfer size passed in
188 * will either represent a global default (maxphys) or the best size for the
189 * device. The size of the dump I/O buffer is limited by dumpbuf_limit (8MB
190 * by default) because the dump performance saturates beyond a certain size.
191 * The default is to select 1/4096 of the memory.
193 static int dumpbuf_fraction = 12; /* memory size scale factor */
194 static size_t dumpbuf_limit = 8 << 20; /* max I/O buf size */
196 static size_t
197 dumpbuf_iosize(size_t xfer_size)
199 size_t iosize = ptob(physmem >> dumpbuf_fraction);
201 if (iosize < PAGESIZE)
202 iosize = PAGESIZE;
203 else if (iosize > xfer_size)
204 iosize = xfer_size;
205 if (iosize > dumpbuf_limit)
206 iosize = dumpbuf_limit;
207 return (iosize & PAGEMASK);
211 * resize the I/O buffer
213 static void
214 dumpbuf_resize(void)
216 char *old_buf = dumpcfg.buf.start;
217 size_t old_size = dumpcfg.buf.size;
218 char *new_buf;
219 size_t new_size;
221 ASSERT(MUTEX_HELD(&dump_lock));
223 new_size = dumpbuf_iosize(MAX(dumpcfg.buf.iosize, maxphys));
224 if (new_size <= old_size)
225 return; /* no need to reallocate buffer */
227 new_buf = kmem_alloc(new_size, KM_SLEEP);
228 dumpcfg.buf.size = new_size;
229 dumpcfg.buf.start = new_buf;
230 dumpcfg.buf.end = new_buf + new_size;
231 kmem_free(old_buf, old_size);
235 * dump_update_clevel is called when dumpadm configures the dump device.
236 * Allocate the minimum configuration for now.
238 * When the dump file is configured we reserve a minimum amount of
239 * memory for use at crash time. But we reserve VA for all the memory
240 * we really want in order to do the fastest dump possible. The VA is
241 * backed by pages not being dumped, according to the bitmap. If
242 * there is insufficient spare memory, however, we fall back to the
243 * minimum.
245 * Live dump (savecore -L) always uses the minimum config.
247 static void
248 dump_update_clevel()
250 dumpcfg_t *old = &dumpcfg;
251 dumpcfg_t newcfg = *old;
252 dumpcfg_t *new = &newcfg;
254 ASSERT(MUTEX_HELD(&dump_lock));
257 * Free the previously allocated bufs and VM.
259 if (old->lzbuf)
260 kmem_free(old->lzbuf, PAGESIZE);
261 if (old->page)
262 kmem_free(old->page, PAGESIZE);
264 if (old->cmap)
265 /* VM space for mapping pages */
266 vmem_xfree(heap_arena, old->cmap, PAGESIZE);
269 * Allocate new data structures and buffers, and also figure the max
270 * desired size.
272 new->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
273 new->page = kmem_alloc(PAGESIZE, KM_SLEEP);
275 new->cmap = vmem_xalloc(heap_arena, PAGESIZE, PAGESIZE,
276 0, 0, NULL, NULL, VM_SLEEP);
279 * Reserve memory for kmem allocation calls made during crash
280 * dump. The hat layer allocates memory for each mapping
281 * created, and the I/O path allocates buffers and data structs.
282 * Add a few pages for safety.
284 kmem_dump_init(dump_kmem_permap + (dump_kmem_pages * PAGESIZE));
286 /* set new config pointers */
287 *old = *new;
291 * Define a struct memlist walker to optimize bitnum to pfn
292 * lookup. The walker maintains the state of the list traversal.
/*
 * Initialize with dump_init_memlist_walker(), then feed (ideally
 * ascending) bit numbers to dump_bitnum_to_pfn().
 */
294 typedef struct dumpmlw {
295 struct memlist *mp; /* current memlist */
296 pgcnt_t basenum; /* bitnum base offset */
297 pgcnt_t mppages; /* current memlist size */
298 pgcnt_t mpleft; /* size to end of current memlist */
299 pfn_t mpaddr; /* first pfn in memlist */
300 } dumpmlw_t;
302 /* initialize the walker */
303 static inline void
304 dump_init_memlist_walker(dumpmlw_t *pw)
306 pw->mp = phys_install;
307 pw->basenum = 0;
308 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
309 pw->mpleft = pw->mppages;
310 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
314 * Lookup pfn given bitnum. The memlist can be quite long on some
315 * systems (e.g.: one per board). To optimize sequential lookups, the
316 * caller initializes and presents a memlist walker.
/*
 * Translate a dump-bitmap bit number to a physical page number by
 * walking phys_install from the walker's current position.  Sequential
 * lookups are O(1) amortized; returns PFN_INVALID when bitnum lies
 * beyond the end of the list.  Advances/updates *pw as a side effect.
 */
318 static pfn_t
319 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
/* make bitnum relative to the walker's current memlist segment */
321 bitnum -= pw->basenum;
322 while (pw->mp != NULL) {
323 if (bitnum < pw->mppages) {
324 pw->mpleft = pw->mppages - bitnum;
325 return (pw->mpaddr + bitnum);
/* past this segment: step to the next memlist entry */
327 bitnum -= pw->mppages;
328 pw->basenum += pw->mppages;
329 pw->mp = pw->mp->ml_next;
330 if (pw->mp != NULL) {
331 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
332 pw->mpleft = pw->mppages;
333 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
336 return (PFN_INVALID);
339 static pgcnt_t
340 dump_pfn_to_bitnum(pfn_t pfn)
342 struct memlist *mp;
343 pgcnt_t bitnum = 0;
345 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
346 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
347 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
348 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
349 bitnum += mp->ml_size >> PAGESHIFT;
351 return ((pgcnt_t)-1);
/*
 * One-time (and re-entrant) initialization of the dump header and the
 * static dump buffers; caller must hold dump_lock.  The dumphdr and
 * its companion buffers are allocated only on the first call; the page
 * bitmap is (re)allocated whenever the installed page count changes.
 */
354 static void
355 dumphdr_init(void)
357 pgcnt_t npages;
359 ASSERT(MUTEX_HELD(&dump_lock));
361 if (dumphdr == NULL) {
362 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
363 dumphdr->dump_magic = DUMP_MAGIC;
364 dumphdr->dump_version = DUMP_VERSION;
365 dumphdr->dump_wordsize = DUMP_WORDSIZE;
366 dumphdr->dump_pageshift = PAGESHIFT;
367 dumphdr->dump_pagesize = PAGESIZE;
368 dumphdr->dump_utsname = utsname;
369 (void) strcpy(dumphdr->dump_platform, platform);
370 dumpcfg.buf.size = dumpbuf_iosize(maxphys);
371 dumpcfg.buf.start = kmem_alloc(dumpcfg.buf.size, KM_SLEEP);
372 dumpcfg.buf.end = dumpcfg.buf.start + dumpcfg.buf.size;
/* one pid slot per possible process (v.v_proc) for DUMP_ALL/CURPROC */
373 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
374 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
/*
 * strncpy does not NUL-terminate on a full-length copy, but
 * dumphdr came from kmem_zalloc so trailing bytes start as 0;
 * safe only as long as the uuid fits the field -- see
 * dump_set_uuid()/dump_get_uuid().
 */
375 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
376 sizeof (dumphdr->dump_uuid));
379 npages = num_phys_pages();
381 if (dumpcfg.bitmapsize != npages) {
382 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
384 if (dumpcfg.bitmap != NULL)
385 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
386 bitmapsize));
387 dumpcfg.bitmap = map;
388 dumpcfg.bitmapsize = npages;
393 * Establish a new dump device.
/*
 * Establish a new dump device.  Validates that vp is plausible (an
 * unmounted real device with a cb_dump routine, or a swapfile whose
 * filesystem supports VOP_DUMP), then, unless justchecking, replaces
 * any current dump device, records path/size, probes the character
 * device for its optimal transfer size, and "dumpifies" zvols.
 * Returns 0 or an errno; caller must hold dump_lock.
 */
396 dumpinit(vnode_t *vp, char *name, int justchecking)
398 vnode_t *cvp;
399 vattr_t vattr;
400 vnode_t *cdev_vp;
401 int error = 0;
403 ASSERT(MUTEX_HELD(&dump_lock));
405 dumphdr_init();
407 cvp = common_specvp(vp);
408 if (cvp == dumpvp)
409 return (0);
412 * Determine whether this is a plausible dump device. We want either:
413 * (1) a real device that's not mounted and has a cb_dump routine, or
414 * (2) a swapfile on some filesystem that has a vop_dump routine.
416 if ((error = fop_open(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
417 return (error);
419 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
420 if ((error = fop_getattr(cvp, &vattr, 0, kcred, NULL)) == 0) {
421 if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
422 if (devopsp[getmajor(vattr.va_rdev)]->
423 devo_cb_ops->cb_dump == nodev)
424 error = ENOTSUP;
425 else if (vfs_devismounted(vattr.va_rdev))
426 error = EBUSY;
/* a zvol currently used as swap cannot also be the dump device */
427 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
428 ZFS_DRIVER) == 0 &&
429 IS_SWAPVP(common_specvp(cvp)))
430 error = EBUSY;
431 } else {
432 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
433 !IS_SWAPVP(cvp))
434 error = ENOTSUP;
/* must at least fit the message log and ereport save areas */
438 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
439 error = ENOSPC;
441 if (error || justchecking) {
442 (void) fop_close(cvp, FREAD | FWRITE, 1, (offset_t)0,
443 kcred, NULL);
444 return (error);
447 VN_HOLD(cvp);
449 if (dumpvp != NULL)
450 dumpfini(); /* unconfigure the old dump device */
452 dumpvp = cvp;
/* round the usable size down to a DUMP_OFFSET boundary */
453 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
454 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
455 (void) strcpy(dumppath, name);
456 dumpcfg.buf.iosize = 0;
459 * If the dump device is a block device, attempt to open up the
460 * corresponding character device and determine its maximum transfer
461 * size. We use this information to potentially resize dump buffer
462 * to a larger and more optimal size for performing i/o to the dump
463 * device.
465 if (cvp->v_type == VBLK &&
466 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
467 if (fop_open(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
468 size_t blk_size;
469 struct dk_cinfo dki;
470 struct dk_minfo minf;
472 if (fop_ioctl(cdev_vp, DKIOCGMEDIAINFO,
473 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
474 == 0 && minf.dki_lbsize != 0)
475 blk_size = minf.dki_lbsize;
476 else
477 blk_size = DEV_BSIZE;
479 if (fop_ioctl(cdev_vp, DKIOCINFO, (intptr_t)&dki,
480 FKIOCTL, kcred, NULL, NULL) == 0) {
481 dumpcfg.buf.iosize = dki.dki_maxtransfer * blk_size;
482 dumpbuf_resize();
485 * If we are working with a zvol then dumpify it
486 * if it's not being used as swap.
488 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
489 if (IS_SWAPVP(common_specvp(cvp)))
490 error = EBUSY;
491 else if ((error = fop_ioctl(cdev_vp,
492 DKIOCDUMPINIT, (intptr_t)NULL, FKIOCTL,
493 kcred, NULL, NULL)) != 0)
494 dumpfini();
497 (void) fop_close(cdev_vp, FREAD | FWRITE, 1, 0,
498 kcred, NULL);
501 VN_RELE(cdev_vp);
504 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
506 dump_update_clevel();
508 return (error);
/*
 * Unconfigure the current dump device: free the saved path, give a
 * zvol-backed device a DKIOCDUMPFINI cleanup callback, then close and
 * release dumpvp and clear the globals.  Caller must hold dump_lock,
 * and dumpvp/dumppath must be configured (they are dereferenced
 * unconditionally).
 */
511 void
512 dumpfini(void)
514 vattr_t vattr;
515 boolean_t is_zfs = B_FALSE;
516 vnode_t *cdev_vp;
517 ASSERT(MUTEX_HELD(&dump_lock));
519 kmem_free(dumppath, strlen(dumppath) + 1);
522 * Determine if we are using zvols for our dump device
524 vattr.va_mask = AT_RDEV;
525 if (fop_getattr(dumpvp, &vattr, 0, kcred, NULL) == 0) {
526 is_zfs = (getmajor(vattr.va_rdev) ==
527 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
531 * If we have a zvol dump device then we call into zfs so
532 * that it may have a chance to cleanup.
534 if (is_zfs &&
535 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
536 if (fop_open(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
537 (void) fop_ioctl(cdev_vp, DKIOCDUMPFINI, (intptr_t)NULL,
538 FKIOCTL, kcred, NULL, NULL);
539 (void) fop_close(cdev_vp, FREAD | FWRITE, 1, 0,
540 kcred, NULL);
542 VN_RELE(cdev_vp);
/* drop the hold and the open taken by dumpinit() */
545 (void) fop_close(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
547 VN_RELE(dumpvp);
549 dumpvp = NULL;
550 dumpvp_size = 0;
551 dumppath = NULL;
/*
 * Flush the dump I/O buffer to the dump device, rounding the write up
 * to a whole page.  Uses the driver's dump entry point when panicking
 * and vn_rdwr() for live dumps.  Records ENOSPC in dump_ioerr when the
 * write would exceed vp_limit, accumulates I/O-time/I/O-wait metrics,
 * resets the buffer, and returns the new device offset.
 */
554 static offset_t
555 dumpvp_flush(void)
557 size_t size = P2ROUNDUP(dumpcfg.buf.cur - dumpcfg.buf.start, PAGESIZE);
558 hrtime_t iotime;
559 int err;
561 if (dumpcfg.buf.vp_off + size > dumpcfg.buf.vp_limit) {
562 dump_ioerr = ENOSPC;
563 dumpcfg.buf.vp_off = dumpcfg.buf.vp_limit;
564 } else if (size != 0) {
565 iotime = gethrtime();
/* time since the previous flush completed counts as compute wait */
566 dumpcfg.iowait += iotime - dumpcfg.iowaitts;
567 if (panicstr)
568 err = fop_dump(dumpvp, dumpcfg.buf.start,
569 lbtodb(dumpcfg.buf.vp_off), btod(size), NULL);
570 else
571 err = vn_rdwr(UIO_WRITE, dumpcfg.buf.cdev_vp != NULL ?
572 dumpcfg.buf.cdev_vp : dumpvp, dumpcfg.buf.start, size,
573 dumpcfg.buf.vp_off, UIO_SYSSPACE, 0, dumpcfg.buf.vp_limit,
574 kcred, 0);
/* remember only the first I/O error */
575 if (err && dump_ioerr == 0)
576 dump_ioerr = err;
577 dumpcfg.iowaitts = gethrtime();
578 dumpcfg.iotime += dumpcfg.iowaitts - iotime;
579 dumpcfg.nwrite += size;
580 dumpcfg.buf.vp_off += size;
582 dumpcfg.buf.cur = dumpcfg.buf.start;
583 dump_timeleft = dump_timeout;
584 return (dumpcfg.buf.vp_off);
587 /* maximize write speed by keeping seek offset aligned with size */
/*
 * Buffered write to the dump device.  Copies into the I/O buffer and
 * flushes when it fills.  To keep device writes aligned with the
 * buffer size (maximizing throughput), a flush that would land at a
 * misaligned device offset writes only up to the next boundary and
 * slides the unwritten tail back to the start of the buffer.
 */
588 void
589 dumpvp_write(const void *va, size_t size)
591 size_t len, off, sz;
593 while (size != 0) {
594 len = MIN(size, dumpcfg.buf.end - dumpcfg.buf.cur);
595 if (len == 0) {
/* buffer full: off is the device offset's phase within buf.size */
596 off = P2PHASE(dumpcfg.buf.vp_off, dumpcfg.buf.size);
597 if (off == 0 || !ISP2(dumpcfg.buf.size)) {
598 (void) dumpvp_flush();
599 } else {
/* flush only sz bytes, then move the remaining off bytes down */
600 sz = dumpcfg.buf.size - off;
601 dumpcfg.buf.cur = dumpcfg.buf.start + sz;
602 (void) dumpvp_flush();
603 ovbcopy(dumpcfg.buf.start + sz, dumpcfg.buf.start, off);
604 dumpcfg.buf.cur += off;
606 } else {
607 bcopy(va, dumpcfg.buf.cur, len);
608 va = (char *)va + len;
609 dumpcfg.buf.cur += len;
610 size -= len;
615 /*ARGSUSED*/
616 static void
617 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
619 dumpvp_write(src, size);
623 * Mark 'pfn' in the bitmap and dump its translation table entry.
625 void
626 dump_addpage(struct as *as, void *va, pfn_t pfn)
628 mem_vtop_t mem_vtop;
629 pgcnt_t bitnum;
631 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
632 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
633 dumphdr->dump_npages++;
634 BT_SET(dumpcfg.bitmap, bitnum);
636 dumphdr->dump_nvtop++;
637 mem_vtop.m_as = as;
638 mem_vtop.m_va = va;
639 mem_vtop.m_pfn = pfn;
640 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
642 dump_timeleft = dump_timeout;
646 * Mark 'pfn' in the bitmap
648 void
649 dump_page(pfn_t pfn)
651 pgcnt_t bitnum;
653 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
654 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
655 dumphdr->dump_npages++;
656 BT_SET(dumpcfg.bitmap, bitnum);
659 dump_timeleft = dump_timeout;
663 * Dump the <as, va, pfn> information for a given address space.
664 * segop_dump() will call dump_addpage() for each page in the segment.
/*
 * Dump the <as, va, pfn> information for every segment of an address
 * space; segop_dump() calls dump_addpage() per page.  Holds the AS
 * lock as reader for the walk; a segment whose s_as does not match
 * aborts the loop and is reported as corruption after the lock drops.
 */
666 static void
667 dump_as(struct as *as)
669 struct seg *seg;
671 AS_LOCK_ENTER(as, RW_READER);
672 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
673 if (seg->s_as != as)
674 break;
675 if (seg->s_ops == NULL)
676 continue;
677 segop_dump(seg);
679 AS_LOCK_EXIT(as);
/* non-NULL here means the walk stopped on an inconsistent segment */
681 if (seg != NULL)
682 cmn_err(CE_WARN, "invalid segment %p in address space %p",
683 (void *)seg, (void *)as);
686 static int
687 dump_process(pid_t pid)
689 proc_t *p = sprlock(pid);
691 if (p == NULL)
692 return (-1);
693 if (p->p_as != &kas) {
694 mutex_exit(&p->p_lock);
695 dump_as(p->p_as);
696 mutex_enter(&p->p_lock);
699 sprunlock(p);
701 return (0);
705 * The following functions (dump_summary(), dump_ereports(), and
706 * dump_messages()), write data to an uncompressed area within the
707 * crashdump. The layout of these is
709 * +------------------------------------------------------------+
710 * | compressed pages | summary | ereports | messages |
711 * +------------------------------------------------------------+
713 * With the advent of saving a compressed crash dump by default, we
714 * need to save a little more data to describe the failure mode in
715 * an uncompressed buffer available before savecore uncompresses
716 * the dump. Initially this is a copy of the stack trace. Additional
717 * summary information should be added here.
/*
 * Write the uncompressed summary area (currently the panic stack
 * trace) into its reserved slot near the end of the dump device so
 * savecore can read it without decompressing.  A zero sd_magic record
 * terminates the area.  No-op if the dump is not configured.
 */
720 void
721 dump_summary(void)
723 uoff_t dumpvp_start;
724 summary_dump_t sd;
726 if (dumpvp == NULL || dumphdr == NULL)
727 return;
729 dumpcfg.buf.cur = dumpcfg.buf.start;
/* summary sits just below the ereport and message log save areas */
731 dumpcfg.buf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
732 DUMP_ERPTSIZE);
733 dumpvp_start = dumpcfg.buf.vp_limit - DUMP_SUMMARYSIZE;
734 dumpcfg.buf.vp_off = dumpvp_start;
736 sd.sd_magic = SUMMARY_MAGIC;
737 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
738 dumpvp_write(&sd, sizeof (sd));
739 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);
741 sd.sd_magic = 0; /* indicate end of summary */
742 dumpvp_write(&sd, sizeof (sd));
743 (void) dumpvp_flush();
/*
 * Write pending FMA ereports into their reserved uncompressed area of
 * the dump device; a zeroed erpt_dump_t terminates the list.  For live
 * dumps the written pages are invalidated from the page cache so stale
 * data is not read back later.
 */
746 void
747 dump_ereports(void)
749 uoff_t dumpvp_start;
750 erpt_dump_t ed;
752 if (dumpvp == NULL || dumphdr == NULL)
753 return;
755 dumpcfg.buf.cur = dumpcfg.buf.start;
756 dumpcfg.buf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
757 dumpvp_start = dumpcfg.buf.vp_limit - DUMP_ERPTSIZE;
758 dumpcfg.buf.vp_off = dumpvp_start;
760 fm_ereport_dump();
/* panic-queued errors are only drained when actually panicking */
761 if (panicstr)
762 errorq_dump();
764 bzero(&ed, sizeof (ed)); /* indicate end of ereports */
765 dumpvp_write(&ed, sizeof (ed));
766 (void) dumpvp_flush();
768 if (!panicstr) {
769 (void) fop_putpage(dumpvp, dumpvp_start,
770 (size_t)(dumpcfg.buf.vp_off - dumpvp_start),
771 B_INVAL | B_FORCE, kcred, NULL);
/*
 * Write the kernel message log into its reserved uncompressed area of
 * the dump device.  The console log queue is walked from the tail
 * (oldest message) toward log_consq by repeatedly scanning for the
 * queue whose q_next is the previously emitted one.  Each message is
 * framed by a log_dump_t with checksums; a zero ld_magic terminates.
 */
775 void
776 dump_messages(void)
778 log_dump_t ld;
779 mblk_t *mctl, *mdata;
780 queue_t *q, *qlast;
781 uoff_t dumpvp_start;
783 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
784 return;
786 dumpcfg.buf.cur = dumpcfg.buf.start;
787 dumpcfg.buf.vp_limit = dumpvp_size - DUMP_OFFSET;
788 dumpvp_start = dumpcfg.buf.vp_limit - DUMP_LOGSIZE;
789 dumpcfg.buf.vp_off = dumpvp_start;
791 qlast = NULL;
792 do {
/* find the queue furthest from log_consq not yet emitted */
793 for (q = log_consq; q->q_next != qlast; q = q->q_next)
794 continue;
795 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
796 dump_timeleft = dump_timeout;
797 mdata = mctl->b_cont;
798 ld.ld_magic = LOG_MAGIC;
799 ld.ld_msgsize = MBLKL(mctl->b_cont);
800 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
801 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
802 dumpvp_write(&ld, sizeof (ld));
803 dumpvp_write(mctl->b_rptr, MBLKL(mctl));
804 dumpvp_write(mdata->b_rptr, MBLKL(mdata));
806 } while ((qlast = q) != log_consq);
808 ld.ld_magic = 0; /* indicate end of messages */
809 dumpvp_write(&ld, sizeof (ld));
810 (void) dumpvp_flush();
/* live dump: invalidate cached pages covering what we just wrote */
811 if (!panicstr) {
812 (void) fop_putpage(dumpvp, dumpvp_start,
813 (size_t)(dumpcfg.buf.vp_off - dumpvp_start),
814 B_INVAL | B_FORCE, kcred, NULL);
819 * Copy pages, trapping ECC errors. Also, for robustness, trap data
820 * access in case something goes wrong in the hat layer and the
821 * mapping is broken.
/*
 * Copy one page word-by-word while trapping ECC errors and bad
 * addresses.  On a trap, on_trap() returns non-zero a second time at
 * the top of the function; the faulting word is replaced with a
 * recognizable 0x00badecc/0x00badadd marker and the copy resumes at
 * the next word.  'w' and 'ueoff' are volatile so their values survive
 * the trap-induced re-entry.
 */
823 static void
824 dump_pagecopy(void *src, void *dst)
826 long *wsrc = (long *)src;
827 long *wdst = (long *)dst;
828 const ulong_t ncopies = PAGESIZE / sizeof (long);
829 volatile int w = 0;
830 volatile int ueoff = -1;
831 on_trap_data_t otd;
833 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
/* remember the byte offset of the first uncorrectable word */
834 if (ueoff == -1)
835 ueoff = w * sizeof (long);
836 /* report "bad ECC" or "bad address" */
837 #ifdef _LP64
838 if (otd.ot_trap & OT_DATA_EC)
839 wdst[w++] = 0x00badecc00badecc;
840 else
841 wdst[w++] = 0x00badadd00badadd;
842 #else
843 if (otd.ot_trap & OT_DATA_EC)
844 wdst[w++] = 0x00badecc;
845 else
846 wdst[w++] = 0x00badadd;
847 #endif
849 while (w < ncopies) {
850 wdst[w] = wsrc[w];
851 w++;
853 no_trap();
/*
 * Format the dump-performance report (CSV-ish "name,value" lines) into
 * buf, truncating safely at 'size' bytes via the P() macro.  Returns
 * the number of bytes written; the unused tail of buf is zeroed.
 * Called after dumpsys() completes; savecore stores the text as
 * METRICS.txt.
 */
856 size_t
857 dumpsys_metrics(char *buf, size_t size)
859 dumpcfg_t *cfg = &dumpcfg;
860 int compress_ratio;
861 int sec, iorate;
862 char *e = buf + size;
863 char *p = buf;
/* clamp elapsed time and I/O time to >= 1 to avoid divide-by-zero */
865 sec = cfg->elapsed / (1000 * 1000 * 1000ULL);
866 if (sec < 1)
867 sec = 1;
869 if (cfg->iotime < 1)
870 cfg->iotime = 1;
/* iorate and compress_ratio are scaled by 100 for two decimals */
871 iorate = (cfg->nwrite * 100000ULL) / cfg->iotime;
873 compress_ratio = 100LL * cfg->npages / btopr(cfg->nwrite + 1);
875 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
877 P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
878 P("Master cpu_id,%d\n", CPU->cpu_id);
879 P("dump_flags,0x%x\n", dumphdr->dump_flags);
880 P("dump_ioerr,%d\n", dump_ioerr);
882 P("Compression type,serial lzjb\n");
883 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
884 100);
886 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
887 P("..total bytes,%lld\n", (u_longlong_t)cfg->nwrite);
888 P("..total nsec,%lld\n", (u_longlong_t)cfg->iotime);
889 P("dumpbuf.iosize,%ld\n", dumpcfg.buf.iosize);
890 P("dumpbuf.size,%ld\n", dumpcfg.buf.size);
892 P("Dump pages/sec,%llu\n", (u_longlong_t)cfg->npages / sec);
893 P("Dump pages,%llu\n", (u_longlong_t)cfg->npages);
894 P("Dump time,%d\n", sec);
896 if (cfg->pages_mapped > 0)
897 P("per-cent map utilization,%d\n", (int)((100 * cfg->pages_used)
898 / cfg->pages_mapped));
900 P("\nPer-page metrics:\n");
901 if (cfg->npages > 0) {
902 #define PERPAGE(x) \
903 P("%s nsec/page,%d\n", #x, (int)(cfg->perpage.x / cfg->npages));
904 PERPAGES;
905 #undef PERPAGE
907 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(cfg->iowait /
908 cfg->npages));
910 #undef P
/* zero the remainder so the caller can treat buf as NUL-terminated */
911 if (p < e)
912 bzero(p, e - p);
913 return (p - buf);
917 * Dump the system.
919 void
920 dumpsys(void)
922 dumpcfg_t *cfg = &dumpcfg;
923 uint_t percent_done; /* dump progress reported */
924 int sec_done;
925 hrtime_t start; /* start time */
926 pfn_t pfn;
927 pgcnt_t bitnum;
928 proc_t *p;
929 pid_t npids, pidx;
930 char *content;
931 char *buf;
932 size_t size;
933 dumpmlw_t mlw;
934 dumpcsize_t datatag;
935 dumpdatahdr_t datahdr;
937 if (dumpvp == NULL || dumphdr == NULL) {
938 uprintf("skipping system dump - no dump device configured\n");
939 return;
941 dumpcfg.buf.cur = dumpcfg.buf.start;
943 /* clear the sync variables */
944 cfg->npages = 0;
945 cfg->pages_mapped = 0;
946 cfg->pages_used = 0;
947 cfg->nwrite = 0;
948 cfg->elapsed = 0;
949 cfg->iotime = 0;
950 cfg->iowait = 0;
951 cfg->iowaitts = 0;
954 * Calculate the starting block for dump. If we're dumping on a
955 * swap device, start 1/5 of the way in; otherwise, start at the
956 * beginning. And never use the first page -- it may be a disk label.
958 if (dumpvp->v_flag & VISSWAP)
959 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
960 else
961 dumphdr->dump_start = DUMP_OFFSET;
963 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
964 dumphdr->dump_crashtime = gethrestime_sec();
965 dumphdr->dump_npages = 0;
966 dumphdr->dump_nvtop = 0;
967 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
968 dump_timeleft = dump_timeout;
970 if (panicstr) {
971 dumphdr->dump_flags &= ~DF_LIVE;
972 (void) fop_dumpctl(dumpvp, DUMP_FREE, NULL, NULL);
973 (void) fop_dumpctl(dumpvp, DUMP_ALLOC, NULL, NULL);
974 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
975 panicstr, panicargs);
978 if (dump_conflags & DUMP_ALL)
979 content = "all";
980 else if (dump_conflags & DUMP_CURPROC)
981 content = "kernel + curproc";
982 else
983 content = "kernel";
984 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
985 dumphdr->dump_start, content);
987 /* Make sure nodename is current */
988 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
991 * If this is a live dump, try to open a VCHR vnode for better
992 * performance. We must take care to flush the buffer cache
993 * first.
995 if (!panicstr) {
996 vnode_t *cdev_vp, *cmn_cdev_vp;
998 ASSERT(dumpcfg.buf.cdev_vp == NULL);
999 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
1000 if (cdev_vp != NULL) {
1001 cmn_cdev_vp = common_specvp(cdev_vp);
1002 if (fop_open(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
1003 == 0) {
1004 if (vn_has_cached_data(dumpvp))
1005 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
1006 B_INVAL | B_TRUNC, kcred);
1007 dumpcfg.buf.cdev_vp = cmn_cdev_vp;
1008 } else {
1009 VN_RELE(cdev_vp);
1015 * Store a hires timestamp so we can look it up during debugging.
1017 lbolt_debug_entry();
1020 * Leave room for the message and ereport save areas and terminal dump
1021 * header.
1023 dumpcfg.buf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
1024 DUMP_ERPTSIZE;
1027 * Write out the symbol table. It's no longer compressed,
1028 * so its 'size' and 'csize' are equal.
1030 dumpcfg.buf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
1031 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
1032 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
1035 * Write out the translation map.
1037 dumphdr->dump_map = dumpvp_flush();
1038 dump_as(&kas);
1039 dumphdr->dump_nvtop += dump_plat_addr();
1042 * call into hat, which may have unmapped pages that also need to
1043 * be in the dump
1045 hat_dump();
1047 if (dump_conflags & DUMP_ALL) {
1048 mutex_enter(&pidlock);
1050 for (npids = 0, p = practive; p != NULL; p = p->p_next)
1051 dumpcfg.pids[npids++] = p->p_pid;
1053 mutex_exit(&pidlock);
1055 for (pidx = 0; pidx < npids; pidx++)
1056 (void) dump_process(dumpcfg.pids[pidx]);
1058 dump_init_memlist_walker(&mlw);
1059 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1060 dump_timeleft = dump_timeout;
1061 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1063 * Some hypervisors do not have all pages available to
1064 * be accessed by the guest OS. Check for page
1065 * accessibility.
1067 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
1068 PLAT_HOLD_OK)
1069 continue;
1070 BT_SET(dumpcfg.bitmap, bitnum);
1072 dumphdr->dump_npages = dumpcfg.bitmapsize;
1073 dumphdr->dump_flags |= DF_ALL;
1075 } else if (dump_conflags & DUMP_CURPROC) {
1077 * Determine which pid is to be dumped. If we're panicking, we
1078 * dump the process associated with panic_thread (if any). If
1079 * this is a live dump, we dump the process associated with
1080 * curthread.
1082 npids = 0;
1083 if (panicstr) {
1084 if (panic_thread != NULL &&
1085 panic_thread->t_procp != NULL &&
1086 panic_thread->t_procp != &p0) {
1087 dumpcfg.pids[npids++] =
1088 panic_thread->t_procp->p_pid;
1090 } else {
1091 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
1094 if (npids && dump_process(dumpcfg.pids[0]) == 0)
1095 dumphdr->dump_flags |= DF_CURPROC;
1096 else
1097 dumphdr->dump_flags |= DF_KERNEL;
1099 } else {
1100 dumphdr->dump_flags |= DF_KERNEL;
1103 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
1106 * Write out the pfn table.
1108 dumphdr->dump_pfn = dumpvp_flush();
1109 dump_init_memlist_walker(&mlw);
1110 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1111 dump_timeleft = dump_timeout;
1112 if (!BT_TEST(dumpcfg.bitmap, bitnum))
1113 continue;
1114 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1115 ASSERT(pfn != PFN_INVALID);
1116 dumpvp_write(&pfn, sizeof (pfn_t));
1118 dump_plat_pfn();
1121 * Write out all the pages.
1122 * Map pages, copy them handling UEs, compress, and write them out.
1124 dumphdr->dump_data = dumpvp_flush();
1126 ASSERT(dumpcfg.page);
1127 bzero(&dumpcfg.perpage, sizeof (dumpcfg.perpage));
1129 start = gethrtime();
1130 cfg->iowaitts = start;
1132 if (panicstr)
1133 kmem_dump_begin();
1135 percent_done = 0;
1136 sec_done = 0;
1138 dump_init_memlist_walker(&mlw);
1139 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
1140 dumpcsize_t csize;
1141 uint_t percent;
1142 int sec;
1144 dump_timeleft = dump_timeout;
1145 HRSTART(cfg->perpage, bitmap);
1146 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1147 HRSTOP(cfg->perpage, bitmap);
1148 continue;
1150 HRSTOP(cfg->perpage, bitmap);
1152 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
1153 ASSERT(pfn != PFN_INVALID);
1155 HRSTART(cfg->perpage, map);
1156 hat_devload(kas.a_hat, dumpcfg.cmap, PAGESIZE, pfn, PROT_READ,
1157 HAT_LOAD_NOCONSIST);
1158 HRSTOP(cfg->perpage, map);
1160 dump_pagecopy(dumpcfg.cmap, dumpcfg.page);
1162 HRSTART(cfg->perpage, unmap);
1163 hat_unload(kas.a_hat, dumpcfg.cmap, PAGESIZE, HAT_UNLOAD);
1164 HRSTOP(cfg->perpage, unmap);
1166 HRSTART(dumpcfg.perpage, compress);
1167 csize = compress(dumpcfg.page, dumpcfg.lzbuf, PAGESIZE);
1168 HRSTOP(dumpcfg.perpage, compress);
1170 HRSTART(dumpcfg.perpage, write);
1171 dumpvp_write(&csize, sizeof (csize));
1172 dumpvp_write(dumpcfg.lzbuf, csize);
1173 HRSTOP(dumpcfg.perpage, write);
1175 if (dump_ioerr) {
1176 dumphdr->dump_flags &= ~DF_COMPLETE;
1177 dumphdr->dump_npages = cfg->npages;
1178 break;
1181 sec = (gethrtime() - start) / NANOSEC;
1182 percent = ++cfg->npages * 100LL / dumphdr->dump_npages;
1185 * Render a simple progress display on the system console to
1186 * make clear to the operator that the system has not hung.
1187 * Emit an update when dump progress has advanced by one
1188 * percent, or when no update has been drawn in the last
1189 * second.
1191 if (percent > percent_done || sec > sec_done) {
1192 percent_done = percent;
1193 sec_done = sec;
1195 uprintf("^\r%2d:%02d %3d%% done", sec / 60, sec % 60,
1196 percent_done);
1197 if (!panicstr)
1198 delay(1); /* let the output be sent */
1202 cfg->elapsed = gethrtime() - start;
1203 if (cfg->elapsed < 1)
1204 cfg->elapsed = 1;
1206 /* record actual pages dumped */
1207 dumphdr->dump_npages = cfg->npages;
1209 /* platform-specific data */
1210 dumphdr->dump_npages += dump_plat_data(dumpcfg.page);
1212 /* note any errors by clearing DF_COMPLETE */
1213 if (dump_ioerr || cfg->npages < dumphdr->dump_npages)
1214 dumphdr->dump_flags &= ~DF_COMPLETE;
1216 /* end of stream blocks */
1217 datatag = 0;
1218 dumpvp_write(&datatag, sizeof (datatag));
1220 bzero(&datahdr, sizeof (datahdr));
1222 /* buffer for metrics */
1223 buf = dumpcfg.page;
1224 size = MIN(PAGESIZE, DUMP_OFFSET - sizeof (dumphdr_t) -
1225 sizeof (dumpdatahdr_t));
1227 /* finish the kmem intercepts, collect kmem verbose info */
1228 if (panicstr) {
1229 datahdr.dump_metrics = kmem_dump_finish(buf, size);
1230 buf += datahdr.dump_metrics;
1231 size -= datahdr.dump_metrics;
1234 /* record in the header whether this is a fault-management panic */
1235 if (panicstr)
1236 dumphdr->dump_fm_panic = is_fm_panic();
1238 /* compression info in data header */
1239 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
1240 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
1241 datahdr.dump_maxcsize = PAGESIZE;
1242 datahdr.dump_maxrange = 1;
1243 datahdr.dump_nstreams = 1;
1244 datahdr.dump_clevel = 0;
1246 if (dump_metrics_on)
1247 datahdr.dump_metrics += dumpsys_metrics(buf, size);
1249 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
1252 * Write out the initial and terminal dump headers.
1254 dumpcfg.buf.vp_off = dumphdr->dump_start;
1255 dumpvp_write(dumphdr, sizeof (dumphdr_t));
1256 (void) dumpvp_flush();
1258 dumpcfg.buf.vp_limit = dumpvp_size;
1259 dumpcfg.buf.vp_off = dumpcfg.buf.vp_limit - DUMP_OFFSET;
1260 dumpvp_write(dumphdr, sizeof (dumphdr_t));
1261 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
1262 dumpvp_write(dumpcfg.page, datahdr.dump_metrics);
1264 (void) dumpvp_flush();
1266 uprintf("\r%3d%% done: %llu pages dumped, ",
1267 percent_done, (u_longlong_t)cfg->npages);
1269 if (dump_ioerr == 0) {
1270 uprintf("dump succeeded\n");
1271 } else {
1272 uprintf("dump failed: error %d\n", dump_ioerr);
1273 #ifdef DEBUG
1274 if (panicstr)
1275 debug_enter("dump failed");
1276 #endif
1280 * Write out all undelivered messages. This has to be the *last*
1281 * thing we do because the dump process itself emits messages.
1283 if (panicstr) {
1284 dump_summary();
1285 dump_ereports();
1286 dump_messages();
1289 delay(2 * hz); /* let people see the 'done' message */
1290 dump_timeleft = 0;
1291 dump_ioerr = 0;
1293 /* restore settings after live dump completes */
1294 if (!panicstr) {
1295 /* release any VCHR open of the dump device */
1296 if (dumpcfg.buf.cdev_vp != NULL) {
1297 (void) fop_close(dumpcfg.buf.cdev_vp, FREAD | FWRITE, 1, 0,
1298 kcred, NULL);
1299 VN_RELE(dumpcfg.buf.cdev_vp);
1300 dumpcfg.buf.cdev_vp = NULL;
1306 * This function is called whenever the memory size, as represented
1307 * by the phys_install list, changes.
1309 void
1310 dump_resize()
1312 mutex_enter(&dump_lock);
1313 dumphdr_init();
1314 dumpbuf_resize();
1315 dump_update_clevel();
1316 mutex_exit(&dump_lock);
1320 * This function allows for dynamic resizing of a dump area. It assumes that
1321 * the underlying device has update its appropriate size(9P).
1324 dumpvp_resize()
1326 int error;
1327 vattr_t vattr;
1329 mutex_enter(&dump_lock);
1330 vattr.va_mask = AT_SIZE;
1331 if ((error = fop_getattr(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
1332 mutex_exit(&dump_lock);
1333 return (error);
1336 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
1337 mutex_exit(&dump_lock);
1338 return (ENOSPC);
1341 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1342 mutex_exit(&dump_lock);
1343 return (0);
1347 dump_set_uuid(const char *uuidstr)
1349 const char *ptr;
1350 int i;
1352 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
1353 return (EINVAL);
1355 /* uuid_parse is not common code so check manually */
1356 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
1357 switch (i) {
1358 case 8:
1359 case 13:
1360 case 18:
1361 case 23:
1362 if (*ptr != '-')
1363 return (EINVAL);
1364 break;
1366 default:
1367 if (!isxdigit(*ptr))
1368 return (EINVAL);
1369 break;
1373 if (dump_osimage_uuid[0] != '\0')
1374 return (EALREADY);
1376 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
1378 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
1379 dump_osimage_uuid);
1381 return (0);
1384 const char *
1385 dump_get_uuid(void)
1387 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");