libgo/runtime/heapdump.c
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// http://code.google.com/p/go-wiki/wiki/heapdump13
#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
#include "go-panic.h"
#define hash __hash
#define KindNoPointers GO_NO_POINTERS
enum {
	FieldKindEol = 0,
	FieldKindPtr = 1,
	FieldKindString = 2,
	FieldKindSlice = 3,
	FieldKindIface = 4,
	FieldKindEface = 5,

	TagEOF = 0,
	TagObject = 1,
	TagOtherRoot = 2,
	TagType = 3,
	TagGoRoutine = 4,
	TagStackFrame = 5,
	TagParams = 6,
	TagFinalizer = 7,
	TagItab = 8,
	TagOSThread = 9,
	TagMemStats = 10,
	TagQueuedFinalizer = 11,
	TagData = 12,
	TagBss = 13,
	TagDefer = 14,
	TagPanic = 15,
	TagMemProf = 16,
	TagAllocSample = 17,

	TypeInfo_Conservative = 127,
};
// static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
// static void dumpfields(uintptr *prog);
static void dumpefacetypes(void *obj, uintptr size, const Type *type, uintptr kind);
// fd to write the dump to.
static uintptr dumpfd;
// buffer of pending write data
enum {
	BufSize = 4096,
};
static byte buf[BufSize];
static uintptr nbuf;
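// hwrite appends to buf, flushing to dumpfd once the buffered data plus the
// new data would exceed BufSize; writes larger than the buffer itself bypass
// it and go straight to the file descriptor.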
static void
hwrite(const byte *data, uintptr len)
{
	if(len + nbuf <= BufSize) {
		runtime_memmove(buf + nbuf, data, len);
		nbuf += len;
		return;
	}
	runtime_write(dumpfd, buf, nbuf);
	if(len >= BufSize) {
		runtime_write(dumpfd, data, len);
		nbuf = 0;
	} else {
		runtime_memmove(buf, data, len);
		nbuf = len;
	}
}
static void
flush(void)
{
	runtime_write(dumpfd, buf, nbuf);
	nbuf = 0;
}
// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
enum {
	TypeCacheBuckets = 256, // must be a power of 2
	TypeCacheAssoc = 4,
};
typedef struct TypeCacheBucket TypeCacheBucket;
struct TypeCacheBucket {
	const Type *t[TypeCacheAssoc];
};
static TypeCacheBucket typecache[TypeCacheBuckets];
// dump a uint64 in a varint format parseable by encoding/binary
static void
dumpint(uint64 v)
{
	byte buf[10];
	int32 n;
	n = 0;
	while(v >= 0x80) {
		buf[n++] = v | 0x80;
		v >>= 7;
	}
	buf[n++] = v;
	hwrite(buf, n);
}
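// For example, 300 (0x12c) is written as the two bytes 0xac 0x02: seven bits
// at a time, least-significant group first, with the 0x80 continuation bit
// set on every byte except the last.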
static void
dumpbool(bool b)
{
	dumpint(b ? 1 : 0);
}
// dump varint uint64 length followed by memory contents
static void
dumpmemrange(const byte *data, uintptr len)
{
	dumpint(len);
	hwrite(data, len);
}
static void
dumpstr(String s)
{
	dumpmemrange(s.str, s.len);
}
static void
dumpcstr(const int8 *c)
{
	dumpmemrange((const byte*)c, runtime_findnull((const byte*)c));
}
// dump information for a type
static void
dumptype(const Type *t)
{
	TypeCacheBucket *b;
	int32 i, j;

	if(t == nil) {
		return;
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b = &typecache[t->hash & (TypeCacheBuckets-1)];
	if(t == b->t[0]) return;
	for(i = 1; i < TypeCacheAssoc; i++) {
		if(t == b->t[i]) {
			// Move-to-front
			for(j = i; j > 0; j--) {
				b->t[j] = b->t[j-1];
			}
			b->t[0] = t;
			return;
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for(j = TypeCacheAssoc-1; j > 0; j--) {
		b->t[j] = b->t[j-1];
	}
	b->t[0] = t;

	// dump the type
	dumpint(TagType);
	dumpint((uintptr)t);
	dumpint(t->__size);
	if(t->__uncommon == nil || t->__uncommon->__pkg_path == nil || t->__uncommon->__name == nil) {
		dumpstr(*t->__reflection);
	} else {
		dumpint(t->__uncommon->__pkg_path->len + 1 + t->__uncommon->__name->len);
		hwrite(t->__uncommon->__pkg_path->str, t->__uncommon->__pkg_path->len);
		hwrite((const byte*)".", 1);
		hwrite(t->__uncommon->__name->str, t->__uncommon->__name->len);
	}
	dumpbool(t->__size > PtrSize || (t->__code & KindNoPointers) == 0);
	// dumpfields((uintptr*)t->gc + 1);
}
// returns true if object is scannable
static bool
scannable(byte *obj)
{
	uintptr *b, off, shift;

	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start; // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	return ((*b >> shift) & bitScan) != 0;
}
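// The per-word GC bitmap sits just below arena_start and grows downward:
// each bitmap word describes wordsPerBitmapWord heap words, so the word
// offset of obj selects a bitmap word and a shift within it, and the bitScan
// bit in that slot marks the object as scannable.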
// dump an object
static void
dumpobj(byte *obj, uintptr size, const Type *type, uintptr kind)
{
	if(type != nil) {
		dumptype(type);
		dumpefacetypes(obj, size, type, kind);
	}

	dumpint(TagObject);
	dumpint((uintptr)obj);
	dumpint((uintptr)type);
	dumpint(kind);
	dumpmemrange(obj, size);
}
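// An object record is therefore: TagObject, the object's address, the
// address of its type descriptor (or 0 if unknown), the TypeInfo_* kind,
// and the object's contents as a length-prefixed byte range.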
static void
dumpotherroot(const char *description, byte *to)
{
	dumpint(TagOtherRoot);
	dumpcstr((const int8 *)description);
	dumpint((uintptr)to);
}
static void
dumpfinalizer(byte *obj, FuncVal *fn, const FuncType* ft, const PtrType *ot)
{
	dumpint(TagFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}
typedef struct ChildInfo ChildInfo;
struct ChildInfo {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	uintptr argoff;	// where the arguments start in the frame
	uintptr arglen;	// size of args region
	BitVector args;	// if args.n >= 0, pointer map of args region

	byte *sp;	// callee sp
	uintptr depth;	// depth in call stack (0 == most recent)
};
static void
dumpgoroutine(G *gp)
{
	// ChildInfo child;
	Defer *d;
	Panic *p;

	dumpint(TagGoRoutine);
	dumpint((uintptr)gp);
	dumpint((uintptr)0);
	dumpint(gp->goid);
	dumpint(gp->gopc);
	dumpint(gp->atomicstatus);
	dumpbool(gp->issystem);
	dumpbool(gp->isbackground);
	dumpint(gp->waitsince);
	dumpstr(gp->waitreason);
	dumpint((uintptr)0);
	dumpint((uintptr)gp->m);
	dumpint((uintptr)gp->_defer);
	dumpint((uintptr)gp->_panic);

	// dump stack
	// child.args.n = -1;
	// child.arglen = 0;
	// child.sp = nil;
	// child.depth = 0;
	// if(!ScanStackByFrames)
	//	runtime_throw("need frame info to dump stacks");
	// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);

	// dump defer & panic records
	for(d = gp->_defer; d != nil; d = d->next) {
		dumpint(TagDefer);
		dumpint((uintptr)d);
		dumpint((uintptr)gp);
		dumpint((uintptr)d->arg);
		dumpint((uintptr)d->frame);
		dumpint((uintptr)d->pfn);
		dumpint((uintptr)0);
		dumpint((uintptr)d->next);
	}
	for (p = gp->_panic; p != nil; p = p->next) {
		dumpint(TagPanic);
		dumpint((uintptr)p);
		dumpint((uintptr)gp);
		dumpint((uintptr)p->arg.__type_descriptor);
		dumpint((uintptr)p->arg.__object);
		dumpint((uintptr)0);
		dumpint((uintptr)p->next);
	}
}
static void
dumpgs(void)
{
	G *gp;
	uint32 i;

	// goroutines & stacks
	for(i = 0; i < runtime_allglen; i++) {
		gp = runtime_allg[i];
		switch(gp->atomicstatus){
		default:
			runtime_printf("unexpected G.status %d\n", gp->atomicstatus);
			runtime_throw("mark - bad status");
		case _Gdead:
			break;
		case _Grunnable:
		case _Gsyscall:
		case _Gwaiting:
			dumpgoroutine(gp);
			break;
		}
	}
}
static void
finq_callback(FuncVal *fn, void *obj, const FuncType *ft, const PtrType *ot)
{
	dumpint(TagQueuedFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}
static void
dumproots(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialFinalizer *spf;
	byte *p;

	// data segment
	// dumpint(TagData);
	// dumpint((uintptr)data);
	// dumpmemrange(data, edata - data);
	// dumpfields((uintptr*)gcdata + 1);

	// bss segment
	// dumpint(TagBss);
	// dumpint((uintptr)bss);
	// dumpmemrange(bss, ebss - bss);
	// dumpfields((uintptr*)gcbss + 1);

	// MSpan.types
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			switch(s->types.compression) {
			case MTypes_Empty:
			case MTypes_Single:
				break;
			case MTypes_Words:
			case MTypes_Bytes:
				dumpotherroot("runtime type info", (byte*)s->types.data);
				break;
			}

			// Finalizers
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
					continue;
				spf = (SpecialFinalizer*)sp;
				p = (byte*)((s->start << PageShift) + spf->offset);
				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
			}
		}
	}

	// Finalizer queue
	runtime_iterate_finq(finq_callback);
}
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
static byte hfree[PageSize/8];
static void
dumpobjs(void)
{
	uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
	MSpan *s;
	MLink *l;
	byte *p;
	const Type *t;

	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state != MSpanInUse)
			continue;
		p = (byte*)(s->start << PageShift);
		size = s->elemsize;
		n = (s->npages << PageShift) / size;
		if(n > PageSize/8)
			runtime_throw("free array doesn't have enough entries");
		for(l = s->freelist; l != nil; l = l->next) {
			hfree[((byte*)l - p) / size] = true;
		}
		for(j = 0; j < n; j++, p += size) {
			if(hfree[j]) {
				hfree[j] = false;
				continue;
			}
			off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
			bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = *bitp >> shift;

			// Skip FlagNoGC allocations (stacks)
			if((bits & bitAllocated) == 0)
				continue;

			// extract type and kind
			ti = runtime_gettype(p);
			t = (Type*)(ti & ~(uintptr)(PtrSize-1));
			kind = ti & (PtrSize-1);

			// dump it
			if(kind == TypeInfo_Chan)
				t = ((const ChanType*)t)->__element_type; // use element type for chan encoding
			if(t == nil && scannable(p))
				kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
			dumpobj(p, size, t, kind);
		}
	}
}
static void
dumpparams(void)
{
	byte *x;

	dumpint(TagParams);
	x = (byte*)1;
	if(*(byte*)&x == 1)
		dumpbool(false); // little-endian ptrs
	else
		dumpbool(true); // big-endian ptrs
	dumpint(PtrSize);
	dumpint(runtime_Hchansize);
	dumpint((uintptr)runtime_mheap.arena_start);
	dumpint((uintptr)runtime_mheap.arena_used);
	dumpint(0);
	dumpcstr((const int8 *)"");
	dumpint(runtime_ncpu);
}
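// The params record thus carries: a big-endian flag, the pointer size, the
// channel header size, the arena bounds, a zero and an empty string for
// fields this port does not fill in, and the CPU count.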
static void
dumpms(void)
{
	M *mp;

	for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
		dumpint(TagOSThread);
		dumpint((uintptr)mp);
		dumpint(mp->id);
		dumpint(0);
	}
}
static void
dumpmemstats(void)
{
	int32 i;

	dumpint(TagMemStats);
	dumpint(mstats.alloc);
	dumpint(mstats.total_alloc);
	dumpint(mstats.sys);
	dumpint(mstats.nlookup);
	dumpint(mstats.nmalloc);
	dumpint(mstats.nfree);
	dumpint(mstats.heap_alloc);
	dumpint(mstats.heap_sys);
	dumpint(mstats.heap_idle);
	dumpint(mstats.heap_inuse);
	dumpint(mstats.heap_released);
	dumpint(mstats.heap_objects);
	dumpint(mstats.stacks_inuse);
	dumpint(mstats.stacks_sys);
	dumpint(mstats.mspan_inuse);
	dumpint(mstats.mspan_sys);
	dumpint(mstats.mcache_inuse);
	dumpint(mstats.mcache_sys);
	dumpint(mstats.buckhash_sys);
	dumpint(mstats.gc_sys);
	dumpint(mstats.other_sys);
	dumpint(mstats.next_gc);
	dumpint(mstats.last_gc);
	dumpint(mstats.pause_total_ns);
	for(i = 0; i < 256; i++)
		dumpint(mstats.pause_ns[i]);
	dumpint(mstats.numgc);
}
static void
dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintptr allocs, uintptr frees)
{
	uintptr i, pc;
	byte buf[20];

	dumpint(TagMemProf);
	dumpint((uintptr)b);
	dumpint(size);
	dumpint(nstk);
	for(i = 0; i < nstk; i++) {
		pc = stk[i].pc;
		if(stk[i].function.len == 0) {
			runtime_snprintf(buf, sizeof(buf), "%X", (uint64)pc);
			dumpcstr((int8*)buf);
			dumpcstr((const int8*)"?");
			dumpint(0);
		} else {
			dumpstr(stk[i].function);
			dumpstr(stk[i].filename);
			dumpint(stk[i].lineno);
		}
	}
	dumpint(allocs);
	dumpint(frees);
}
static void
dumpmemprof(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialProfile *spp;
	byte *p;

	runtime_iterate_memprof(dumpmemprof_callback);

	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state != MSpanInUse)
			continue;
		for(sp = s->specials; sp != nil; sp = sp->next) {
			if(sp->kind != KindSpecialProfile)
				continue;
			spp = (SpecialProfile*)sp;
			p = (byte*)((s->start << PageShift) + spp->offset);
			dumpint(TagAllocSample);
			dumpint((uintptr)p);
			dumpint((uintptr)spp->b);
		}
	}
}
static void
mdump(G *gp)
{
	const byte *hdr;
	uintptr i;
	MSpan *s;

	// make sure we're done sweeping
	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state == MSpanInUse)
			runtime_MSpan_EnsureSwept(s);
	}

	runtime_memclr((byte*)&typecache[0], sizeof(typecache));
	hdr = (const byte*)"go1.3 heap dump\n";
	hwrite(hdr, runtime_findnull(hdr));
	dumpparams();
	dumpobjs();
	dumpgs();
	dumpms();
	dumproots();
	dumpmemstats();
	dumpmemprof();
	dumpint(TagEOF);
	flush();

	gp->param = nil;
	gp->atomicstatus = _Grunning;
	runtime_gogo(gp);
}
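// mdump runs on the scheduler stack (see the runtime_mcall call in
// runtime_debug_WriteHeapDump below); since mcall does not return to its
// caller, mdump hands control back by marking gp as running again and
// resuming it with runtime_gogo.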
void runtime_debug_WriteHeapDump(uintptr)
  __asm__(GOSYM_PREFIX "runtime_debug.WriteHeapDump");

void
runtime_debug_WriteHeapDump(uintptr fd)
{
	M *m;
	G *g;

	// Stop the world.
	runtime_semacquire(&runtime_worldsema, false);
	m = runtime_m();
	m->gcing = 1;
	m->locks++;
	runtime_stoptheworld();

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	runtime_updatememstats(nil);

	// Set dump file.
	dumpfd = fd;

	// Call dump routine on M stack.
	g = runtime_g();
	g->atomicstatus = _Gwaiting;
	g->waitreason = runtime_gostringnocopy((const byte*)"dumping heap");
	runtime_mcall(mdump);

	// Reset dump file.
	dumpfd = 0;

	// Start up the world again.
	m->gcing = 0;
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
	m->locks--;
}
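// The __asm__ name above binds this function to runtime/debug.WriteHeapDump
// on the Go side, so a purely illustrative caller would look like:
//
//	f, _ := os.Create("heapdump")
//	debug.WriteHeapDump(f.Fd())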
// Runs the specified gc program. Calls the callback for every
// pointer-like field specified by the program and passes to the
// callback the kind and offset of that field within the object.
// offset is the offset in the object of the start of the program.
// Returns a pointer to the opcode that ended the gc program (either
// GC_END or GC_ARRAY_NEXT).
static uintptr*
playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
{
	uintptr len, elemsize, i, *end;

	for(;;) {
		switch(prog[0]) {
		case GC_END:
			return prog;
		case GC_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_APTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 2;
			break;
		case GC_ARRAY_START:
			len = prog[2];
			elemsize = prog[3];
			end = nil;
			for(i = 0; i < len; i++) {
				end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
				if(end[0] != GC_ARRAY_NEXT)
					runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
			}
			prog = end + 1;
			break;
		case GC_ARRAY_NEXT:
			return prog;
		case GC_CALL:
			playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
			prog += 3;
			break;
		case GC_CHAN_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_STRING:
			callback(arg, FieldKindString, offset + prog[1]);
			prog += 2;
			break;
		case GC_EFACE:
			callback(arg, FieldKindEface, offset + prog[1]);
			prog += 2;
			break;
		case GC_IFACE:
			callback(arg, FieldKindIface, offset + prog[1]);
			prog += 2;
			break;
		case GC_SLICE:
			callback(arg, FieldKindSlice, offset + prog[1]);
			prog += 3;
			break;
		case GC_REGION:
			playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
			prog += 4;
			break;
		default:
			runtime_printf("%D\n", (uint64)prog[0]);
			runtime_throw("bad gc op");
		}
	}
}
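// As a rough example of the encoding this interpreter expects, a program of
// the form {GC_PTR, 8, <ptr-elem gc data>, GC_END} would report a single
// FieldKindPtr at offset 8 and then stop; the third word is skipped here
// (prog += 3) since only field kinds and offsets are needed for dumping.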
static void
dump_callback(void *p, uintptr kind, uintptr offset)
{
	USED(&p);
	dumpint(kind);
	dumpint(offset);
}
// dumpint() the kind & offset of each field in an object.
static void
dumpfields(uintptr *prog)
{
	playgcprog(0, prog, dump_callback, nil);
	dumpint(FieldKindEol);
}
static void
dumpeface_callback(void *p, uintptr kind, uintptr offset)
{
	Eface *e;

	if(kind != FieldKindEface)
		return;
	e = (Eface*)((byte*)p + offset);
	dumptype(e->__type_descriptor);
}
// The heap dump reader needs to be able to disambiguate
// Eface entries. So it needs to know every type that might
// appear in such an entry. The following two routines accomplish
// that.

// Dump all the types that appear in the type field of
// any Eface contained in obj.
static void
dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *type, uintptr kind)
{
	uintptr i;

	switch(kind) {
	case TypeInfo_SingleObject:
		//playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		break;
	case TypeInfo_Array:
		for(i = 0; i <= size - type->__size; i += type->__size) {
			//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	case TypeInfo_Chan:
		if(type->__size == 0) // channels may have zero-sized objects in them
			break;
		for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
			//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		}
		break;
	}
}