libgo/runtime/heapdump.c (official-gcc.git)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// http://code.google.com/p/go-wiki/wiki/heapdump13

#include "runtime.h"
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
#include "go-type.h"
#include "go-defer.h"
#include "go-panic.h"

#define hash __hash
#define KindNoPointers GO_NO_POINTERS

enum {
	FieldKindEol = 0,
	FieldKindPtr = 1,
	FieldKindString = 2,
	FieldKindSlice = 3,
	FieldKindIface = 4,
	FieldKindEface = 5,

	TagEOF = 0,
	TagObject = 1,
	TagOtherRoot = 2,
	TagType = 3,
	TagGoRoutine = 4,
	TagStackFrame = 5,
	TagParams = 6,
	TagFinalizer = 7,
	TagItab = 8,
	TagOSThread = 9,
	TagMemStats = 10,
	TagQueuedFinalizer = 11,
	TagData = 12,
	TagBss = 13,
	TagDefer = 14,
	TagPanic = 15,
	TagMemProf = 16,
	TagAllocSample = 17,

	TypeInfo_Conservative = 127,
};

// static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
// static void dumpfields(uintptr *prog);
static void dumpefacetypes(void *obj, uintptr size, const Type *type, uintptr kind);

// fd to write the dump to.
static uintptr dumpfd;

// buffer of pending write data
enum {
	BufSize = 4096,
};
static byte buf[BufSize];
static uintptr nbuf;

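// append data to the dump, buffering writes and flushing to dumpfd as the buffer fills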
static void
hwrite(const byte *data, uintptr len)
{
	if(len + nbuf <= BufSize) {
		runtime_memmove(buf + nbuf, data, len);
		nbuf += len;
		return;
	}
	runtime_write(dumpfd, buf, nbuf);
	if(len >= BufSize) {
		runtime_write(dumpfd, data, len);
		nbuf = 0;
	} else {
		runtime_memmove(buf, data, len);
		nbuf = len;
	}
}

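// flush any buffered data to the dump file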
static void
flush(void)
{
	runtime_write(dumpfd, buf, nbuf);
	nbuf = 0;
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
enum {
	TypeCacheBuckets = 256, // must be a power of 2
	TypeCacheAssoc = 4,
};
typedef struct TypeCacheBucket TypeCacheBucket;
struct TypeCacheBucket {
	const Type *t[TypeCacheAssoc];
};
static TypeCacheBucket typecache[TypeCacheBuckets];

// dump a uint64 in a varint format parseable by encoding/binary
static void
dumpint(uint64 v)
{
	byte buf[10];
	int32 n;
	n = 0;
	while(v >= 0x80) {
		buf[n++] = v | 0x80;
		v >>= 7;
	}
	buf[n++] = v;
	hwrite(buf, n);
}

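// dump a bool as a varint, 0 or 1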
static void
dumpbool(bool b)
{
	dumpint(b ? 1 : 0);
}

// dump varint uint64 length followed by memory contents
static void
dumpmemrange(const byte *data, uintptr len)
{
	dumpint(len);
	hwrite(data, len);
}

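// dump a Go string as a length-prefixed byte range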
static void
dumpstr(String s)
{
	dumpmemrange(s.str, s.len);
}

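// dump a NUL-terminated C string (length followed by bytes, NUL excluded)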
static void
dumpcstr(const int8 *c)
{
	dumpmemrange((const byte*)c, runtime_findnull((const byte*)c));
}

// dump information for a type
static void
dumptype(const Type *t)
{
	TypeCacheBucket *b;
	int32 i, j;

	if(t == nil) {
		return;
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b = &typecache[t->hash & (TypeCacheBuckets-1)];
	if(t == b->t[0]) return;
	for(i = 1; i < TypeCacheAssoc; i++) {
		if(t == b->t[i]) {
			// Move-to-front
			for(j = i; j > 0; j--) {
				b->t[j] = b->t[j-1];
			}
			b->t[0] = t;
			return;
		}
	}
	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for(j = TypeCacheAssoc-1; j > 0; j--) {
		b->t[j] = b->t[j-1];
	}
	b->t[0] = t;

	// dump the type
	dumpint(TagType);
	dumpint((uintptr)t);
	dumpint(t->__size);
	if(t->__uncommon == nil || t->__uncommon->__pkg_path == nil || t->__uncommon->__name == nil) {
		dumpstr(*t->__reflection);
	} else {
		dumpint(t->__uncommon->__pkg_path->len + 1 + t->__uncommon->__name->len);
		hwrite(t->__uncommon->__pkg_path->str, t->__uncommon->__pkg_path->len);
		hwrite((const byte*)".", 1);
		hwrite(t->__uncommon->__name->str, t->__uncommon->__name->len);
	}
	dumpbool(t->__size > PtrSize || (t->__code & KindNoPointers) == 0);
	// dumpfields((uintptr*)t->gc + 1);
}

// returns true if object is scannable
static bool
scannable(byte *obj)
{
	uintptr *b, off, shift;

	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start; // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	return ((*b >> shift) & bitScan) != 0;
}

// dump an object
static void
dumpobj(byte *obj, uintptr size, const Type *type, uintptr kind)
{
	if(type != nil) {
		dumptype(type);
		dumpefacetypes(obj, size, type, kind);
	}

	dumpint(TagObject);
	dumpint((uintptr)obj);
	dumpint((uintptr)type);
	dumpint(kind);
	dumpmemrange(obj, size);
}

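// dump a miscellaneous root: a description and the pointer it holds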
static void
dumpotherroot(const char *description, byte *to)
{
	dumpint(TagOtherRoot);
	dumpcstr((const int8 *)description);
	dumpint((uintptr)to);
}

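// dump a finalizer registered for the object obj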
static void
dumpfinalizer(byte *obj, FuncVal *fn, const FuncType* ft, const PtrType *ot)
{
	dumpint(TagFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

typedef struct ChildInfo ChildInfo;
struct ChildInfo {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	uintptr argoff;  // where the arguments start in the frame
	uintptr arglen;  // size of args region
	BitVector args;  // if args.n >= 0, pointer map of args region

	byte *sp;        // callee sp
	uintptr depth;   // depth in call stack (0 == most recent)
};

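// dump a goroutine record plus its defer and panic records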
static void
dumpgoroutine(G *gp)
{
	// ChildInfo child;
	Defer *d;
	Panic *p;

	dumpint(TagGoRoutine);
	dumpint((uintptr)gp);
	dumpint((uintptr)0);
	dumpint(gp->goid);
	dumpint(gp->gopc);
	dumpint(gp->status);
	dumpbool(gp->issystem);
	dumpbool(gp->isbackground);
	dumpint(gp->waitsince);
	dumpcstr((const int8 *)gp->waitreason);
	dumpint((uintptr)0);
	dumpint((uintptr)gp->m);
	dumpint((uintptr)gp->defer);
	dumpint((uintptr)gp->panic);

	// dump stack
	// child.args.n = -1;
	// child.arglen = 0;
	// child.sp = nil;
	// child.depth = 0;
	// if(!ScanStackByFrames)
	// 	runtime_throw("need frame info to dump stacks");
	// runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);

	// dump defer & panic records
	for(d = gp->defer; d != nil; d = d->__next) {
		dumpint(TagDefer);
		dumpint((uintptr)d);
		dumpint((uintptr)gp);
		dumpint((uintptr)d->__arg);
		dumpint((uintptr)d->__frame);
		dumpint((uintptr)d->__pfn);
		dumpint((uintptr)0);
		dumpint((uintptr)d->__next);
	}
	for (p = gp->panic; p != nil; p = p->__next) {
		dumpint(TagPanic);
		dumpint((uintptr)p);
		dumpint((uintptr)gp);
		dumpint((uintptr)p->__arg.__type_descriptor);
		dumpint((uintptr)p->__arg.__object);
		dumpint((uintptr)0);
		dumpint((uintptr)p->__next);
	}
}

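// dump all goroutines that are not dead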
static void
dumpgs(void)
{
	G *gp;
	uint32 i;

	// goroutines & stacks
	for(i = 0; i < runtime_allglen; i++) {
		gp = runtime_allg[i];
		switch(gp->status){
		default:
			runtime_printf("unexpected G.status %d\n", gp->status);
			runtime_throw("mark - bad status");
		case Gdead:
			break;
		case Grunnable:
		case Gsyscall:
		case Gwaiting:
			dumpgoroutine(gp);
			break;
		}
	}
}

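// callback for runtime_iterate_finq: dump one queued finalizer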
static void
finq_callback(FuncVal *fn, void *obj, const FuncType *ft, const PtrType *ot)
{
	dumpint(TagQueuedFinalizer);
	dumpint((uintptr)obj);
	dumpint((uintptr)fn);
	dumpint((uintptr)fn->fn);
	dumpint((uintptr)ft);
	dumpint((uintptr)ot);
}

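// dump the roots: span type info, registered finalizers, and the finalizer queue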
static void
dumproots(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialFinalizer *spf;
	byte *p;

	// data segment
	// dumpint(TagData);
	// dumpint((uintptr)data);
	// dumpmemrange(data, edata - data);
	// dumpfields((uintptr*)gcdata + 1);

	// bss segment
	// dumpint(TagBss);
	// dumpint((uintptr)bss);
	// dumpmemrange(bss, ebss - bss);
	// dumpfields((uintptr*)gcbss + 1);

	// MSpan.types
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			switch(s->types.compression) {
			case MTypes_Empty:
			case MTypes_Single:
				break;
			case MTypes_Words:
			case MTypes_Bytes:
				dumpotherroot("runtime type info", (byte*)s->types.data);
				break;
			}

			// Finalizers
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
					continue;
				spf = (SpecialFinalizer*)sp;
				p = (byte*)((s->start << PageShift) + spf->offset);
				dumpfinalizer(p, spf->fn, spf->ft, spf->ot);
			}
		}
	}

	// Finalizer queue
	runtime_iterate_finq(finq_callback);
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
static byte hfree[PageSize/8];

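// dump every allocated heap object, skipping free slots and FlagNoGC allocations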
static void
dumpobjs(void)
{
	uintptr i, j, size, n, off, shift, *bitp, bits, ti, kind;
	MSpan *s;
	MLink *l;
	byte *p;
	const Type *t;

	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state != MSpanInUse)
			continue;
		p = (byte*)(s->start << PageShift);
		size = s->elemsize;
		n = (s->npages << PageShift) / size;
		if(n > PageSize/8)
			runtime_throw("free array doesn't have enough entries");
		for(l = s->freelist; l != nil; l = l->next) {
			hfree[((byte*)l - p) / size] = true;
		}
		for(j = 0; j < n; j++, p += size) {
			if(hfree[j]) {
				hfree[j] = false;
				continue;
			}
			off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
			bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = *bitp >> shift;

			// Skip FlagNoGC allocations (stacks)
			if((bits & bitAllocated) == 0)
				continue;

			// extract type and kind
			ti = runtime_gettype(p);
			t = (Type*)(ti & ~(uintptr)(PtrSize-1));
			kind = ti & (PtrSize-1);

			// dump it
			if(kind == TypeInfo_Chan)
				t = ((const ChanType*)t)->__element_type; // use element type for chan encoding
			if(t == nil && scannable(p))
				kind = TypeInfo_Conservative; // special kind for conservatively scanned objects
			dumpobj(p, size, t, kind);
		}
	}
}

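// dump the dump parameters: endianness, pointer size, channel header size,
// heap arena bounds, and number of CPUs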
static void
dumpparams(void)
{
	byte *x;

	dumpint(TagParams);
	x = (byte*)1;
	if(*(byte*)&x == 1)
		dumpbool(false); // little-endian ptrs
	else
		dumpbool(true); // big-endian ptrs
	dumpint(PtrSize);
	dumpint(runtime_Hchansize);
	dumpint((uintptr)runtime_mheap.arena_start);
	dumpint((uintptr)runtime_mheap.arena_used);
	dumpint(0);
	dumpcstr((const int8 *)"");
	dumpint(runtime_ncpu);
}

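// dump a record for each OS thread (M)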
static void
dumpms(void)
{
	M *mp;

	for(mp = runtime_allm; mp != nil; mp = mp->alllink) {
		dumpint(TagOSThread);
		dumpint((uintptr)mp);
		dumpint(mp->id);
		dumpint(0);
	}
}

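// dump a snapshot of the runtime memory statistics (mstats)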
static void
dumpmemstats(void)
{
	int32 i;

	dumpint(TagMemStats);
	dumpint(mstats.alloc);
	dumpint(mstats.total_alloc);
	dumpint(mstats.sys);
	dumpint(mstats.nlookup);
	dumpint(mstats.nmalloc);
	dumpint(mstats.nfree);
	dumpint(mstats.heap_alloc);
	dumpint(mstats.heap_sys);
	dumpint(mstats.heap_idle);
	dumpint(mstats.heap_inuse);
	dumpint(mstats.heap_released);
	dumpint(mstats.heap_objects);
	dumpint(mstats.stacks_inuse);
	dumpint(mstats.stacks_sys);
	dumpint(mstats.mspan_inuse);
	dumpint(mstats.mspan_sys);
	dumpint(mstats.mcache_inuse);
	dumpint(mstats.mcache_sys);
	dumpint(mstats.buckhash_sys);
	dumpint(mstats.gc_sys);
	dumpint(mstats.other_sys);
	dumpint(mstats.next_gc);
	dumpint(mstats.last_gc);
	dumpint(mstats.pause_total_ns);
	for(i = 0; i < 256; i++)
		dumpint(mstats.pause_ns[i]);
	dumpint(mstats.numgc);
}

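// callback for runtime_iterate_memprof: dump one memory profile bucket
// along with its call stack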
static void
dumpmemprof_callback(Bucket *b, uintptr nstk, Location *stk, uintptr size, uintptr allocs, uintptr frees)
{
	uintptr i, pc;
	byte buf[20];

	dumpint(TagMemProf);
	dumpint((uintptr)b);
	dumpint(size);
	dumpint(nstk);
	for(i = 0; i < nstk; i++) {
		pc = stk[i].pc;
		if(stk[i].function.len == 0) {
			runtime_snprintf(buf, sizeof(buf), "%X", (uint64)pc);
			dumpcstr((int8*)buf);
			dumpcstr((const int8*)"?");
			dumpint(0);
		} else {
			dumpstr(stk[i].function);
			dumpstr(stk[i].filename);
			dumpint(stk[i].lineno);
		}
	}
	dumpint(allocs);
	dumpint(frees);
}

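// dump the memory profile buckets and the allocation samples attached to spans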
static void
dumpmemprof(void)
{
	MSpan *s, **allspans;
	uint32 spanidx;
	Special *sp;
	SpecialProfile *spp;
	byte *p;

	runtime_iterate_memprof(dumpmemprof_callback);

	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state != MSpanInUse)
			continue;
		for(sp = s->specials; sp != nil; sp = sp->next) {
			if(sp->kind != KindSpecialProfile)
				continue;
			spp = (SpecialProfile*)sp;
			p = (byte*)((s->start << PageShift) + spp->offset);
			dumpint(TagAllocSample);
			dumpint((uintptr)p);
			dumpint((uintptr)spp->b);
		}
	}
}

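// mdump performs the dump; it is run on the M (g0) stack via runtime_mcall
// while the world is stopped, then resumes the calling goroutine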
static void
mdump(G *gp)
{
	const byte *hdr;
	uintptr i;
	MSpan *s;

	// make sure we're done sweeping
	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state == MSpanInUse)
			runtime_MSpan_EnsureSwept(s);
	}

	runtime_memclr((byte*)&typecache[0], sizeof(typecache));
	hdr = (const byte*)"go1.3 heap dump\n";
	hwrite(hdr, runtime_findnull(hdr));
	dumpparams();
	dumpobjs();
	dumpgs();
	dumpms();
	dumproots();
	dumpmemstats();
	dumpmemprof();
	dumpint(TagEOF);
	flush();

	gp->param = nil;
	gp->status = Grunning;
	runtime_gogo(gp);
}

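// Go entry point for runtime/debug.WriteHeapDump: stops the world,
// dumps the heap to the given file descriptor, and restarts the world.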
void runtime_debug_WriteHeapDump(uintptr)
  __asm__(GOSYM_PREFIX "runtime_debug.WriteHeapDump");

void
runtime_debug_WriteHeapDump(uintptr fd)
{
	M *m;
	G *g;

	// Stop the world.
	runtime_semacquire(&runtime_worldsema, false);
	m = runtime_m();
	m->gcing = 1;
	m->locks++;
	runtime_stoptheworld();

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	runtime_updatememstats(nil);

	// Set dump file.
	dumpfd = fd;

	// Call dump routine on M stack.
	g = runtime_g();
	g->status = Gwaiting;
	g->waitreason = "dumping heap";
	runtime_mcall(mdump);

	// Reset dump file.
	dumpfd = 0;

	// Start up the world again.
	m->gcing = 0;
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
	m->locks--;
}

// Runs the specified gc program. Calls the callback for every
// pointer-like field specified by the program and passes to the
// callback the kind and offset of that field within the object.
// offset is the offset in the object of the start of the program.
// Returns a pointer to the opcode that ended the gc program (either
// GC_END or GC_ARRAY_NEXT).
static uintptr*
playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
{
	uintptr len, elemsize, i, *end;

	for(;;) {
		switch(prog[0]) {
		case GC_END:
			return prog;
		case GC_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_APTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 2;
			break;
		case GC_ARRAY_START:
			len = prog[2];
			elemsize = prog[3];
			end = nil;
			for(i = 0; i < len; i++) {
				end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
				if(end[0] != GC_ARRAY_NEXT)
					runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
			}
			prog = end + 1;
			break;
		case GC_ARRAY_NEXT:
			return prog;
		case GC_CALL:
			playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
			prog += 3;
			break;
		case GC_CHAN_PTR:
			callback(arg, FieldKindPtr, offset + prog[1]);
			prog += 3;
			break;
		case GC_STRING:
			callback(arg, FieldKindString, offset + prog[1]);
			prog += 2;
			break;
		case GC_EFACE:
			callback(arg, FieldKindEface, offset + prog[1]);
			prog += 2;
			break;
		case GC_IFACE:
			callback(arg, FieldKindIface, offset + prog[1]);
			prog += 2;
			break;
		case GC_SLICE:
			callback(arg, FieldKindSlice, offset + prog[1]);
			prog += 3;
			break;
		case GC_REGION:
			playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
			prog += 4;
			break;
		default:
			runtime_printf("%D\n", (uint64)prog[0]);
			runtime_throw("bad gc op");
		}
	}
}

static void
dump_callback(void *p, uintptr kind, uintptr offset)
{
	USED(&p);
	dumpint(kind);
	dumpint(offset);
}

// dumpint() the kind & offset of each field in an object.
static void
dumpfields(uintptr *prog)
{
	playgcprog(0, prog, dump_callback, nil);
	dumpint(FieldKindEol);
}

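// callback for playgcprog: dump the dynamic type stored in an Eface field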
static void
dumpeface_callback(void *p, uintptr kind, uintptr offset)
{
	Eface *e;

	if(kind != FieldKindEface)
		return;
	e = (Eface*)((byte*)p + offset);
	dumptype(e->__type_descriptor);
}

// The heap dump reader needs to be able to disambiguate
// Eface entries. So it needs to know every type that might
// appear in such an entry. The following two routines accomplish
// that.

// Dump all the types that appear in the type field of
// any Eface contained in obj.
static void
dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *type, uintptr kind)
{
	uintptr i;

	switch(kind) {
	case TypeInfo_SingleObject:
		//playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
		break;
	case TypeInfo_Array:
		for(i = 0; i <= size - type->__size; i += type->__size)
			//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
			break;
	case TypeInfo_Chan:
		if(type->__size == 0) // channels may have zero-sized objects in them
			break;
		for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size)
			//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
			break;
	}
}