1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Implementation of runtime/debug.WriteHeapDump. Writes all
6 // objects in the heap plus additional info (roots, threads,
7 // finalizers, etc.) to a file.
9 // The format of the dumped file is described at
10 // https://golang.org/s/go13heapdump.
20 #define KindNoPointers GO_NO_POINTERS
41 TagQueuedFinalizer
= 11,
49 TypeInfo_Conservative
= 127,
52 // static uintptr* playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg);
53 // static void dumpfields(uintptr *prog);
54 static void dumpefacetypes(void *obj
, uintptr size
, const Type
*type
, uintptr kind
);
56 // fd to write the dump to.
57 static uintptr dumpfd
;
59 // buffer of pending write data
63 static byte buf
[BufSize
];
67 hwrite(const byte
*data
, uintptr len
)
69 if(len
+ nbuf
<= BufSize
) {
70 runtime_memmove(buf
+ nbuf
, data
, len
);
74 runtime_write(dumpfd
, buf
, nbuf
);
76 runtime_write(dumpfd
, data
, len
);
79 runtime_memmove(buf
, data
, len
);
87 runtime_write(dumpfd
, buf
, nbuf
);
91 // Cache of types that have been serialized already.
92 // We use a type's hash field to pick a bucket.
93 // Inside a bucket, we keep a list of types that
94 // have been serialized so far, most recently used first.
95 // Note: when a bucket overflows we may end up
96 // serializing a type more than once. That's ok.
98 TypeCacheBuckets
= 256, // must be a power of 2
101 typedef struct TypeCacheBucket TypeCacheBucket
;
102 struct TypeCacheBucket
{
103 const Type
*t
[TypeCacheAssoc
];
105 static TypeCacheBucket typecache
[TypeCacheBuckets
];
107 // dump a uint64 in a varint format parseable by encoding/binary
128 // dump varint uint64 length followed by memory contents
130 dumpmemrange(const byte
*data
, uintptr len
)
139 dumpmemrange(s
.str
, s
.len
);
143 dumpcstr(const int8
*c
)
145 dumpmemrange((const byte
*)c
, runtime_findnull((const byte
*)c
));
148 // dump information for a type
150 dumptype(const Type
*t
)
159 // If we've definitely serialized the type before,
160 // no need to do it again.
161 b
= &typecache
[t
->hash
& (TypeCacheBuckets
-1)];
162 if(t
== b
->t
[0]) return;
163 for(i
= 1; i
< TypeCacheAssoc
; i
++) {
166 for(j
= i
; j
> 0; j
--) {
173 // Might not have been dumped yet. Dump it and
174 // remember we did so.
175 for(j
= TypeCacheAssoc
-1; j
> 0; j
--) {
184 if(t
->__uncommon
== nil
|| t
->__uncommon
->__pkg_path
== nil
|| t
->__uncommon
->__name
== nil
) {
185 dumpstr(*t
->__reflection
);
187 dumpint(t
->__uncommon
->__pkg_path
->len
+ 1 + t
->__uncommon
->__name
->len
);
188 hwrite(t
->__uncommon
->__pkg_path
->str
, t
->__uncommon
->__pkg_path
->len
);
189 hwrite((const byte
*)".", 1);
190 hwrite(t
->__uncommon
->__name
->str
, t
->__uncommon
->__name
->len
);
192 dumpbool(t
->__size
> PtrSize
|| (t
->__code
& KindNoPointers
) == 0);
193 // dumpfields((uintptr*)t->gc + 1);
196 // returns true if object is scannable
200 uintptr
*b
, off
, shift
;
202 off
= (uintptr
*)obj
- (uintptr
*)runtime_mheap
.arena_start
; // word offset
203 b
= (uintptr
*)runtime_mheap
.arena_start
- off
/wordsPerBitmapWord
- 1;
204 shift
= off
% wordsPerBitmapWord
;
205 return ((*b
>> shift
) & bitScan
) != 0;
210 dumpobj(byte
*obj
, uintptr size
, const Type
*type
, uintptr kind
)
214 dumpefacetypes(obj
, size
, type
, kind
);
218 dumpint((uintptr
)obj
);
219 dumpint((uintptr
)type
);
221 dumpmemrange(obj
, size
);
225 dumpotherroot(const char *description
, byte
*to
)
227 dumpint(TagOtherRoot
);
228 dumpcstr((const int8
*)description
);
229 dumpint((uintptr
)to
);
233 dumpfinalizer(byte
*obj
, FuncVal
*fn
, const FuncType
* ft
, const PtrType
*ot
)
235 dumpint(TagFinalizer
);
236 dumpint((uintptr
)obj
);
237 dumpint((uintptr
)fn
);
238 dumpint((uintptr
)fn
->fn
);
239 dumpint((uintptr
)ft
);
240 dumpint((uintptr
)ot
);
243 typedef struct ChildInfo ChildInfo
;
245 // Information passed up from the callee frame about
246 // the layout of the outargs region.
247 uintptr argoff
; // where the arguments start in the frame
248 uintptr arglen
; // size of args region
249 BitVector args
; // if args.n >= 0, pointer map of args region
251 byte
*sp
; // callee sp
252 uintptr depth
; // depth in call stack (0 == most recent)
262 dumpint(TagGoRoutine
);
263 dumpint((uintptr
)gp
);
267 dumpint(gp
->atomicstatus
);
268 dumpbool(gp
->issystem
);
269 dumpbool(gp
->isbackground
);
270 dumpint(gp
->waitsince
);
271 dumpstr(gp
->waitreason
);
273 dumpint((uintptr
)gp
->m
);
274 dumpint((uintptr
)gp
->_defer
);
275 dumpint((uintptr
)gp
->_panic
);
278 // child.args.n = -1;
282 // if(!ScanStackByFrames)
283 // runtime_throw("need frame info to dump stacks");
284 // runtime_gentraceback(pc, sp, lr, gp, 0, nil, 0x7fffffff, dumpframe, &child, false);
286 // dump defer & panic records
287 for(d
= gp
->_defer
; d
!= nil
; d
= d
->next
) {
290 dumpint((uintptr
)gp
);
291 dumpint((uintptr
)d
->arg
);
292 dumpint((uintptr
)d
->frame
);
293 dumpint((uintptr
)d
->pfn
);
295 dumpint((uintptr
)d
->next
);
297 for (p
= gp
->_panic
; p
!= nil
; p
= p
->next
) {
300 dumpint((uintptr
)gp
);
301 dumpint((uintptr
)p
->arg
.__type_descriptor
);
302 dumpint((uintptr
)p
->arg
.__object
);
304 dumpint((uintptr
)p
->next
);
314 // goroutines & stacks
315 for(i
= 0; i
< runtime_allglen
; i
++) {
316 gp
= runtime_allg
[i
];
317 switch(gp
->atomicstatus
){
319 runtime_printf("unexpected G.status %d\n", gp
->atomicstatus
);
320 runtime_throw("mark - bad status");
333 finq_callback(FuncVal
*fn
, void *obj
, const FuncType
*ft
, const PtrType
*ot
)
335 dumpint(TagQueuedFinalizer
);
336 dumpint((uintptr
)obj
);
337 dumpint((uintptr
)fn
);
338 dumpint((uintptr
)fn
->fn
);
339 dumpint((uintptr
)ft
);
340 dumpint((uintptr
)ot
);
347 MSpan
*s
, **allspans
;
350 SpecialFinalizer
*spf
;
355 // dumpint((uintptr)data);
356 // dumpmemrange(data, edata - data);
357 // dumpfields((uintptr*)gcdata + 1);
361 // dumpint((uintptr)bss);
362 // dumpmemrange(bss, ebss - bss);
363 // dumpfields((uintptr*)gcbss + 1);
366 allspans
= runtime_mheap
.allspans
;
367 for(spanidx
=0; spanidx
<runtime_mheap
.nspan
; spanidx
++) {
368 s
= allspans
[spanidx
];
369 if(s
->state
== MSpanInUse
) {
370 // The garbage collector ignores type pointers stored in MSpan.types:
371 // - Compiler-generated types are stored outside of heap.
372 // - The reflect package has runtime-generated types cached in its data structures.
373 // The garbage collector relies on finding the references via that cache.
374 switch(s
->types
.compression
) {
380 dumpotherroot("runtime type info", (byte
*)s
->types
.data
);
385 for(sp
= s
->specials
; sp
!= nil
; sp
= sp
->next
) {
386 if(sp
->kind
!= KindSpecialFinalizer
)
388 spf
= (SpecialFinalizer
*)sp
;
389 p
= (byte
*)((s
->start
<< PageShift
) + spf
->offset
);
390 dumpfinalizer(p
, spf
->fn
, spf
->ft
, spf
->ot
);
396 runtime_iterate_finq(finq_callback
);
399 // Bit vector of free marks.
400 // Needs to be as big as the largest number of objects per span.
401 static byte hfree
[PageSize
/8];
406 uintptr i
, j
, size
, n
, off
, shift
, *bitp
, bits
, ti
, kind
;
412 for(i
= 0; i
< runtime_mheap
.nspan
; i
++) {
413 s
= runtime_mheap
.allspans
[i
];
414 if(s
->state
!= MSpanInUse
)
416 p
= (byte
*)(s
->start
<< PageShift
);
418 n
= (s
->npages
<< PageShift
) / size
;
420 runtime_throw("free array doesn't have enough entries");
421 for(l
= s
->freelist
; l
!= nil
; l
= l
->next
) {
422 hfree
[((byte
*)l
- p
) / size
] = true;
424 for(j
= 0; j
< n
; j
++, p
+= size
) {
429 off
= (uintptr
*)p
- (uintptr
*)runtime_mheap
.arena_start
;
430 bitp
= (uintptr
*)runtime_mheap
.arena_start
- off
/wordsPerBitmapWord
- 1;
431 shift
= off
% wordsPerBitmapWord
;
432 bits
= *bitp
>> shift
;
434 // Skip FlagNoGC allocations (stacks)
435 if((bits
& bitAllocated
) == 0)
438 // extract type and kind
439 ti
= runtime_gettype(p
);
440 t
= (Type
*)(ti
& ~(uintptr
)(PtrSize
-1));
441 kind
= ti
& (PtrSize
-1);
444 if(kind
== TypeInfo_Chan
)
445 t
= ((const ChanType
*)t
)->__element_type
; // use element type for chan encoding
446 if(t
== nil
&& scannable(p
))
447 kind
= TypeInfo_Conservative
; // special kind for conservatively scanned objects
448 dumpobj(p
, size
, t
, kind
);
461 dumpbool(false); // little-endian ptrs
463 dumpbool(true); // big-endian ptrs
465 dumpint(runtime_Hchansize
);
466 dumpint((uintptr
)runtime_mheap
.arena_start
);
467 dumpint((uintptr
)runtime_mheap
.arena_used
);
469 dumpcstr((const int8
*)"");
470 dumpint(runtime_ncpu
);
478 for(mp
= runtime_allm
; mp
!= nil
; mp
= mp
->alllink
) {
479 dumpint(TagOSThread
);
480 dumpint((uintptr
)mp
);
491 dumpint(TagMemStats
);
492 dumpint(mstats
.alloc
);
493 dumpint(mstats
.total_alloc
);
495 dumpint(mstats
.nlookup
);
496 dumpint(mstats
.nmalloc
);
497 dumpint(mstats
.nfree
);
498 dumpint(mstats
.heap_alloc
);
499 dumpint(mstats
.heap_sys
);
500 dumpint(mstats
.heap_idle
);
501 dumpint(mstats
.heap_inuse
);
502 dumpint(mstats
.heap_released
);
503 dumpint(mstats
.heap_objects
);
504 dumpint(mstats
.stacks_inuse
);
505 dumpint(mstats
.stacks_sys
);
506 dumpint(mstats
.mspan_inuse
);
507 dumpint(mstats
.mspan_sys
);
508 dumpint(mstats
.mcache_inuse
);
509 dumpint(mstats
.mcache_sys
);
510 dumpint(mstats
.buckhash_sys
);
511 dumpint(mstats
.gc_sys
);
512 dumpint(mstats
.other_sys
);
513 dumpint(mstats
.next_gc
);
514 dumpint(mstats
.last_gc
);
515 dumpint(mstats
.pause_total_ns
);
516 for(i
= 0; i
< 256; i
++)
517 dumpint(mstats
.pause_ns
[i
]);
518 dumpint(mstats
.numgc
);
522 dumpmemprof_callback(Bucket
*b
, uintptr nstk
, Location
*stk
, uintptr size
, uintptr allocs
, uintptr frees
)
531 for(i
= 0; i
< nstk
; i
++) {
533 if(stk
[i
].function
.len
== 0) {
534 runtime_snprintf(buf
, sizeof(buf
), "%X", (uint64
)pc
);
535 dumpcstr((int8
*)buf
);
536 dumpcstr((const int8
*)"?");
539 dumpstr(stk
[i
].function
);
540 dumpstr(stk
[i
].filename
);
541 dumpint(stk
[i
].lineno
);
551 MSpan
*s
, **allspans
;
557 runtime_iterate_memprof(dumpmemprof_callback
);
559 allspans
= runtime_mheap
.allspans
;
560 for(spanidx
=0; spanidx
<runtime_mheap
.nspan
; spanidx
++) {
561 s
= allspans
[spanidx
];
562 if(s
->state
!= MSpanInUse
)
564 for(sp
= s
->specials
; sp
!= nil
; sp
= sp
->next
) {
565 if(sp
->kind
!= KindSpecialProfile
)
567 spp
= (SpecialProfile
*)sp
;
568 p
= (byte
*)((s
->start
<< PageShift
) + spp
->offset
);
569 dumpint(TagAllocSample
);
571 dumpint((uintptr
)spp
->b
);
583 // make sure we're done sweeping
584 for(i
= 0; i
< runtime_mheap
.nspan
; i
++) {
585 s
= runtime_mheap
.allspans
[i
];
586 if(s
->state
== MSpanInUse
)
587 runtime_MSpan_EnsureSwept(s
);
590 runtime_memclr((byte
*)&typecache
[0], sizeof(typecache
));
591 hdr
= (const byte
*)"go1.3 heap dump\n";
592 hwrite(hdr
, runtime_findnull(hdr
));
604 gp
->atomicstatus
= _Grunning
;
608 void runtime_debug_WriteHeapDump(uintptr
)
609 __asm__(GOSYM_PREFIX
"runtime_debug.WriteHeapDump");
612 runtime_debug_WriteHeapDump(uintptr fd
)
618 runtime_semacquire(&runtime_worldsema
, false);
622 runtime_stoptheworld();
624 // Update stats so we can dump them.
625 // As a side effect, flushes all the MCaches so the MSpan.freelist
626 // lists contain all the free objects.
627 runtime_updatememstats(nil
);
632 // Call dump routine on M stack.
634 g
->atomicstatus
= _Gwaiting
;
635 g
->waitreason
= runtime_gostringnocopy((const byte
*)"dumping heap");
636 runtime_mcall(mdump
);
641 // Start up the world again.
643 runtime_semrelease(&runtime_worldsema
);
644 runtime_starttheworld();
648 // Runs the specified gc program. Calls the callback for every
649 // pointer-like field specified by the program and passes to the
650 // callback the kind and offset of that field within the object.
651 // offset is the offset in the object of the start of the program.
652 // Returns a pointer to the opcode that ended the gc program (either
653 // GC_END or GC_ARRAY_NEXT).
656 playgcprog(uintptr offset, uintptr *prog, void (*callback)(void*,uintptr,uintptr), void *arg)
658 uintptr len, elemsize, i, *end;
665 callback(arg, FieldKindPtr, offset + prog[1]);
669 callback(arg, FieldKindPtr, offset + prog[1]);
676 for(i = 0; i < len; i++) {
677 end = playgcprog(offset + prog[1] + i * elemsize, prog + 4, callback, arg);
678 if(end[0] != GC_ARRAY_NEXT)
679 runtime_throw("GC_ARRAY_START did not have matching GC_ARRAY_NEXT");
686 playgcprog(offset + prog[1], (uintptr*)((byte*)prog + *(int32*)&prog[2]), callback, arg);
690 callback(arg, FieldKindPtr, offset + prog[1]);
694 callback(arg, FieldKindString, offset + prog[1]);
698 callback(arg, FieldKindEface, offset + prog[1]);
702 callback(arg, FieldKindIface, offset + prog[1]);
706 callback(arg, FieldKindSlice, offset + prog[1]);
710 playgcprog(offset + prog[1], (uintptr*)prog[3] + 1, callback, arg);
714 runtime_printf("%D\n", (uint64)prog[0]);
715 runtime_throw("bad gc op");
721 dump_callback(void *p, uintptr kind, uintptr offset)
728 // dumpint() the kind & offset of each field in an object.
730 dumpfields(uintptr *prog)
732 playgcprog(0, prog, dump_callback, nil);
733 dumpint(FieldKindEol);
737 dumpeface_callback(void *p, uintptr kind, uintptr offset)
741 if(kind != FieldKindEface)
743 e = (Eface*)((byte*)p + offset);
744 dumptype(e->__type_descriptor);
748 // The heap dump reader needs to be able to disambiguate
749 // Eface entries. So it needs to know every type that might
750 // appear in such an entry. The following two routines accomplish
753 // Dump all the types that appear in the type field of
754 // any Eface contained in obj.
756 dumpefacetypes(void *obj
__attribute__ ((unused
)), uintptr size
, const Type
*type
, uintptr kind
)
761 case TypeInfo_SingleObject
:
762 //playgcprog(0, (uintptr*)type->gc + 1, dumpeface_callback, obj);
765 for(i
= 0; i
<= size
- type
->__size
; i
+= type
->__size
) {
766 //playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
770 if(type
->__size
== 0) // channels may have zero-sized objects in them
772 for(i
= runtime_Hchansize
; i
<= size
- type
->__size
; i
+= type
->__size
) {
773 //playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);