libgo/go/runtime/heapdump.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Implementation of runtime/debug.WriteHeapDump. Writes all
// objects in the heap plus additional info (roots, threads,
// finalizers, etc.) to a file.

// The format of the dumped file is described at
// https://golang.org/s/go15heapdump.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

//go:linkname runtime_debug_WriteHeapDump runtime_debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr) {
	stopTheWorld("write heap dump")

	systemstack(func() {
		writeheapdump_m(fd)
	})

	startTheWorld()
}

const (
	fieldKindEol   = 0
	fieldKindPtr   = 1
	fieldKindIface = 2
	fieldKindEface = 3

	tagEOF             = 0
	tagObject          = 1
	tagOtherRoot       = 2
	tagType            = 3
	tagGoroutine       = 4
	tagStackFrame      = 5
	tagParams          = 6
	tagFinalizer       = 7
	tagItab            = 8
	tagOSThread        = 9
	tagMemStats        = 10
	tagQueuedFinalizer = 11
	tagData            = 12
	tagBSS             = 13
	tagDefer           = 14
	tagPanic           = 15
	tagMemProf         = 16
	tagAllocSample     = 17
)

var dumpfd uintptr // fd to write the dump to.
var tmpbuf []byte

// buffer of pending write data
const (
	bufSize = 4096
)

var buf [bufSize]byte
var nbuf uintptr
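
// dwrite appends len bytes starting at data to the in-memory buffer,
// flushing the buffer to dumpfd when the new data would not fit; writes
// of bufSize bytes or more then go straight to the file.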
func dwrite(data unsafe.Pointer, len uintptr) {
	if len == 0 {
		return
	}
	if nbuf+len <= bufSize {
		copy(buf[nbuf:], (*[bufSize]byte)(data)[:len])
		nbuf += len
		return
	}

	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	if len >= bufSize {
		write(dumpfd, data, int32(len))
		nbuf = 0
	} else {
		copy(buf[:], (*[bufSize]byte)(data)[:len])
		nbuf = len
	}
}

func dwritebyte(b byte) {
	dwrite(unsafe.Pointer(&b), 1)
}

func flush() {
	write(dumpfd, unsafe.Pointer(&buf), int32(nbuf))
	nbuf = 0
}

// Cache of types that have been serialized already.
// We use a type's hash field to pick a bucket.
// Inside a bucket, we keep a list of types that
// have been serialized so far, most recently used first.
// Note: when a bucket overflows we may end up
// serializing a type more than once. That's ok.
const (
	typeCacheBuckets = 256
	typeCacheAssoc   = 4
)

type typeCacheBucket struct {
	t [typeCacheAssoc]*_type
}

var typecache [typeCacheBuckets]typeCacheBucket

// dump a uint64 in a varint format parseable by encoding/binary
func dumpint(v uint64) {
	var buf [10]byte
	var n int
	for v >= 0x80 {
		buf[n] = byte(v | 0x80)
		n++
		v >>= 7
	}
	buf[n] = byte(v)
	n++
	dwrite(unsafe.Pointer(&buf), uintptr(n))
}
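
// For example, dumpint(300) emits the two bytes 0xac, 0x02: the low seven
// bits of the value with the continuation bit (0x80) set, then the
// remaining bits, which is the layout encoding/binary's Uvarint reads.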

func dumpbool(b bool) {
	if b {
		dumpint(1)
	} else {
		dumpint(0)
	}
}

// dump varint uint64 length followed by memory contents
func dumpmemrange(data unsafe.Pointer, len uintptr) {
	dumpint(uint64(len))
	dwrite(data, len)
}

func dumpslice(b []byte) {
	dumpint(uint64(len(b)))
	if len(b) > 0 {
		dwrite(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}

func dumpstr(s string) {
	sp := stringStructOf(&s)
	dumpmemrange(sp.str, uintptr(sp.len))
}

// dump information for a type
func dumptype(t *_type) {
	if t == nil {
		return
	}

	// If we've definitely serialized the type before,
	// no need to do it again.
	b := &typecache[t.hash&(typeCacheBuckets-1)]
	if t == b.t[0] {
		return
	}
	for i := 1; i < typeCacheAssoc; i++ {
		if t == b.t[i] {
			// Move-to-front
			for j := i; j > 0; j-- {
				b.t[j] = b.t[j-1]
			}
			b.t[0] = t
			return
		}
	}

	// Might not have been dumped yet. Dump it and
	// remember we did so.
	for j := typeCacheAssoc - 1; j > 0; j-- {
		b.t[j] = b.t[j-1]
	}
	b.t[0] = t

	// dump the type
	dumpint(tagType)
	dumpint(uint64(uintptr(unsafe.Pointer(t))))
	dumpint(uint64(t.size))
	if x := t.uncommontype; x == nil || t.pkgPath == nil || *t.pkgPath == "" {
		dumpstr(*t.string)
	} else {
		pkgpathstr := *t.pkgPath
		pkgpath := stringStructOf(&pkgpathstr)
		namestr := *t.name
		name := stringStructOf(&namestr)
		dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
		dwrite(pkgpath.str, uintptr(pkgpath.len))
		dwritebyte('.')
		dwrite(name.str, uintptr(name.len))
	}
	dumpbool(t.kind&kindDirectIface == 0 || t.kind&kindNoPointers == 0)
}
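
// The record written above is: tagType, the type's address, its size, a
// length-prefixed (optionally package-qualified) name, and a final bool
// derived from the type's kind bits.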

// dump an object
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
	dumpint(tagObject)
	dumpint(uint64(uintptr(obj)))
	dumpmemrange(obj, size)
	dumpfields(bv)
}

func dumpotherroot(description string, to unsafe.Pointer) {
	dumpint(tagOtherRoot)
	dumpstr(description)
	dumpint(uint64(uintptr(to)))
}

func dumpfinalizer(obj unsafe.Pointer, fn *funcval, ft *functype, ot *ptrtype) {
	dumpint(tagFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(ft))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}

type childInfo struct {
	// Information passed up from the callee frame about
	// the layout of the outargs region.
	argoff uintptr   // where the arguments start in the frame
	arglen uintptr   // size of args region
	args   bitvector // if args.n >= 0, pointer map of args region
	sp     *uint8    // callee sp
	depth  uintptr   // depth in call stack (0 == most recent)
}

// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
	bv := gobv(*cbv)
	for i := uintptr(0); i < bv.n; i++ {
		if bv.bytedata[i/8]>>(i%8)&1 == 1 {
			dumpint(fieldKindPtr)
			dumpint(uint64(offset + i*sys.PtrSize))
		}
	}
}
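
// dumpgoroutine writes a tagGoroutine record for gp, followed by a
// tagDefer record for each of its pending defers and a tagPanic record
// for each of its active panics.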
func dumpgoroutine(gp *g) {
	sp := gp.syscallsp

	dumpint(tagGoroutine)
	dumpint(uint64(uintptr(unsafe.Pointer(gp))))
	dumpint(uint64(sp))
	dumpint(uint64(gp.goid))
	dumpint(uint64(gp.gopc))
	dumpint(uint64(readgstatus(gp)))
	dumpbool(isSystemGoroutine(gp))
	dumpbool(false) // isbackground
	dumpint(uint64(gp.waitsince))
	dumpstr(gp.waitreason)
	dumpint(0)
	dumpint(uint64(uintptr(unsafe.Pointer(gp.m))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._defer))))
	dumpint(uint64(uintptr(unsafe.Pointer(gp._panic))))

	// dump defer & panic records
	for d := gp._defer; d != nil; d = d.link {
		dumpint(tagDefer)
		dumpint(uint64(uintptr(unsafe.Pointer(d))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		dumpint(0)
		dumpint(0)
		dumpint(uint64(uintptr(unsafe.Pointer(d.pfn))))
		dumpint(0)
		dumpint(uint64(uintptr(unsafe.Pointer(d.link))))
	}
	for p := gp._panic; p != nil; p = p.link {
		dumpint(tagPanic)
		dumpint(uint64(uintptr(unsafe.Pointer(p))))
		dumpint(uint64(uintptr(unsafe.Pointer(gp))))
		eface := efaceOf(&p.arg)
		dumpint(uint64(uintptr(unsafe.Pointer(eface._type))))
		dumpint(uint64(uintptr(unsafe.Pointer(eface.data))))
		dumpint(0) // was p->defer, no longer recorded
		dumpint(uint64(uintptr(unsafe.Pointer(p.link))))
	}
}
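
// dumpgs dumps every goroutine that is runnable, in a syscall, or
// waiting; dead goroutines are skipped and any other status is fatal.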
func dumpgs() {
	// goroutines & stacks
	for i := 0; uintptr(i) < allglen; i++ {
		gp := allgs[i]
		status := readgstatus(gp) // The world is stopped so gp will not be in a scan state.
		switch status {
		default:
			print("runtime: unexpected G.status ", hex(status), "\n")
			throw("dumpgs in STW - bad status")
		case _Gdead:
			// ok
		case _Grunnable,
			_Gsyscall,
			_Gwaiting:
			dumpgoroutine(gp)
		}
	}
}

func finq_callback(fn *funcval, obj unsafe.Pointer, ft *functype, ot *ptrtype) {
	dumpint(tagQueuedFinalizer)
	dumpint(uint64(uintptr(obj)))
	dumpint(uint64(uintptr(unsafe.Pointer(fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(fn.fn))))
	dumpint(uint64(uintptr(unsafe.Pointer(ft))))
	dumpint(uint64(uintptr(unsafe.Pointer(ot))))
}
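
// dumproots dumps a tagFinalizer record for every finalizer registered
// on an in-use span, then dumps the queued finalizers.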
func dumproots() {
	// MSpan.types
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			// Finalizers
			for sp := s.specials; sp != nil; sp = sp.next {
				if sp.kind != _KindSpecialFinalizer {
					continue
				}
				spf := (*specialfinalizer)(unsafe.Pointer(sp))
				p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
				dumpfinalizer(p, spf.fn, spf.ft, spf.ot)
			}
		}
	}

	// Finalizer queue
	iterate_finq(finq_callback)
}

// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
var freemark [_PageSize / 8]bool
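
// dumpobjs dumps every allocated object in every in-use span, using
// freemark to skip slots the span reports as free.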
func dumpobjs() {
	for _, s := range mheap_.allspans {
		if s.state != _MSpanInUse {
			continue
		}
		p := s.base()
		size := s.elemsize
		n := (s.npages << _PageShift) / size
		if n > uintptr(len(freemark)) {
			throw("freemark array doesn't have enough entries")
		}

		for freeIndex := uintptr(0); freeIndex < s.nelems; freeIndex++ {
			if s.isFree(freeIndex) {
				freemark[freeIndex] = true
			}
		}

		for j := uintptr(0); j < n; j, p = j+1, p+size {
			if freemark[j] {
				freemark[j] = false
				continue
			}
			dumpobj(unsafe.Pointer(p), size, makeheapobjbv(p, size))
		}
	}
}
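
// dumpparams writes the tagParams record: pointer endianness and size,
// the arena bounds, GOARCH, GOEXPERIMENT, and the CPU count.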
func dumpparams() {
	dumpint(tagParams)
	x := uintptr(1)
	if *(*byte)(unsafe.Pointer(&x)) == 1 {
		dumpbool(false) // little-endian ptrs
	} else {
		dumpbool(true) // big-endian ptrs
	}
	dumpint(sys.PtrSize)
	dumpint(uint64(mheap_.arena_start))
	dumpint(uint64(mheap_.arena_used))
	dumpstr(sys.GOARCH)
	dumpstr(sys.Goexperiment)
	dumpint(uint64(ncpu))
}
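
// dumpms writes one tagOSThread record (M address, id, procid) per M.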
func dumpms() {
	for mp := allm; mp != nil; mp = mp.alllink {
		dumpint(tagOSThread)
		dumpint(uint64(uintptr(unsafe.Pointer(mp))))
		dumpint(uint64(mp.id))
		dumpint(mp.procid)
	}
}
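
// dumpmemstats writes a single tagMemStats record containing the
// memstats fields in a fixed order, including the 256 recent GC pause
// times.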
func dumpmemstats() {
	dumpint(tagMemStats)
	dumpint(memstats.alloc)
	dumpint(memstats.total_alloc)
	dumpint(memstats.sys)
	dumpint(memstats.nlookup)
	dumpint(memstats.nmalloc)
	dumpint(memstats.nfree)
	dumpint(memstats.heap_alloc)
	dumpint(memstats.heap_sys)
	dumpint(memstats.heap_idle)
	dumpint(memstats.heap_inuse)
	dumpint(memstats.heap_released)
	dumpint(memstats.heap_objects)
	dumpint(memstats.stacks_inuse)
	dumpint(memstats.stacks_sys)
	dumpint(memstats.mspan_inuse)
	dumpint(memstats.mspan_sys)
	dumpint(memstats.mcache_inuse)
	dumpint(memstats.mcache_sys)
	dumpint(memstats.buckhash_sys)
	dumpint(memstats.gc_sys)
	dumpint(memstats.other_sys)
	dumpint(memstats.next_gc)
	dumpint(memstats.last_gc_unix)
	dumpint(memstats.pause_total_ns)
	for i := 0; i < 256; i++ {
		dumpint(memstats.pause_ns[i])
	}
	dumpint(uint64(memstats.numgc))
}
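
// dumpmemprof_callback writes one tagMemProf record per profiling
// bucket: the bucket address, size, and stack depth, then one
// (function, file, line) triple per frame; frames with no symbol
// information are rendered as a hex PC. The allocation and free counts
// follow the stack.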
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *location, size, allocs, frees uintptr) {
	stk := (*[100000]location)(unsafe.Pointer(pstk))
	dumpint(tagMemProf)
	dumpint(uint64(uintptr(unsafe.Pointer(b))))
	dumpint(uint64(size))
	dumpint(uint64(nstk))
	for i := uintptr(0); i < nstk; i++ {
		pc := stk[i].pc
		fn := stk[i].function
		file := stk[i].filename
		line := stk[i].lineno
		if fn == "" {
			var buf [64]byte
			n := len(buf)
			n--
			buf[n] = ')'
			if pc == 0 {
				n--
				buf[n] = '0'
			} else {
				for pc > 0 {
					n--
					buf[n] = "0123456789abcdef"[pc&15]
					pc >>= 4
				}
			}
			n--
			buf[n] = 'x'
			n--
			buf[n] = '0'
			n--
			buf[n] = '('
			dumpslice(buf[n:])
			dumpstr("?")
			dumpint(0)
		} else {
			dumpstr(fn)
			dumpstr(file)
			dumpint(uint64(line))
		}
	}
	dumpint(uint64(allocs))
	dumpint(uint64(frees))
}
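
// dumpmemprof dumps the memory profile buckets and then emits a
// tagAllocSample record tying each profiled object still in the heap to
// its bucket.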
func dumpmemprof() {
	iterate_memprof(dumpmemprof_callback)
	for _, s := range mheap_.allspans {
		if s.state != _MSpanInUse {
			continue
		}
		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialProfile {
				continue
			}
			spp := (*specialprofile)(unsafe.Pointer(sp))
			p := s.base() + uintptr(spp.special.offset)
			dumpint(tagAllocSample)
			dumpint(uint64(p))
			dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
		}
	}
}

var dumphdr = []byte("go1.7 heap dump\n")
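
// mdump writes the dump itself: the header, params, objects, goroutines,
// OS threads, roots, memory stats, and memory profile, terminated by a
// tagEOF record and a final flush.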
func mdump() {
	// make sure we're done sweeping
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			s.ensureSwept()
		}
	}
	memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
	dwrite(unsafe.Pointer(&dumphdr[0]), uintptr(len(dumphdr)))
	dumpparams()
	dumpobjs()
	dumpgs()
	dumpms()
	dumproots()
	dumpmemstats()
	dumpmemprof()
	dumpint(tagEOF)
	flush()
}
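
// writeheapdump_m runs on the system stack with the world stopped: it
// parks the calling goroutine as waiting, refreshes the memory stats,
// writes the dump to fd, then frees the temporary buffer and restores
// the goroutine's status.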
func writeheapdump_m(fd uintptr) {
	_g_ := getg()
	casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
	_g_.waitreason = "dumping heap"

	// Update stats so we can dump them.
	// As a side effect, flushes all the MCaches so the MSpan.freelist
	// lists contain all the free objects.
	updatememstats()

	// Set dump file.
	dumpfd = fd

	// Call dump routine.
	mdump()

	// Reset dump file.
	dumpfd = 0
	if tmpbuf != nil {
		sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		tmpbuf = nil
	}

	casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
}

// dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector) {
	dumpbv(&bv, 0)
	dumpint(fieldKindEol)
}
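
// makeheapobjbv builds a pointer bitmap for the size bytes at p: one bit
// per pointer-sized word, set where the heap bitmap records a pointer.
// The bits are stored in tmpbuf, which is grown as needed.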
func makeheapobjbv(p uintptr, size uintptr) bitvector {
	// Extend the temp buffer if necessary.
	nptr := size / sys.PtrSize
	if uintptr(len(tmpbuf)) < nptr/8+1 {
		if tmpbuf != nil {
			sysFree(unsafe.Pointer(&tmpbuf[0]), uintptr(len(tmpbuf)), &memstats.other_sys)
		}
		n := nptr/8 + 1
		p := sysAlloc(n, &memstats.other_sys)
		if p == nil {
			throw("heapdump: out of memory")
		}
		tmpbuf = (*[1 << 30]byte)(p)[:n]
	}
	// Convert heap bitmap to pointer bitmap.
	for i := uintptr(0); i < nptr/8+1; i++ {
		tmpbuf[i] = 0
	}
	i := uintptr(0)
	hbits := heapBitsForAddr(p)
	for ; i < nptr; i++ {
		if i != 1 && !hbits.morePointers() {
			break // end of object
		}
		if hbits.isPointer() {
			tmpbuf[i/8] |= 1 << (i % 8)
		}
		hbits = hbits.next()
	}
	return bitvector{int32(i), &tmpbuf[0]}
}
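
// gobitvector is a Go-friendly view of a runtime bitvector: its length
// in bits plus the backing bytes as a slice; gobv performs the
// conversion.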
type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}