// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.
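//
// A typical way to collect and view a trace (a sketch, using standard
// tooling rather than anything defined in this file):
//
//	go test -trace=trace.out ./somepkg
//	go tool trace trace.out
//
// Programmatic collection goes through runtime/trace.Start and Stop,
// which drive StartTrace/StopTrace/ReadTrace below.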
16 "runtime/internal/sys"
// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
)
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
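
// Layout sketch (derived from the constants above): the first byte of an
// event packs the event type in its low 6 bits and the inline argument
// count in the top 2 bits (traceArgCountShift). traceFlush below, for
// example, starts each batch with
//
//	buf.byte(traceEvBatch | 1<<traceArgCountShift)
//	buf.varint(uint64(pid))
//	buf.varint(ticks)
//
// where each varint is a little-endian base-128 encoding of at most
// traceBytesPerNumber bytes.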
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// Currently this is used only at trace setup and for
	// func/file:line info after tracing session, so we assume
	// single-threaded access.
	strings   map[string]uint64
	stringSeq uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr              // in trace.empty/full
	lastTicks uint64                   // when we wrote the last event
	pos       int                      // next write offset in arr
	stk       [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
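//
// For illustration, a typical client (hypothetical code, using the public
// runtime/trace API rather than this function directly):
//
//	f, err := os.Create("prog.trace")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := trace.Start(f); err != nil {
//		log.Fatal(err)
//	}
//	defer trace.Stop()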
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}
	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]location, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)
	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]location{location{pc: gp.startpc + sys.PCQuantum}})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}
	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p != nil && p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.shutdown = false
	unlock(&trace.lock)
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
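//
// A consumer loop might look like this (a sketch of hypothetical client
// code, with w an io.Writer chosen by the caller):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break
//		}
//		w.Write(data) // data is invalidated by the next ReadTrace call
//	}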
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()
	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we'd rather not crash the program because of tracing,
		// since tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.10 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}
// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}
// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}
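
// Worked example (illustrative): traceProcStop calls traceEvent(traceEvProcStop, -1).
// With no args and skip < 0, narg stays 0, so the encoded event is just the
// header byte traceEvProcStop|0<<traceArgCountShift followed by the varint
// tick delta; no length byte and no stack id are emitted.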
func traceStackID(mp *m, buf []location, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf[:])
	} else if gp != nil {
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}
// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}
// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}
	if id, ok := trace.strings[s]; ok {
		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	// The memory allocation above may trigger tracing and
	// cause *bufp changes. The following code works with *bufp,
	// so there must be no memory allocation or any other activity
	// that causes tracing after this point.

	buf := (*bufp).ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(uint64(id))
	buf.varint(uint64(len(s)))
	buf.pos += copy(buf.arr[buf.pos:], s)

	(*bufp).set(buf)
	return id, bufp
}
// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
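
// For example (illustrative): traceAppend(nil, 300) returns []byte{0xac, 0x02},
// since 300 = 0b100101100: the low 7 bits 0101100 are emitted first with the
// continuation bit set (0xac), then the remaining bits 10 as 0x02.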
// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]location // real type [n]location
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
// stack returns slice of PCs.
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		// Use memmove to avoid write barrier.
		memmove(unsafe.Pointer(&stkpc[i]), unsafe.Pointer(&pc), unsafe.Sizeof(pc))
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
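
// The loop above resembles a Jenkins-style one-at-a-time hash over the frame
// PCs; together with the double-checked find (first lock-free, then under
// tab.lock) it keeps put cheap in the common case where the stack is already
// interned.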
// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := stk.stack()
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}
type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f location) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.lineno)
	file := f.filename
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		// This is only safe because the strings returned by callers
		// are stored in a location that is not in the Go heap.
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
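
// Lifecycle sketch (for orientation): traceStackTable.newStack carves records
// out of tab.mem with alloc, and traceStackTable.dump releases the whole
// region at once with drop; there is no per-record free.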
// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}
func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}
// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}
// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}
func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}
func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}
func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}
func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}
func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}
func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}
func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}