// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvUserTaskCreate    = 45 // trace.NewContext [timestamp, internal task id, internal parent task id, stack, name string]
	traceEvUserTaskEnd       = 46 // end of a task [timestamp, internal task id, stack]
	traceEvUserRegion        = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), stack, name string]
	traceEvUserLog           = 48 // trace.Log [timestamp, internal task id, key string id, stack, value string]
	traceEvCount             = 49
	// Byte is used but only 6 bits are available for event type.
	// The remaining 2 bits are used to specify the number of arguments.
	// That means, the max event type value is 63.
)
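
// Worked example (added note, not in the original source): the first byte of
// each event packs the event type into the low 6 bits and the argument count
// into the top 2 bits. The batch header written by traceFlush below is:
//
//	b := byte(traceEvBatch | 1<<traceArgCountShift) // 1 | 0x40 = 0x41
//
// so a decoder recovers the event type as b&0x3f and the count as b>>6.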
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
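
// Worked arithmetic (added note, not in the original source): on x86 the
// expression above evaluates to 16 + 48*1 = 64. On a 3GHz machine one cputick
// is ~0.33ns, so dividing by 64 yields timestamp units of roughly
//
//	unit := 64.0 / 3e9 // seconds per encoded unit, ≈ 21.3ns
//
// which keeps consecutive tick diffs small enough to varint-encode in one or
// two bytes in the common case.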
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// TODO: central lock to access the map is not ideal.
	//   option: pre-assign ids to all user annotation region names and tags
	//   option: per-P cache
	//   option: sync.Map like data structure
	stringsLock mutex
	strings     map[string]uint64
	stringSeq   uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr              // in trace.empty/full
	lastTicks uint64                   // when we wrote the last event
	pos       int                      // next write offset in arr
	stk       [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]location, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]location{location{pc: gp.startpc + sys.PCQuantum}})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
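
// Added usage sketch (not in the original source): a client driving this API
// directly would pair StartTrace/StopTrace with a ReadTrace loop, roughly:
//
//	if err := runtime.StartTrace(); err != nil {
//		return err
//	}
//	go func() {
//		for {
//			data := runtime.ReadTrace()
//			if data == nil {
//				break // tracing stopped and all buffered data consumed
//			}
//			w.Write(data) // w is some io.Writer supplied by the caller
//		}
//	}()
//	// ... run the workload to be traced ...
//	runtime.StopTrace()
//
// This mirrors what the runtime/trace package does in trace.Start/trace.Stop.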
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}

	trace.shutdown = false
	unlock(&trace.lock)
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.11 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, waitReasonTraceReaderBlocked, traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}
// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}
// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	//
	// Note trace_userTaskCreate runs the same check.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	if skip > 0 {
		if getg() == mp.curg {
			skip++ // +1 because stack is captured in traceEventLocked.
		}
	}
	traceEventLocked(0, mp, pid, bufp, ev, skip, args...)
	traceReleaseBuffer(pid)
}
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
	buf := bufp.ptr()
	// TODO: test on non-zero extraBytes param.
	maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
}
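
// Worked example (added note, not in the original source): traceProcStop emits
// traceEvent(traceEvProcStop, -1), so narg stays 0 (no explicit args, no
// stack). The encoded event is just the header byte followed by the
// timestamp delta:
//
//	0x06              // traceEvProcStop | 0<<traceArgCountShift
//	<tickDiff varint> // delta from the previous event in the batch
//
// Events with narg == 3 additionally carry a length byte after the header so
// decoders can skip arguments they do not understand.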
func traceStackID(mp *m, buf []location, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf[:])
	} else if gp != nil {
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}
// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}
// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}
// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}

	lock(&trace.stringsLock)
	if raceenabled {
		// raceacquire is necessary because the map access
		// below is race annotated.
		raceacquire(unsafe.Pointer(&trace.stringsLock))
	}

	if id, ok := trace.strings[s]; ok {
		if raceenabled {
			racerelease(unsafe.Pointer(&trace.stringsLock))
		}
		unlock(&trace.stringsLock)

		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	if raceenabled {
		racerelease(unsafe.Pointer(&trace.stringsLock))
	}
	unlock(&trace.stringsLock)

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.
	buf := bufp.ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		bufp.set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(uint64(id))

	// double-check the string and the length can fit.
	// Otherwise, truncate the string.
	slen := len(s)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}

	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], s[:slen])

	bufp.set(buf)
	return id, bufp
}
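
// Worked example (added note, not in the original source): a dictionary entry
// for the 9-byte string "GC (idle)", assuming it was assigned id 5, would be
// laid out as:
//
//	0x25        // traceEvString (37)
//	0x05        // string id, varint
//	0x09        // length, varint
//	"GC (idle)" // raw bytes
//
// Later events then refer to the string by id alone.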
// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
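
// Worked example (added note, not in the original source): encoding v = 300
// (binary 1_0010_1100). The low 7 bits 010_1100 go out first with the
// continuation bit set, then the remaining bits:
//
//	traceAppend(nil, 300) // -> []byte{0xac, 0x02}
//
// A decoder accumulates 0x2c | 0x02<<7 = 300.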
// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}
// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}
// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]location // real type [n]location
}
type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }
// stack returns slice of PCs.
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		// Use memmove to avoid write barrier.
		memmove(unsafe.Pointer(&stkpc[i]), unsafe.Pointer(&pc), unsafe.Sizeof(pc))
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := stk.stack()
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}
type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}
// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f location) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.lineno)
	file := f.filename
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}
// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		// This is only safe because the strings returned by callers
		// are stored in a location that is not in the Go heap.
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}
// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}
// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}
func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}
func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}
func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}
func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}
func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}
func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}
func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}
func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}
func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}
func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}
func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	if !trace.enabled {
		return
	}

	// Same as in traceEvent.
	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	typeStringID, bufp := traceString(bufp, pid, taskType)
	traceEventLocked(0, mp, pid, bufp, traceEvUserTaskCreate, 3, id, parentID, typeStringID)
	traceReleaseBuffer(pid)
}
//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	traceEvent(traceEvUserTaskEnd, 2, id)
}
//go:linkname trace_userRegion runtime..z2ftrace.userRegion
func trace_userRegion(id, mode uint64, name string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	nameStringID, bufp := traceString(bufp, pid, name)
	traceEventLocked(0, mp, pid, bufp, traceEvUserRegion, 3, id, mode, nameStringID)
	traceReleaseBuffer(pid)
}
//go:linkname trace_userLog runtime..z2ftrace.userLog
func trace_userLog(id uint64, category, message string) {
	if !trace.enabled {
		return
	}

	mp, pid, bufp := traceAcquireBuffer()
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}

	categoryID, bufp := traceString(bufp, pid, category)

	extraSpace := traceBytesPerNumber + len(message) // extraSpace for the value string
	traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
	// traceEventLocked reserved extra space for val and len(val)
	// in buf, so buf now has room for the following.
	buf := (*bufp).ptr()

	// double-check the message and its length can fit.
	// Otherwise, truncate the message.
	slen := len(message)
	if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
		slen = room
	}
	buf.varint(uint64(slen))
	buf.pos += copy(buf.arr[buf.pos:], message[:slen])

	traceReleaseBuffer(pid)
}