libgo/go/runtime/trace.go (libgo: update to go1.9)
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc., and writes them to a buffer
// in a compact form. A nanosecond-precision timestamp and a stack
// trace are captured for most events.
// See https://golang.org/s/go15trace for more info.
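//
// As a rough usage sketch (illustrative only; most clients should use the
// runtime/trace package or the testing package's -test.trace flag rather
// than this low-level API), a caller drives StartTrace/ReadTrace/StopTrace
// like so, where w is a hypothetical io.Writer:
//
//	if err := StartTrace(); err != nil {
//		return err // tracing was already enabled
//	}
//	go func() {
//		for {
//			data := ReadTrace() // blocks until data is available
//			if data == nil {
//				return // tracing stopped and all buffered data was consumed
//			}
//			w.Write(data) // must copy/consume data before the next ReadTrace call
//		}
//	}()
//	// ... run the workload to be traced ...
//	StopTrace()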
package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCScanStart       = 9  // GC mark termination start [timestamp]
	traceEvGCScanDone        = 10 // GC mark termination done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvCount             = 45
)
const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in fewer bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for the end user.
	traceFutileWakeup byte = 128
)
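// As an illustrative sketch (not part of the runtime), the first byte of
// every event packs the event type into the low traceArgCountShift bits and
// the argument count into the top two bits, so a decoder would split it as:
//
//	ev := b & (1<<traceArgCountShift - 1) // event type (one of traceEv*)
//	narg := b >> traceArgCountShift       // 0..3; 3 means a length byte follows
//
// See traceEvent below for the encoding side.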
// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock acquisition
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// Currently this is used only at trace setup and for
	// func/file:line info after tracing session, so we assume
	// single-threaded access.
	strings   map[string]uint64
	stringSeq uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}
// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr              // in trace.empty/full
	lastTicks uint64                   // when we wrote the last event
	pos       int                      // next write offset in arr
	stk       [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}
// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such a syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}
	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain current stack ID to use in all traceEvGoCreate events below.
	mp := acquirem()
	stkBuf := make([]location, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]location{location{pc: gp.startpc + sys.PCQuantum}})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false
	trace.strings = make(map[string]uint64)
	trace.stringSeq = 0
	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register runtime goroutine labels.
	_, pid, bufp := traceAcquireBuffer()
	buf := (*bufp).ptr()
	if buf == nil {
		buf = traceFlush(0).ptr()
		(*bufp).set(buf)
	}
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], buf = traceString(buf, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	for _, p := range &allp {
		if p == nil {
			break
		}
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range &allp {
		if p == nil {
			break
		}
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}
// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we would rather not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.9 trace\x00\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		if timers.gp != nil {
			data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
			data = traceAppend(data, uint64(timers.gp.goid))
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
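// Taken together, the stream handed out by successive ReadTrace calls is
// framed roughly as follows (an illustrative summary, not part of the code):
//
//	"go 1.9 trace\x00\x00\x00\x00"   (16-byte header)
//	per-P batches of events          (each buffer starts with traceEvBatch)
//	traceEvFrequency footer, optional traceEvTimerGoroutine, then the stack dump
//	nil                              (end of stream, after shutdown)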
// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}
// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}
// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}
// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}
// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event type should contain a stack, but we don't want
// to collect and remember it for this particular call.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.
	//
	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceLockBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceLockBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two additional params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf)).ptr()
		(*bufp).set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	if buf.pos == 0 {
		buf.byte(traceEvBatch | 1<<traceArgCountShift)
		buf.varint(uint64(pid))
		buf.varint(ticks)
		tickDiff = 0
	}
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}
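// On the wire, a single event written by traceEvent is therefore laid out as
// follows (an illustrative summary):
//
//	[ev | narg<<traceArgCountShift] [length?] [tickDiff] [args...] [stack id?]
//
// where the length byte is present only when narg == 3, and everything after
// the first byte is a base-128 varint.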
func traceStackID(mp *m, buf []location, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf[:])
	} else if gp != nil {
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}
// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}
// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}
// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0
	bufp.lastTicks = 0
	if dolock {
		unlock(&trace.lock)
	}
	return buf
}
func traceString(buf *traceBuf, s string) (uint64, *traceBuf) {
	if s == "" {
		return 0, buf
	}
	if id, ok := trace.strings[s]; ok {
		return id, buf
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	size := 1 + 2*traceBytesPerNumber + len(s)
	if len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf)).ptr()
	}
	buf.byte(traceEvString)
	buf.varint(id)
	buf.varint(uint64(len(s)))
	buf.pos += copy(buf.arr[buf.pos:], s)
	return id, buf
}
// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
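// For reference, the inverse operation (an illustrative sketch, not used by
// the runtime) reads one little-endian-base-128 value back out of a buffer:
//
//	func traceReadVarint(buf []byte) (v uint64, n int) {
//		for shift := uint(0); ; shift += 7 {
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7f) << shift // low 7 bits carry the payload
//			if b < 0x80 {                // high bit clear: last byte
//				return v, n
//			}
//		}
//	}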
// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}
// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}
// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]location // real type [n]location
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}
// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}
// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}
// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(0).ptr()
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := stk.stack()
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, buf = traceFrameForPC(buf, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.pc))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if len(buf.arr)-buf.pos < size {
				buf = traceFlush(traceBufPtrOf(buf)).ptr()
			}
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(traceBufPtrOf(buf))
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}
type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}
func traceFrameForPC(buf *traceBuf, f location) (traceFrame, *traceBuf) {
	var frame traceFrame

	fn := f.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, buf = traceString(buf, fn)
	frame.line = uint64(f.lineno)
	file := f.filename
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, buf = traceString(buf, file)
	return frame, buf
}
// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }
// alloc allocates an n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		// This is only safe because the strings returned by callers
		// are stored in a location that is not in the Go heap.
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}
// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}
// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}
func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}
func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCScanStart() {
	traceEvent(traceEvGCScanStart, -1)
}

func traceGCScanDone() {
	traceEvent(traceEvGCScanDone, -1)
}
// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}
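// A typical sweep loop therefore follows this call pattern (an illustrative
// sketch; n1 and n2 stand for byte counts from individual span sweeps):
//
//	traceGCSweepStart()
//	traceGCSweepSpan(n1) // first call emits traceEvGCSweepStart
//	traceGCSweepSpan(n2) // later calls only accumulate traceSwept
//	traceGCSweepDone()   // emits traceEvGCSweepDone only if anything was swept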
func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}
func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}
func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}
func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}
func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}
func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}
func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh timestamp to keep the log consistent.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}
func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked;
	// to handle this we temporarily employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}
func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}
func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}