// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// CPU profiling.
// Based on algorithms and data structures used in
// http://code.google.com/p/google-perftools/.
//
// The main difference between this code and the google-perftools
// code is that this code is written to allow copying the profile data
// to an arbitrary io.Writer, while the google-perftools code always
// writes to an operating system file.
//
// The signal handler for the profiling clock tick adds a new stack trace
// to a hash table tracking counts for recent traces. Most clock ticks
// hit in the cache. In the event of a cache miss, an entry must be
// evicted from the hash table, copied to a log that will eventually be
// written as profile data. The google-perftools code flushed the
// log itself during the signal handler. This code cannot do that, because
// the io.Writer might block or need system calls or locks that are not
// safe to use from within the signal handler. Instead, we split the log
// into two halves and let the signal handler fill one half while a goroutine
// is writing out the other half. When the signal handler fills its half, it
// offers to swap with the goroutine. If the writer is not done with its half,
// we lose the stack trace for this clock tick (and record that loss).
// The goroutine interacts with the signal handler by calling getprofile() to
// get the next log piece to write, implicitly handing back the last log
// piece it obtained.
//
// The state of this dance between the signal handler and the goroutine
// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
// is not using either log half and is waiting (or will soon be waiting) for
// a new piece by calling notesleep(&p.wait). If the signal handler
// changes handoff from 0 to non-zero, it must call notewakeup(&p.wait)
// to wake the goroutine. The value indicates the number of entries in the
// log half being handed off. The goroutine leaves the non-zero value in
// place until it has finished processing the log half and then flips the number
// back to zero. Setting the high bit in handoff means that the profiling is over,
// and the goroutine is now in charge of flushing the data left in the hash table
// to the log and returning that data.
//
// The handoff field is manipulated using atomic operations.
// For the most part, the manipulation of handoff is orderly: if handoff == 0
// then the signal handler owns it and can change it to non-zero.
// If handoff != 0 then the goroutine owns it and can change it to zero.
// If that were the end of the story then we would not need to manipulate
// handoff using atomic operations. The operations are needed, however,
// in order to let the log closer set the high bit to indicate "EOF" safely
// in the situation when normally the goroutine "owns" handoff.
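//
// As an illustrative sketch of the same handoff discipline (not part of the
// runtime; sync/atomic and the hypothetical wakeWriter helper stand in for
// the runtime's own atomics and notes):
//
//	var handoff uint32 // 0: signal handler owns; n: writer owns n entries
//
//	// Signal handler side, after filling its log half:
//	if atomic.CompareAndSwapUint32(&handoff, 0, uint32(nlog)) {
//		wakeWriter() // corresponds to notewakeup(&p.wait)
//	}
//
//	// Writer side, after consuming its half:
//	for {
//		n := atomic.LoadUint32(&handoff)
//		if n&0x80000000 != 0 {
//			break // profiling is over; the writer now owns everything
//		}
//		if atomic.CompareAndSwapUint32(&handoff, n, 0) {
//			break // handed the half back to the signal handler
//		}
//		// Retry: the log closer may have set the high bit concurrently.
//	}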
54 "runtime/internal/atomic"

const (
	numBuckets      = 1 << 10
	logSize         = 1 << 17
	assoc           = 4
	maxCPUProfStack = 64
)

type cpuprofEntry struct {
	count uintptr
	depth int
	stack [maxCPUProfStack]uintptr
}

type cpuProfile struct {
	on     bool    // profiling is on
	wait   note    // goroutine waits here
	count  uintptr // tick count
	evicts uintptr // eviction count
	lost   uintptr // lost ticks that need to be logged

	// Active recent stack traces.
	hash [numBuckets]struct {
		entry [assoc]cpuprofEntry
	}

	// Log of traces evicted from hash.
	// Signal handler has filled log[toggle][:nlog].
	// Goroutine is writing log[1-toggle][:handoff].
	log     [2][logSize / 2]uintptr
	nlog    int
	toggle  int32
	handoff uint32

	// Writer state.
	// Writer maintains its own toggle to avoid races
	// looking at signal handler's toggle.
	wtoggle  uint32
	wholding bool // holding & need to release a log half
	flushing bool // flushing hash table - profile is over
	eodSent  bool // special end-of-data record sent; => flushing
}

var (
	cpuprofLock mutex
	cpuprof     *cpuProfile

	eod = [3]uintptr{0, 1, 0}
)

func setcpuprofilerate(hz int32) {
	systemstack(func() {
		setcpuprofilerate_m(hz)
	})
}

// lostProfileData is a no-op function used in profiles
// to mark the number of profiling stack traces that were
// discarded due to slow data writers.
func lostProfileData() {}
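
// When ticks are dropped because the writer was still holding the other log
// half, flushlog (below) emits a synthetic record of the form
//
//	lost, 1, funcPC(lostProfileData)
//
// so that the resulting profile attributes the lost ticks to lostProfileData.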

// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
// If hz <= 0, SetCPUProfileRate turns off profiling.
// If the profiler is on, the rate cannot be changed without first turning it off.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int) {
	// Clamp hz to something reasonable.
	if hz < 0 {
		hz = 0
	}
	if hz > 1000000 {
		hz = 1000000
	}

	lock(&cpuprofLock)
	if hz > 0 {
		if cpuprof == nil {
			cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys))
			if cpuprof == nil {
				print("runtime: cpu profiling cannot allocate memory\n")
				unlock(&cpuprofLock)
				return
			}
		}
		if cpuprof.on || cpuprof.handoff != 0 {
			print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
			unlock(&cpuprofLock)
			return
		}

		cpuprof.on = true
		// pprof binary header format.
		// https://github.com/gperftools/gperftools/blob/master/src/profiledata.cc#L119
		p := &cpuprof.log[0]
		p[0] = 0                 // count for header
		p[1] = 3                 // depth for header
		p[2] = 0                 // version number
		p[3] = uintptr(1e6 / hz) // period (microseconds)
		p[4] = 0                 // padding
		cpuprof.nlog = 5
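
		// For reference, the complete stream of uintptr words handed to the
		// reader looks like (a sketch; records and the trailer come from
		// evict and getprofile below):
		//
		//	0, 3, 0, period, 0      header: count=0, depth=3, then 3 words
		//	count, depth, pc1..pcD  one record per evicted stack trace
		//	0, 1, 0                 end-of-data marker (eod)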

		cpuprof.toggle = 0
		cpuprof.wholding = false
		cpuprof.wtoggle = 0
		cpuprof.flushing = false
		cpuprof.eodSent = false
		noteclear(&cpuprof.wait)

		setcpuprofilerate(int32(hz))
	} else if cpuprof != nil && cpuprof.on {
		setcpuprofilerate(0)
		cpuprof.on = false

		// Now add is not running anymore, and getprofile owns the entire log.
		// Set the high bit in cpuprof.handoff to tell getprofile.
		for {
			n := cpuprof.handoff
			if n&0x80000000 != 0 {
				print("runtime: setcpuprofile(off) twice\n")
			}
			if atomic.Cas(&cpuprof.handoff, n, n|0x80000000) {
				if n == 0 {
					// we did the transition from 0 -> nonzero so we wake getprofile
					notewakeup(&cpuprof.wait)
				}
				break
			}
		}
	}
	unlock(&cpuprofLock)
}
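
// For illustration, a minimal client in the style this file assumes (a
// sketch only; w is any io.Writer supplied by the caller, and real clients
// should normally use runtime/pprof instead):
//
//	runtime.SetCPUProfileRate(100)
//	go func() {
//		for {
//			data := runtime.CPUProfile()
//			if data == nil {
//				return // profiling is off and all data has been returned
//			}
//			w.Write(data)
//		}
//	}()
//	// ... later: runtime.SetCPUProfileRate(0) to stop profiling.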

// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It is allowed to call evict.
//go:nowritebarrierrec
func (p *cpuProfile) add(pc []uintptr) {
	p.addWithFlushlog(pc, p.flushlog)
}

// addWithFlushlog implements add and addNonGo.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It may be called by a signal handler with no g or m.
// It is allowed to call evict, passing the flushlog parameter.
//go:nowritebarrierrec
func (p *cpuProfile) addWithFlushlog(pc []uintptr, flushlog func() bool) {
	if len(pc) > maxCPUProfStack {
		pc = pc[:maxCPUProfStack]
	}

	// Compute hash.
	h := uintptr(0)
	for _, x := range pc {
		h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) // rotate h left by one byte
		h += x * 41
	}
	p.count++

	// Add to entry count if already present in table.
	b := &p.hash[h%numBuckets]
Assoc:
	for i := range b.entry {
		e := &b.entry[i]
		if e.depth != len(pc) {
			continue
		}
		for j := range pc {
			if e.stack[j] != pc[j] {
				continue Assoc
			}
		}
		e.count++
		return
	}

	// Evict entry with smallest count.
	var e *cpuprofEntry
	for i := range b.entry {
		if e == nil || b.entry[i].count < e.count {
			e = &b.entry[i]
		}
	}
	if e.count > 0 {
		if !p.evict(e, flushlog) {
			// Could not evict entry. Record lost stack.
			p.lost++
			return
		}
		p.evicts++
	}

	// Reuse the newly evicted entry.
	e.depth = len(pc)
	e.count = 1
	copy(e.stack[:], pc)
}
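
// Viewed as a whole, the hash table behaves like a small set-associative
// cache: each of the numBuckets buckets holds assoc entries, a hit bumps a
// count in place, and a miss evicts the bucket's least-counted entry into
// the log via evict below.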

// evict copies the given entry's data into the log, so that
// the entry can be reused. evict is called from add, which
// is called from the profiling signal handler, so it must not
// allocate memory or block, and it may be called with no g or m.
// It is safe to call flushlog. evict returns true if the entry was
// copied to the log, false if there was no room available.
//go:nowritebarrierrec
func (p *cpuProfile) evict(e *cpuprofEntry, flushlog func() bool) bool {
	d := e.depth
	nslot := d + 2
	log := &p.log[p.toggle]
	if p.nlog+nslot > len(log) {
		if !flushlog() {
			return false
		}
		log = &p.log[p.toggle]
	}

	q := p.nlog
	log[q] = e.count
	q++
	log[q] = uintptr(d)
	q++
	copy(log[q:], e.stack[:d])
	q += d
	p.nlog = q
	e.count = 0
	return true
}

// flushlog tries to flush the current log and switch to the other one.
// flushlog is called from evict, called from add, called from the signal handler,
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
//go:nowritebarrierrec
func (p *cpuProfile) flushlog() bool {
	if !atomic.Cas(&p.handoff, 0, uint32(p.nlog)) {
		return false
	}
	notewakeup(&p.wait)

	p.toggle = 1 - p.toggle
	log := &p.log[p.toggle]
	q := 0
	if p.lost > 0 {
		lostPC := funcPC(lostProfileData)
		log[0] = p.lost
		log[1] = 1
		log[2] = lostPC
		q = 3
		p.lost = 0
	}
	p.nlog = q
	return true
}

// addNonGo is like add, but runs on a non-Go thread.
// It can't do anything that might need a g or an m.
// With this entry point, we don't try to flush the log when evicting an
// old entry. Instead, we just drop the stack trace if we're out of space.
//go:nowritebarrierrec
func (p *cpuProfile) addNonGo(pc []uintptr) {
	p.addWithFlushlog(pc, func() bool { return false })
}

// getprofile blocks until the next block of profiling data is available
// and returns it as a []byte. It is called from the writing goroutine.
func (p *cpuProfile) getprofile() []byte {
	if p == nil {
		return nil
	}

	if p.wholding {
		// Release previous log to signal handling side.
		// Loop because we are racing against SetCPUProfileRate(0).
		for {
			n := p.handoff
			if n == 0 {
				print("runtime: phase error during cpu profile handoff\n")
				return nil
			}
			if n&0x80000000 != 0 {
				p.wtoggle = 1 - p.wtoggle
				p.wholding = false
				p.flushing = true
				goto Flush
			}
			if atomic.Cas(&p.handoff, n, 0) {
				break
			}
		}
		p.wtoggle = 1 - p.wtoggle
		p.wholding = false
	}

	if p.flushing {
		goto Flush
	}

	if !p.on && p.handoff == 0 {
		return nil
	}

	// Wait for new log.
	notetsleepg(&p.wait, -1)
	noteclear(&p.wait)

	switch n := p.handoff; {
	case n == 0:
		print("runtime: phase error during cpu profile wait\n")
		return nil
	case n == 0x80000000:
		p.flushing = true
		goto Flush
	default:
		n &^= 0x80000000

		// Return new log to caller.
		p.wholding = true

		return uintptrBytes(p.log[p.wtoggle][:n])
	}

	// In flush mode.
	// Add is no longer being called. We own the log.
	// Also, p.handoff is non-zero, so flushlog will return false.
	// Evict the hash table into the log and return it.
Flush:
	for i := range p.hash {
		b := &p.hash[i]
		for j := range b.entry {
			e := &b.entry[j]
			if e.count > 0 && !p.evict(e, p.flushlog) {
				// Filled the log. Stop the loop and return what we've got.
				break Flush
			}
		}
	}

	// Return pending log data.
	if p.nlog > 0 {
		// Note that we're using toggle now, not wtoggle,
		// because we're working on the log directly.
		n := p.nlog
		p.nlog = 0
		return uintptrBytes(p.log[p.toggle][:n])
	}

	// Made it through the table without finding anything to log.
	if !p.eodSent {
		// We may not have space to append this to the partial log buf,
		// so we always return a new slice for the end-of-data marker.
		p.eodSent = true
		return uintptrBytes(eod[:])
	}

	// Finally done. Clean up and return nil.
	p.flushing = false
	if !atomic.Cas(&p.handoff, p.handoff, 0) {
		print("runtime: profile flush racing with something\n")
	}
	return nil
}

func uintptrBytes(p []uintptr) (ret []byte) {
	pp := (*slice)(unsafe.Pointer(&p))
	rp := (*slice)(unsafe.Pointer(&ret))

	rp.array = pp.array
	rp.len = pp.len * int(unsafe.Sizeof(p[0]))
	rp.cap = rp.len

	return
}
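
// For example, uintptrBytes(eod[:]) views the three words of eod as one
// 24-byte slice on a 64-bit system (3 * unsafe.Sizeof(uintptr(0)) bytes),
// sharing the same backing array rather than copying it.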

// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
// blocking until data is available. If profiling is turned off and all the profile
// data accumulated while it was on has been returned, CPUProfile returns nil.
// The caller must save the returned data before calling CPUProfile again.
//
// Most clients should use the runtime/pprof package or
// the testing package's -test.cpuprofile flag instead of calling
// CPUProfile directly.
func CPUProfile() []byte {
	return cpuprof.getprofile()
}

//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func runtime_pprof_runtime_cyclesPerSecond() int64 {
	return tickspersecond()
}