// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Based on algorithms and data structures used in
// http://code.google.com/p/google-perftools/.
//
// The main difference between this code and the google-perftools
// code is that this code is written to allow copying the profile data
// to an arbitrary io.Writer, while the google-perftools code always
// writes to an operating system file.
//
// The signal handler for the profiling clock tick adds a new stack trace
// to a hash table tracking counts for recent traces. Most clock ticks
// hit in the cache. In the event of a cache miss, an entry must be
// evicted from the hash table, copied to a log that will eventually be
// written as profile data. The google-perftools code flushed the
// log itself during the signal handler. This code cannot do that, because
// the io.Writer might block or need system calls or locks that are not
// safe to use from within the signal handler. Instead, we split the log
// into two halves and let the signal handler fill one half while a goroutine
// is writing out the other half. When the signal handler fills its half, it
// offers to swap with the goroutine. If the writer is not done with its half,
// we lose the stack trace for this clock tick (and record that loss).
// The goroutine interacts with the signal handler by calling getprofile() to
// get the next log piece to write, implicitly handing back the last log
// piece it had.
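//
// Schematically (an illustration only; see the log declaration below):
//
//	log[toggle]	<- signal handler appends evicted traces here
//	log[1-toggle]	<- writing goroutine drains this half
//
// The swap happens only at a half boundary, via the handoff word.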
//
// The state of this dance between the signal handler and the goroutine
// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
// is not using either log half and is waiting (or will soon be waiting) for
// a new piece by calling notesleep(&p->wait). If the signal handler
// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
// to wake the goroutine. The value indicates the number of entries in the
// log half being handed off. The goroutine leaves the non-zero value in
// place until it has finished processing the log half and then flips the number
// back to zero. Setting the high bit in handoff means that the profiling is over,
// and the goroutine is now in charge of flushing the data left in the hash table
// to the log and returning that data.
//
// The handoff field is manipulated using atomic operations.
// For the most part, the manipulation of handoff is orderly: if handoff == 0
// then the signal handler owns it and can change it to non-zero.
// If handoff != 0 then the goroutine owns it and can change it to zero.
// If that were the end of the story then we would not need to manipulate
// handoff using atomic operations. The operations are needed, however,
// in order to let the log closer set the high bit to indicate "EOF" safely
// in the situation when normally the goroutine "owns" handoff.
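//
// A distilled sketch of those transitions (illustration only; the real
// code is in flushlog, getprofile, and runtime_SetCPUProfileRate below):
//
//	// signal handler side, in flushlog: publish a full half.
//	if(runtime_cas(&p->handoff, 0, p->nlog))
//		runtime_notewakeup(&p->wait);	// writer may be sleeping
//
//	// goroutine side, in getprofile: return the half it was holding.
//	if(runtime_cas(&p->handoff, n, 0))
//		p->wtoggle = 1 - p->wtoggle;	// switch to the other half
//
//	// closer, in SetCPUProfileRate(0): force EOF regardless of owner.
//	runtime_cas(&prof->handoff, n, n|0x80000000);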

typedef struct __go_open_array Slice;
#define array __values
#define len __count
#define cap __capacity

typedef struct Profile Profile;
typedef struct Bucket Bucket;
typedef struct Entry Entry;

struct Entry {
	uintptr	count;
	uintptr	depth;
	uintptr	stack[MaxStack];
};

struct Bucket {
	Entry	entry[Assoc];
};

struct Profile {
	bool	on;		// profiling is on
	Note	wait;		// goroutine waits here
	uintptr	count;		// tick count
	uintptr	evicts;		// eviction count
	uintptr	lost;		// lost ticks that need to be logged
	uintptr	totallost;	// total lost ticks

	// Active recent stack traces.
	Bucket	hash[HashSize];

	// Log of traces evicted from hash.
	// Signal handler has filled log[toggle][:nlog].
	// Goroutine is writing log[1-toggle][:handoff].
	uintptr	log[2][LogSize/2];
	uintptr	nlog;
	int32	toggle;
	uint32	handoff;

	// Writer maintains its own toggle to avoid races
	// looking at signal handler's toggle.
	int32	wtoggle;
	bool	wholding;	// holding & need to release a log half
	bool	flushing;	// flushing hash table - profile is over
	bool	eod_sent;	// special end-of-data record sent; => flushing
};

static Profile *prof;

static void tick(uintptr*, int32);
static void add(Profile*, uintptr*, int32);
static bool evict(Profile*, Entry*);
static bool flushlog(Profile*);

// eod is the end-of-data marker: a profile record with count 0,
// depth 1, and a single zero "stack" word.
static uintptr eod[3] = {0, 1, 0};

// LostProfileData is a no-op function used in profiles
// to mark the number of profiling stack traces that were
// discarded due to slow data writers.
static void LostProfileData(void) {
}
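
// In the profile log, lost ticks therefore show up as an ordinary
// record whose single stack frame is the address of LostProfileData,
// i.e. three words of the form:
//
//	lost-count, 1, (uintptr)LostProfileData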

extern void runtime_SetCPUProfileRate(intgo)
	__asm__("runtime.SetCPUProfileRate");

// SetCPUProfileRate sets the CPU profiling rate.
// The user documentation is in debug.go.
void
runtime_SetCPUProfileRate(intgo hz)
{
	// Clamp hz to something reasonable.
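	// A minimal sketch of the clamp; the exact bounds (at most one
	// tick per microsecond) are an assumption here:
	if(hz < 0)
		hz = 0;
	if(hz > 1000000)
		hz = 1000000;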
		prof = runtime_SysAlloc(sizeof *prof);
		if(prof == nil) {
			runtime_printf("runtime: cpu profiling cannot allocate memory\n");
		if(prof->on || prof->handoff != 0) {
			runtime_printf("runtime: cannot set cpu profile rate until previous profile has finished.\n");

		// pprof binary header format.
		// http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
		*p++ = 0;  // count for header
		*p++ = 3;  // depth for header
		*p++ = 0;  // version number
		*p++ = 1000000 / hz;  // period (microseconds)
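		// For example, at hz = 100 (the rate used by runtime/pprof)
		// the period word is 1000000/100 = 10000 microseconds.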
		prof->nlog = p - prof->log[0];
		prof->wholding = false;
		prof->flushing = false;
		prof->eod_sent = false;
		runtime_noteclear(&prof->wait);

		runtime_setcpuprofilerate(tick, hz);
	} else if(prof->on) {
		runtime_setcpuprofilerate(nil, 0);

		// Now add is not running anymore, and getprofile owns the entire log.
		// Set the high bit in prof->handoff to tell getprofile.
		for(;;) {
			n = prof->handoff;
			if(n&0x80000000)
				runtime_printf("runtime: setcpuprofile(off) twice");
			if(runtime_cas(&prof->handoff, n, n|0x80000000))
				break;
		}
		if(n == 0) {
			// we did the transition from 0 -> nonzero so we wake getprofile
			runtime_notewakeup(&prof->wait);
		}

static void
tick(uintptr *pc, int32 n)
{
	add(prof, pc, n);
}

// add adds the stack trace to the profile.
// It is called from signal handlers and other limited environments
// and cannot allocate memory or acquire locks that might be
// held at the time of the signal, nor can it use substantial amounts
// of stack. It is allowed to call evict.
static void
add(Profile *p, uintptr *pc, int32 n)
{
	// Compute hash.
	h = 0;
	for(i=0; i<n; i++) {
		h = h<<8 | (h>>(8*(sizeof(h)-1)));	// rotate h left by 8 bits
		x = pc[i];
		h += x*31 + x*7 + x*3;			// mix in the next pc
	}

	// Add to entry count if already present in table.
	b = &p->hash[h%HashSize];
	for(i=0; i<Assoc; i++) {
		e = &b->entry[i];
		if(e->depth != (uintptr)n)
			continue;
		for(j=0; j<n; j++)
			if(e->stack[j] != pc[j])
				goto ContinueAssoc;

	// Evict entry with smallest count.
	e = &b->entry[0];
	for(i=1; i<Assoc; i++)
		if(b->entry[i].count < e->count)
			e = &b->entry[i];
			// Could not evict entry. Record lost stack.

	// Reuse the newly evicted entry.

// evict copies the given entry's data into the log, so that
// the entry can be reused. evict is called from add, which
// is called from the profiling signal handler, so it must not
// allocate memory or block. It is safe to call flushlog.
// evict returns true if the entry was copied to the log,
// false if there was no room available.
static bool
evict(Profile *p, Entry *e)
{
	log = p->log[p->toggle];
	if(p->nlog+nslot > nelem(p->log[0])) {
		if(!flushlog(p))
			return false;
		log = p->log[p->toggle];
	}

// flushlog tries to flush the current log and switch to the other one.
// flushlog is called from evict, called from add, called from the signal handler,
// so it cannot allocate memory or block. It can try to swap logs with
// the writing goroutine, as explained in the comment at the top of this file.
static bool
flushlog(Profile *p)
{
	if(!runtime_cas(&p->handoff, 0, p->nlog))
		return false;
	runtime_notewakeup(&p->wait);

	p->toggle = 1 - p->toggle;
	log = p->log[p->toggle];
		*q++ = (uintptr)LostProfileData;

// getprofile blocks until the next block of profiling data is available
// and returns it as a []byte. It is called from the writing goroutine.
static Slice
getprofile(Profile *p)
{

	if(p->wholding) {
		// Release previous log to signal handling side.
		// Loop because we are racing against setprofile(off).
		for(;;) {
			n = p->handoff;
			if(n == 0) {
				runtime_printf("runtime: phase error during cpu profile handoff\n");
				p->wtoggle = 1 - p->wtoggle;
			if(runtime_cas(&p->handoff, n, 0))
				break;
		p->wtoggle = 1 - p->wtoggle;

	if(!p->on && p->handoff == 0)
		return ret;

	// Wait for new log.
	runtime_entersyscall();
	runtime_notesleep(&p->wait);
	runtime_exitsyscall();
	runtime_noteclear(&p->wait);
		runtime_printf("runtime: phase error during cpu profile wait\n");
	if(n == 0x80000000) {

	// Return new log to caller.
	p->wholding = true;

	ret.array = (byte*)p->log[p->wtoggle];
	ret.len = n*sizeof(uintptr);

	// Add is no longer being called. We own the log.
	// Also, p->handoff is non-zero, so flushlog will return false.
	// Evict the hash table into the log and return it.
	for(i=0; i<HashSize; i++) {
		b = &p->hash[i];
		for(j=0; j<Assoc; j++) {
			e = &b->entry[j];
			if(e->count > 0 && !evict(p, e)) {
				// Filled the log. Stop the loop and return what we've got.

	// Return pending log data.
	if(p->nlog > 0) {
		// Note that we're using toggle now, not wtoggle,
		// because we're working on the log directly.
		ret.array = (byte*)p->log[p->toggle];
		ret.len = p->nlog*sizeof(uintptr);

	// Made it through the table without finding anything to log.
	if(!p->eod_sent) {
		// We may not have space to append this to the partial log buf,
		// so we always return a new slice for the end-of-data marker.
		p->eod_sent = true;
		ret.array = (byte*)eod;
		ret.len = sizeof eod;

	// Finally done. Clean up and return nil.
	p->flushing = false;
	if(!runtime_cas(&p->handoff, p->handoff, 0))
		runtime_printf("runtime: profile flush racing with something\n");
	return ret;	// set to nil at top of function

extern Slice runtime_CPUProfile(void)
	__asm__("runtime.CPUProfile");

// CPUProfile returns the next cpu profile block as a []byte.
// The user documentation is in debug.go.
Slice
runtime_CPUProfile(void)
{
	return getprofile(prof);
}
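
// A sketch of the consuming loop this API anticipates (the real loop
// lives on the Go side, in runtime/pprof; "write" and "w" below are
// hypothetical stand-ins for copying each block to the io.Writer):
//
//	for(;;) {
//		data = runtime_CPUProfile();
//		if(data.array == nil)
//			break;	// profiling finished; eod already returned
//		write(w, data.array, data.len);
//	}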