compiler: rationalize external symbol names
[official-gcc.git] / libgo / go / runtime / pprof / pprof_test.go
blobae6ec6d0cfc8169cb383a41e5dd643b7ce56ef48
1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // +build !nacl
7 package pprof
9 import (
10 "bytes"
11 "context"
12 "fmt"
13 "internal/testenv"
14 "io"
15 "io/ioutil"
16 "math/big"
17 "os"
18 "os/exec"
19 "regexp"
20 "runtime"
21 "runtime/pprof/internal/profile"
22 "strings"
23 "sync"
24 "sync/atomic"
25 "testing"
26 "time"
// cpuHogger repeatedly feeds *y through f for at least dur, writing the
// result back through y so the compiler cannot discard the work.
//
// We only need to get one 100 Hz clock tick, so we've got a large safety
// buffer. But do at least 500 iterations (which should take about 100ms),
// otherwise TestCPUProfileMultithreaded can fail if only one thread is
// scheduled during the testing period.
func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
	start := time.Now()
	sum := *y
	for n := 0; n < 500 || time.Since(start) < dur; n++ {
		sum = f(sum)
	}
	*y = sum
}
// salt1 and salt2 are sinks for the CPU-hog results; routing the hot
// loops' output through package-level state keeps them from being
// optimized away.
var (
	salt1 = 0
	salt2 = 0
)
// cpuHog1 is the actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
	v := x
	for n := 0; n < 1e5; n++ {
		if v > 0 {
			v *= v
		} else {
			v *= v + 1
		}
	}
	return v
}
// cpuHog2 is a second distinguishable CPU hog, used by the
// multithreaded test. Like cpuHog1, it must not call other functions
// nor touch heap/globals inside the loop.
func cpuHog2(x int) int {
	v := x
	for n := 0; n < 1e5; n++ {
		if v > 0 {
			v *= v
		} else {
			v *= v + 2
		}
	}
	return v
}
75 func TestCPUProfile(t *testing.T) {
76 testCPUProfile(t, []string{"pprof.cpuHog1"}, func(dur time.Duration) {
77 cpuHogger(cpuHog1, &salt1, dur)
81 func TestCPUProfileMultithreaded(t *testing.T) {
82 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
83 testCPUProfile(t, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, func(dur time.Duration) {
84 c := make(chan int)
85 go func() {
86 cpuHogger(cpuHog1, &salt1, dur)
87 c <- 1
88 }()
89 cpuHogger(cpuHog2, &salt2, dur)
90 <-c
94 func TestCPUProfileInlining(t *testing.T) {
95 testCPUProfile(t, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, func(dur time.Duration) {
96 cpuHogger(inlinedCaller, &salt1, dur)
100 func inlinedCaller(x int) int {
101 x = inlinedCallee(x)
102 return x
// inlinedCallee does the same squaring work as cpuHog1 but with a
// goto-based loop: a for loop would currently prevent the compiler from
// inlining this function, and the test needs it inlined.
func inlinedCallee(x int) int {
	v := x
	n := 0
loop:
	if v > 0 {
		v *= v
	} else {
		v *= v + 1
	}
	if n++; n < 1e5 {
		goto loop
	}
	return v
}
122 func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
123 p, err := profile.Parse(bytes.NewReader(valBytes))
124 if err != nil {
125 t.Fatal(err)
127 for _, sample := range p.Sample {
128 count := uintptr(sample.Value[0])
129 f(count, sample.Location, sample.Label)
133 func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
134 switch runtime.GOOS {
135 case "darwin":
136 switch runtime.GOARCH {
137 case "arm", "arm64":
138 // nothing
139 default:
140 out, err := exec.Command("uname", "-a").CombinedOutput()
141 if err != nil {
142 t.Fatal(err)
144 vers := string(out)
145 t.Logf("uname -a: %v", vers)
147 case "plan9":
148 t.Skip("skipping on plan9")
151 const maxDuration = 5 * time.Second
152 // If we're running a long test, start with a long duration
153 // for tests that try to make sure something *doesn't* happen.
154 duration := 5 * time.Second
155 if testing.Short() {
156 duration = 200 * time.Millisecond
159 // Profiling tests are inherently flaky, especially on a
160 // loaded system, such as when this test is running with
161 // several others under go test std. If a test fails in a way
162 // that could mean it just didn't run long enough, try with a
163 // longer duration.
164 for duration <= maxDuration {
165 var prof bytes.Buffer
166 if err := StartCPUProfile(&prof); err != nil {
167 t.Fatal(err)
169 f(duration)
170 StopCPUProfile()
172 if profileOk(t, need, prof, duration) {
173 return
176 duration *= 2
177 if duration <= maxDuration {
178 t.Logf("retrying with %s duration", duration)
182 switch runtime.GOOS {
183 case "darwin", "dragonfly", "netbsd", "solaris":
184 t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
186 // Ignore the failure if the tests are running in a QEMU-based emulator,
187 // QEMU is not perfect at emulating everything.
188 // IN_QEMU environmental variable is set by some of the Go builders.
189 // IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
190 if os.Getenv("IN_QEMU") == "1" {
191 t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
193 t.FailNow()
// contains reports whether slice has an element equal to s.
func contains(slice []string, s string) bool {
	for _, v := range slice {
		if v == s {
			return true
		}
	}
	return false
}
205 func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
206 ok = true
208 // Check that profile is well formed and contains need.
209 have := make([]uintptr, len(need))
210 var samples uintptr
211 var buf bytes.Buffer
212 parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
213 fmt.Fprintf(&buf, "%d:", count)
214 fprintStack(&buf, stk)
215 samples += count
216 for i, name := range need {
217 if semi := strings.Index(name, ";"); semi > -1 {
218 kv := strings.SplitN(name[semi+1:], "=", 2)
219 if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
220 continue
222 name = name[:semi]
224 for _, loc := range stk {
225 for _, line := range loc.Line {
226 if strings.Contains(line.Function.Name, name) {
227 have[i] += count
232 fmt.Fprintf(&buf, "\n")
234 t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
236 if samples < 10 && runtime.GOOS == "windows" {
237 // On some windows machines we end up with
238 // not enough samples due to coarse timer
239 // resolution. Let it go.
240 t.Log("too few samples on Windows (golang.org/issue/10842)")
241 return false
244 // Check that we got a reasonable number of samples.
245 // We used to always require at least ideal/4 samples,
246 // but that is too hard to guarantee on a loaded system.
247 // Now we accept 10 or more samples, which we take to be
248 // enough to show that at least some profiling is occurring.
249 if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
250 t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
251 ok = false
254 if len(need) == 0 {
255 return ok
258 var total uintptr
259 for i, name := range need {
260 total += have[i]
261 t.Logf("%s: %d\n", name, have[i])
263 if total == 0 {
264 t.Logf("no samples in expected functions")
265 ok = false
267 // We'd like to check a reasonable minimum, like
268 // total / len(have) / smallconstant, but this test is
269 // pretty flaky (see bug 7095). So we'll just test to
270 // make sure we got at least one sample.
271 min := uintptr(1)
272 for i, name := range need {
273 if have[i] < min {
274 t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
275 ok = false
278 return ok
281 // Fork can hang if preempted with signals frequently enough (see issue 5517).
282 // Ensure that we do not do this.
283 func TestCPUProfileWithFork(t *testing.T) {
284 testenv.MustHaveExec(t)
286 heap := 1 << 30
287 if runtime.GOOS == "android" {
288 // Use smaller size for Android to avoid crash.
289 heap = 100 << 20
291 if testing.Short() {
292 heap = 100 << 20
294 // This makes fork slower.
295 garbage := make([]byte, heap)
296 // Need to touch the slice, otherwise it won't be paged in.
297 done := make(chan bool)
298 go func() {
299 for i := range garbage {
300 garbage[i] = 42
302 done <- true
304 <-done
306 var prof bytes.Buffer
307 if err := StartCPUProfile(&prof); err != nil {
308 t.Fatal(err)
310 defer StopCPUProfile()
312 for i := 0; i < 10; i++ {
313 exec.Command(os.Args[0], "-h").CombinedOutput()
317 // Test that profiler does not observe runtime.gogo as "user" goroutine execution.
318 // If it did, it would see inconsistent state and would either record an incorrect stack
319 // or crash because the stack was malformed.
320 func TestGoroutineSwitch(t *testing.T) {
321 // How much to try. These defaults take about 1 seconds
322 // on a 2012 MacBook Pro. The ones in short mode take
323 // about 0.1 seconds.
324 tries := 10
325 count := 1000000
326 if testing.Short() {
327 tries = 1
329 for try := 0; try < tries; try++ {
330 var prof bytes.Buffer
331 if err := StartCPUProfile(&prof); err != nil {
332 t.Fatal(err)
334 for i := 0; i < count; i++ {
335 runtime.Gosched()
337 StopCPUProfile()
339 // Read profile to look for entries for runtime.gogo with an attempt at a traceback.
340 // The special entry
341 parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
342 // An entry with two frames with 'System' in its top frame
343 // exists to record a PC without a traceback. Those are okay.
344 if len(stk) == 2 {
345 name := stk[1].Line[0].Function.Name
346 if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
347 return
351 // Otherwise, should not see runtime.gogo.
352 // The place we'd see it would be the inner most frame.
353 name := stk[0].Line[0].Function.Name
354 if name == "runtime.gogo" {
355 var buf bytes.Buffer
356 fprintStack(&buf, stk)
357 t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
363 func fprintStack(w io.Writer, stk []*profile.Location) {
364 for _, loc := range stk {
365 fmt.Fprintf(w, " %#x", loc.Address)
366 fmt.Fprintf(w, " (")
367 for i, line := range loc.Line {
368 if i > 0 {
369 fmt.Fprintf(w, " ")
371 fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
373 fmt.Fprintf(w, ")")
375 fmt.Fprintf(w, "\n")
378 // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
379 func TestMathBigDivide(t *testing.T) {
380 testCPUProfile(t, nil, func(duration time.Duration) {
381 t := time.After(duration)
382 pi := new(big.Int)
383 for {
384 for i := 0; i < 100; i++ {
385 n := big.NewInt(2646693125139304345)
386 d := big.NewInt(842468587426513207)
387 pi.Div(n, d)
389 select {
390 case <-t:
391 return
392 default:
398 func TestBlockProfile(t *testing.T) {
399 t.Skip("lots of details are different for gccgo; FIXME")
400 type TestCase struct {
401 name string
402 f func()
403 stk []string
404 re string
406 tests := [...]TestCase{
408 name: "chan recv",
409 f: blockChanRecv,
410 stk: []string{
411 "runtime.chanrecv1",
412 "runtime/pprof.blockChanRecv",
413 "runtime/pprof.TestBlockProfile",
415 re: `
416 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
417 # 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
418 # 0x[0-9a-f]+ runtime/pprof\.blockChanRecv\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
419 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
422 name: "chan send",
423 f: blockChanSend,
424 stk: []string{
425 "runtime.chansend1",
426 "runtime/pprof.blockChanSend",
427 "runtime/pprof.TestBlockProfile",
429 re: `
430 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
431 # 0x[0-9a-f]+ runtime\.chansend1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
432 # 0x[0-9a-f]+ runtime/pprof\.blockChanSend\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
433 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
436 name: "chan close",
437 f: blockChanClose,
438 stk: []string{
439 "runtime.chanrecv1",
440 "runtime/pprof.blockChanClose",
441 "runtime/pprof.TestBlockProfile",
443 re: `
444 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
445 # 0x[0-9a-f]+ runtime\.chanrecv1\+0x[0-9a-f]+ .*/src/runtime/chan.go:[0-9]+
446 # 0x[0-9a-f]+ runtime/pprof\.blockChanClose\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
447 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
450 name: "select recv async",
451 f: blockSelectRecvAsync,
452 stk: []string{
453 "runtime.selectgo",
454 "runtime/pprof.blockSelectRecvAsync",
455 "runtime/pprof.TestBlockProfile",
457 re: `
458 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
459 # 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
460 # 0x[0-9a-f]+ runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
461 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
464 name: "select send sync",
465 f: blockSelectSendSync,
466 stk: []string{
467 "runtime.selectgo",
468 "runtime/pprof.blockSelectSendSync",
469 "runtime/pprof.TestBlockProfile",
471 re: `
472 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
473 # 0x[0-9a-f]+ runtime\.selectgo\+0x[0-9a-f]+ .*/src/runtime/select.go:[0-9]+
474 # 0x[0-9a-f]+ runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
475 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
478 name: "mutex",
479 f: blockMutex,
480 stk: []string{
481 "sync.(*Mutex).Lock",
482 "runtime/pprof.blockMutex",
483 "runtime/pprof.TestBlockProfile",
485 re: `
486 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
487 # 0x[0-9a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+ .*/src/sync/mutex\.go:[0-9]+
488 # 0x[0-9a-f]+ runtime/pprof\.blockMutex\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
489 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
492 name: "cond",
493 f: blockCond,
494 stk: []string{
495 "sync.(*Cond).Wait",
496 "runtime/pprof.blockCond",
497 "runtime/pprof.TestBlockProfile",
499 re: `
500 [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
501 # 0x[0-9a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+ .*/src/sync/cond\.go:[0-9]+
502 # 0x[0-9a-f]+ runtime/pprof\.blockCond\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
503 # 0x[0-9a-f]+ runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+
507 // Generate block profile
508 runtime.SetBlockProfileRate(1)
509 defer runtime.SetBlockProfileRate(0)
510 for _, test := range tests {
511 test.f()
514 t.Run("debug=1", func(t *testing.T) {
515 var w bytes.Buffer
516 Lookup("block").WriteTo(&w, 1)
517 prof := w.String()
519 if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
520 t.Fatalf("Bad profile header:\n%v", prof)
523 if strings.HasSuffix(prof, "#\t0x0\n\n") {
524 t.Errorf("Useless 0 suffix:\n%v", prof)
527 for _, test := range tests {
528 if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
529 t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
534 t.Run("proto", func(t *testing.T) {
535 // proto format
536 var w bytes.Buffer
537 Lookup("block").WriteTo(&w, 0)
538 p, err := profile.Parse(&w)
539 if err != nil {
540 t.Fatalf("failed to parse profile: %v", err)
542 t.Logf("parsed proto: %s", p)
543 if err := p.CheckValid(); err != nil {
544 t.Fatalf("invalid profile: %v", err)
547 stks := stacks(p)
548 for _, test := range tests {
549 if !containsStack(stks, test.stk) {
550 t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
557 func stacks(p *profile.Profile) (res [][]string) {
558 for _, s := range p.Sample {
559 var stk []string
560 for _, l := range s.Location {
561 for _, line := range l.Line {
562 stk = append(stk, line.Function.Name)
565 res = append(res, stk)
567 return res
// containsStack reports whether some stack in got has want as a
// (non-empty) prefix. An empty want never matches.
func containsStack(got [][]string, want []string) bool {
	for _, stk := range got {
		if len(stk) < len(want) {
			continue
		}
		matched := len(want) > 0
		for i := range want {
			if want[i] != stk[i] {
				matched = false
				break
			}
		}
		if matched {
			return true
		}
	}
	return false
}
587 const blockDelay = 10 * time.Millisecond
589 func blockChanRecv() {
590 c := make(chan bool)
591 go func() {
592 time.Sleep(blockDelay)
593 c <- true
598 func blockChanSend() {
599 c := make(chan bool)
600 go func() {
601 time.Sleep(blockDelay)
604 c <- true
607 func blockChanClose() {
608 c := make(chan bool)
609 go func() {
610 time.Sleep(blockDelay)
611 close(c)
616 func blockSelectRecvAsync() {
617 const numTries = 3
618 c := make(chan bool, 1)
619 c2 := make(chan bool, 1)
620 go func() {
621 for i := 0; i < numTries; i++ {
622 time.Sleep(blockDelay)
623 c <- true
626 for i := 0; i < numTries; i++ {
627 select {
628 case <-c:
629 case <-c2:
634 func blockSelectSendSync() {
635 c := make(chan bool)
636 c2 := make(chan bool)
637 go func() {
638 time.Sleep(blockDelay)
641 select {
642 case c <- true:
643 case c2 <- true:
647 func blockMutex() {
648 var mu sync.Mutex
649 mu.Lock()
650 go func() {
651 time.Sleep(blockDelay)
652 mu.Unlock()
654 // Note: Unlock releases mu before recording the mutex event,
655 // so it's theoretically possible for this to proceed and
656 // capture the profile before the event is recorded. As long
657 // as this is blocked before the unlock happens, it's okay.
658 mu.Lock()
661 func blockCond() {
662 var mu sync.Mutex
663 c := sync.NewCond(&mu)
664 mu.Lock()
665 go func() {
666 time.Sleep(blockDelay)
667 mu.Lock()
668 c.Signal()
669 mu.Unlock()
671 c.Wait()
672 mu.Unlock()
675 func TestMutexProfile(t *testing.T) {
676 // Generate mutex profile
678 old := runtime.SetMutexProfileFraction(1)
679 defer runtime.SetMutexProfileFraction(old)
680 if old != 0 {
681 t.Fatalf("need MutexProfileRate 0, got %d", old)
684 blockMutex()
686 t.Run("debug=1", func(t *testing.T) {
687 var w bytes.Buffer
688 Lookup("mutex").WriteTo(&w, 1)
689 prof := w.String()
690 t.Logf("received profile: %v", prof)
692 if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
693 t.Errorf("Bad profile header:\n%v", prof)
695 prof = strings.Trim(prof, "\n")
696 lines := strings.Split(prof, "\n")
697 if len(lines) != 6 {
698 t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
700 if len(lines) < 6 {
701 return
703 // checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
704 r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
705 //r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
706 if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
707 t.Errorf("%q didn't match %q", lines[3], r2)
709 if runtime.Compiler != "gccgo" {
710 r3 := "^#.*pprof.blockMutex.*$"
711 if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
712 t.Errorf("%q didn't match %q", lines[5], r3)
715 t.Logf(prof)
717 t.Run("proto", func(t *testing.T) {
718 // proto format
719 var w bytes.Buffer
720 Lookup("mutex").WriteTo(&w, 0)
721 p, err := profile.Parse(&w)
722 if err != nil {
723 t.Fatalf("failed to parse profile: %v", err)
725 t.Logf("parsed proto: %s", p)
726 if err := p.CheckValid(); err != nil {
727 t.Fatalf("invalid profile: %v", err)
730 stks := stacks(p)
731 for _, want := range [][]string{
732 // {"sync.(*Mutex).Unlock", "pprof.blockMutex.func1"},
733 {".1sync.Mutex.Unlock", "pprof.blockMutex..func1"},
735 if !containsStack(stks, want) {
736 t.Errorf("No matching stack entry for %+v", want)
// func1 through func4 are identical blockers with distinct names, so
// TestGoroutineCounts can tell their goroutine stacks apart.
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }
747 func TestGoroutineCounts(t *testing.T) {
748 if runtime.Compiler == "gccgo" {
749 t.Skip("goroutine stacks not supported on gccgo")
752 // Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
753 // desired blocking point.
754 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
756 c := make(chan int)
757 for i := 0; i < 100; i++ {
758 switch {
759 case i%10 == 0:
760 go func1(c)
761 case i%2 == 0:
762 go func2(c)
763 default:
764 go func3(c)
766 // Let goroutines block on channel
767 for j := 0; j < 5; j++ {
768 runtime.Gosched()
772 var w bytes.Buffer
773 goroutineProf := Lookup("goroutine")
775 // Check debug profile
776 goroutineProf.WriteTo(&w, 1)
777 prof := w.String()
779 if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
780 t.Errorf("expected sorted goroutine counts:\n%s", prof)
783 // Check proto profile
784 w.Reset()
785 goroutineProf.WriteTo(&w, 0)
786 p, err := profile.Parse(&w)
787 if err != nil {
788 t.Errorf("error parsing protobuf profile: %v", err)
790 if err := p.CheckValid(); err != nil {
791 t.Errorf("protobuf profile is invalid: %v", err)
793 if !containsCounts(p, []int64{50, 40, 10, 1}) {
794 t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
795 []int64{50, 40, 10, 1}, p)
798 close(c)
800 time.Sleep(10 * time.Millisecond) // let goroutines exit
// containsInOrder reports whether every string in all occurs in s, in
// the given order and without overlapping.
func containsInOrder(s string, all ...string) bool {
	rest := s
	for _, sub := range all {
		idx := strings.Index(rest, sub)
		if idx < 0 {
			return false
		}
		rest = rest[idx+len(sub):]
	}
	return true
}
814 func containsCounts(prof *profile.Profile, counts []int64) bool {
815 m := make(map[int64]int)
816 for _, c := range counts {
817 m[c]++
819 for _, s := range prof.Sample {
820 // The count is the single value in the sample
821 if len(s.Value) != 1 {
822 return false
824 m[s.Value[0]]--
826 for _, n := range m {
827 if n > 0 {
828 return false
831 return true
834 // Issue 18836.
835 func TestEmptyCallStack(t *testing.T) {
836 t.Parallel()
837 var buf bytes.Buffer
838 p := NewProfile("test18836")
839 p.Add("foo", 47674)
840 p.WriteTo(&buf, 1)
841 p.Remove("foo")
842 got := buf.String()
843 prefix := "test18836 profile: total 1\n"
844 if !strings.HasPrefix(got, prefix) {
845 t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
847 lostevent := "lostProfileEvent"
848 if !strings.Contains(got, lostevent) {
849 t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
853 func TestCPUProfileLabel(t *testing.T) {
854 testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
855 Do(context.Background(), Labels("key", "value"), func(context.Context) {
856 cpuHogger(cpuHog1, &salt1, dur)
861 func TestLabelRace(t *testing.T) {
862 // Test the race detector annotations for synchronization
863 // between settings labels and consuming them from the
864 // profile.
865 testCPUProfile(t, []string{"pprof.cpuHogger;key=value"}, func(dur time.Duration) {
866 start := time.Now()
867 var wg sync.WaitGroup
868 for time.Since(start) < dur {
869 var salts [10]int
870 for i := 0; i < 10; i++ {
871 wg.Add(1)
872 go func(j int) {
873 Do(context.Background(), Labels("key", "value"), func(context.Context) {
874 cpuHogger(cpuHog1, &salts[j], time.Millisecond)
876 wg.Done()
877 }(i)
879 wg.Wait()
884 // Check that there is no deadlock when the program receives SIGPROF while in
885 // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
886 func TestAtomicLoadStore64(t *testing.T) {
887 f, err := ioutil.TempFile("", "profatomic")
888 if err != nil {
889 t.Fatalf("TempFile: %v", err)
891 defer os.Remove(f.Name())
892 defer f.Close()
894 if err := StartCPUProfile(f); err != nil {
895 t.Fatal(err)
897 defer StopCPUProfile()
899 var flag uint64
900 done := make(chan bool, 1)
902 go func() {
903 for atomic.LoadUint64(&flag) == 0 {
904 runtime.Gosched()
906 done <- true
908 time.Sleep(50 * time.Millisecond)
909 atomic.StoreUint64(&flag, 1)
910 <-done