1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
var stop = make(chan bool, 1)
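
// perpetuumMobile keeps the scheduler busy: until a value is sent on
// stop, it keeps respawning itself in a fresh goroutine.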
func perpetuumMobile() {

func TestStopTheWorldDeadlock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	for i := 0; i != 1000; i += 1 {
	for i := 0; i != 1000; i += 1 {
	runtime.GOMAXPROCS(maxprocs)

func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	cack := make(chan bool)
	runtime.LockOSThread()
	time.Sleep(10 * time.Millisecond)

func TestYieldLocked(t *testing.T) {
	runtime.LockOSThread()
	for i := 0; i < N; i++ {
		time.Sleep(time.Millisecond)
	// runtime.UnlockOSThread() is deliberately omitted
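	// (this exercises goroutine exit while the goroutine is still locked
	// to its OS thread)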

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
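	// (SetGCPercent likewise returns the old value, so the same idiom
	// disables the collector here and re-enables it when the test returns)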
	for try := 0; try < N; try++ {
		done := make(chan bool)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			for i := 0; i < 3; i++ {
				expected := uint32(P*i + p)
				for atomic.LoadUint32(&x) != expected {
				}
				atomic.StoreUint32(&x, expected+1)
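				// Each goroutine spins until x reaches its slot, then
				// advances x; the protocol only makes progress if all P
				// goroutines are running at once.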
		for p := 0; p < P; p++ {

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		// Create P goroutines and wait until they all run.
		// When we run the actual test below, worker threads
		// running the goroutines will start parking.
		done := make(chan bool)
		for p := 0; p < P; p++ {
			if atomic.AddUint32(&x, 1) == uint32(P) {
			for atomic.LoadUint32(&x) != uint32(P) {
			}
		// Enable netpoller, affects scheduler behavior.
		laddr := "localhost:0"
		if runtime.GOOS == "android" {
			// On some Android devices, there are no records for localhost,
			// see https://golang.org/issues/14486.
			// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
			laddr = "127.0.0.1:0"
		}
		ln, err := net.Listen("tcp", laddr)
		defer ln.Close() // yup, defer in a loop
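		// (the deferred Closes accumulate until the test returns, which is
		// acceptable for a bounded number of tries)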
		done := make(chan bool)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			for p2 := 0; p2 < 2; p2++ {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p*2 + p2)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
		for p := 0; p < P; p++ {

func TestBlockLocked(t *testing.T) {
	runtime.LockOSThread()
	for i := 0; i < N; i++ {
	runtime.UnlockOSThread()
	for i := 0; i < N; i++ {

func TestTimerFairness(t *testing.T) {
	done := make(chan bool)
	for i := 0; i < 2; i++ {
	timer := time.After(20 * time.Millisecond)

func TestTimerFairness2(t *testing.T) {
	done := make(chan bool)
	for i := 0; i < 2; i++ {
		timer := time.After(20 * time.Millisecond)
		syscall.Read(0, buf[0:0])
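		// (a zero-length read returns immediately without consuming input,
		// but still enters and leaves the syscall path)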

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	for _, v := range a {

func TestPreemption(t *testing.T) {
	t.Skip("gccgo does not implement preemption")
	// Test that goroutines are preempted at function calls.
	for g := 0; g < 2; g++ {
		for i := 0; i < N; i++ {
			for atomic.LoadUint32(&x) != uint32(g) {
			atomic.StoreUint32(&x, uint32(1-g))

func TestPreemptionGC(t *testing.T) {
	t.Skip("gccgo does not implement preemption")
	// Test that pending GC preempts running goroutines.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	for i := 0; i < P; i++ {
		for atomic.LoadUint32(&stop) == 0 {
	for i := 0; i < N; i++ {
	atomic.StoreUint32(&stop, 1)

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
		t.Fatalf("want %s, got %s\n", want, output)

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
		t.Fatalf("want %s, got %s\n", want, output)

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
		t.Fatalf("want %q, got %q", want, output)
	buf := make([]byte, 1<<20)
	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	// Give goroutines about to exit a chance to exit.
	// The NumGoroutine and Stack below need to see
	// the same state of the world, so anything we can do
	// to keep it quiet is good.
	n := runtime.NumGoroutine()
	buf = buf[:runtime.Stack(buf, true)]
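	// runtime.Stack with all=true returns the number of bytes it wrote, so
	// the reslice trims buf to the dump; each goroutine record in the dump
	// begins with "goroutine N [state]:", which the count below relies on.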
	nstk := strings.Count(string(buf), "goroutine ")
		t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)

func TestPingPongHog(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0
	run := func(limit int, counter *int, wake chan bool) {
		for i := 0; i < limit; i++ {
	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	time.Sleep(100 * time.Millisecond)
	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	stop, done := make(chan bool), make(chan bool)
	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	for j := 0; j < b.N; j++ {
	for i := 0; i < b.N; i++ {
	ping <- true // Start ping-pong
	<-ping       // Let last ponger exit
	<-done       // Make sure goroutines exit
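
// stackGrowthRecursive keeps a sizable local array (pad) in every frame so
// that each recursive call is likely to trigger a stack-growth check.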
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)

func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	t.Skip("gccgo does not implement preemption")
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running

func big(stop chan int) int {
	// delay so that gc is sure to have asked for a preemption
	for i := 0; i < 1e9; i++ {
	// call bigframe, which used to miss the preemption in its prologue.
	// check if we've been asked to stop.

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	return small(stop, &x)

func small(stop chan int, x *[8192]byte) int {
	// keep small from being a leaf function, which might
	// make it not do any stack check at all.

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	runtime.RunSchedLocalQueueEmptyTest(iters)

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
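		// RunParallel distributes b.N iterations across GOMAXPROCS
		// goroutines; each one keeps calling stackGrowthRecursive until
		// pb.Next reports that no iterations remain.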
		stackGrowthRecursive(rec)

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	for i := 0; i < procs; i++ {
	for i := 0; i < procs; i++ {

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup
		for i := 0; i < N; i++ {
			b.Logf("bad") // just to capture b
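			// (the Logf is never reached in practice; referencing b forces
			// the goroutine's closure to capture it, which is the cost this
			// benchmark measures)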

func BenchmarkClosureCall(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sum += i + off1 + off2

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}
	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			//   blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			//   sends to the same channel. (The addition of
			//   `wakeDelay` improves the probability that the
			//   receiver will be blocking when the send occurs when
			//   the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			//   an increase in delay by N will increase the time per
			//   iteration by 4*N, because all 4 delays are
			//   serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			//   the time per iteration by 2*N, and the time per
			//   iteration is 2 * (runtime overhead + chan
			//   send/receive pair + delay + wakeDelay). This allows
			//   the runtime overhead, including the time it takes
			//   for the unblocked goroutine to be scheduled, to be
			//   estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			for i := 0; i < b.N; i++ {
				spin(delay + wakeDelay)
			for i := 0; i < b.N; i++ {
				spin(delay + wakeDelay)

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
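	// (e.g. b.N = 1000 gives n = 11, and 11*11*11 = 1331 stays within a
	// constant factor of b.N)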
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)

func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
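	// The recursion works over the sub-block [i0,i1) x [j0,j1) x [k0,k1):
	// the largest dimension still at or above threshold is halved, and the
	// i and j splits evaluate their two halves in parallel.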
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
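	// LockOSCounts is a testing hook exported by the runtime that reports
	// the current goroutine's external (LockOSThread) and internal lock
	// counts.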
	e, i := runtime.LockOSCounts()
	if e != 0 || i != 0 {
		t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
	runtime.LockOSThread()
	runtime.LockOSThread()
	runtime.UnlockOSThread()
	e, i = runtime.LockOSCounts()
	if e != 1 || i != 0 {
		t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
	runtime.UnlockOSThread()
	e, i = runtime.LockOSCounts()
	if e != 0 || i != 0 {
		t.Errorf("want locked counts 0, 0; got %d, %d", e, i)

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
		t.Errorf("want %s, got %s\n", want, output)
	output = runTestProg(t, prog, "LockOSThreadAlt")
		t.Errorf("want %s, got %s\n", want, output)