// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
    "fmt"
    "internal/race"
    "internal/testenv"
    "math"
    "net"
    "runtime"
    "runtime/debug"
    "strings"
    "sync"
    "sync/atomic"
    "syscall"
    "testing"
    "time"
)
var stop = make(chan bool, 1)
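
// perpetuumMobile respawns itself until a value is received on stop.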
func perpetuumMobile() {

func TestStopTheWorldDeadlock(t *testing.T) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    if testing.Short() {
        t.Skip("skipping during short test")
    }
    maxprocs := runtime.GOMAXPROCS(3)
    compl := make(chan bool, 2)
    for i := 0; i != 1000; i += 1 {
    for i := 0; i != 1000; i += 1 {
    runtime.GOMAXPROCS(maxprocs)

func TestYieldProgress(t *testing.T) {
    testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
    testYieldProgress(true)
}

func testYieldProgress(locked bool) {
    cack := make(chan bool)
        if locked {
            runtime.LockOSThread()
        }
    time.Sleep(10 * time.Millisecond)

func TestYieldLocked(t *testing.T) {
        runtime.LockOSThread()
        for i := 0; i < N; i++ {
            time.Sleep(time.Millisecond)
        }
        // runtime.UnlockOSThread() is deliberately omitted

func TestGoroutineParallelism(t *testing.T) {
    if runtime.NumCPU() == 1 {
        // Takes too long, too easy to deadlock, etc.
        t.Skip("skipping on uniprocessor")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
    // If runtime triggers a forced GC during this test then it will deadlock,
    // since the goroutines can't be stopped/preempted.
    // Disable GC for this test (see issue #10958).
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    // SetGCPercent waits until the mark phase is over, but the runtime
    // also preempts at the start of the sweep phase, so make sure that's
    // done too. See #45867.
    runtime.GC()

    for try := 0; try < N; try++ {
        done := make(chan bool)
        for p := 0; p < P; p++ {
            // Test that all P goroutines are scheduled at the same time
            for i := 0; i < 3; i++ {
                expected := uint32(P*i + p)
                for atomic.LoadUint32(&x) != expected {
                }
                atomic.StoreUint32(&x, expected+1)
        for p := 0; p < P; p++ {

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
    //testGoroutineParallelism2(t, false, false)
    testGoroutineParallelism2(t, true, false)
    testGoroutineParallelism2(t, false, true)
    testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
    if runtime.NumCPU() == 1 {
        // Takes too long, too easy to deadlock, etc.
        t.Skip("skipping on uniprocessor")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
    // If runtime triggers a forced GC during this test then it will deadlock,
    // since the goroutines can't be stopped/preempted.
    // Disable GC for this test (see issue #10958).
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    // SetGCPercent waits until the mark phase is over, but the runtime
    // also preempts at the start of the sweep phase, so make sure that's
    // done too. See #45867.
    runtime.GC()

    for try := 0; try < N; try++ {
        // Create P goroutines and wait until they all run.
        // When we run the actual test below, worker threads
        // running the goroutines will start parking.
        done := make(chan bool)
        for p := 0; p < P; p++ {
            go func() {
                if atomic.AddUint32(&x, 1) == uint32(P) {
                    done <- true
                    return
                }
                for atomic.LoadUint32(&x) != uint32(P) {
                }
            }()
        }
        if netpoll {
            // Enable the netpoller, which affects scheduler behavior.
            laddr := "localhost:0"
            if runtime.GOOS == "android" {
                // On some Android devices, there are no records for localhost,
                // see https://golang.org/issues/14486.
                // Don't use 127.0.0.1 for every case; it won't work on IPv6-only systems.
                laddr = "127.0.0.1:0"
            }
            ln, err := net.Listen("tcp", laddr)
            if err == nil {
                defer ln.Close() // yup, defer in a loop
            }
        }
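        // Each deferred Close lives until the function returns, but the
        // enclosing try loop is bounded by N, so at most N listeners are
        // held open at once.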
        done := make(chan bool)
        // Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
        for p := 0; p < P/2; p++ {
            go func(p int) {
                for p2 := 0; p2 < 2; p2++ {
                    go func(p2 int) {
                        for i := 0; i < 3; i++ {
                            expected := uint32(P*i + p*2 + p2)
                            for atomic.LoadUint32(&x) != expected {
                            }
                            atomic.StoreUint32(&x, expected+1)
        for p := 0; p < P; p++ {

func TestBlockLocked(t *testing.T) {
    const N = 10
    c := make(chan bool, N)
    go func() {
        runtime.LockOSThread()
        for i := 0; i < N; i++ {
            c <- true
        }
        runtime.UnlockOSThread()
    }()
    for i := 0; i < N; i++ {
        <-c
    }
}

func TestTimerFairness(t *testing.T) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    done := make(chan bool)
    for i := 0; i < 2; i++ {
    timer := time.After(20 * time.Millisecond)

func TestTimerFairness2(t *testing.T) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    done := make(chan bool)
    for i := 0; i < 2; i++ {
        timer := time.After(20 * time.Millisecond)

        syscall.Read(0, buf[0:0])

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
    var a [128]int
    sum := 0
    for _, v := range a {
        sum += v
    }
    return sum
}

func TestPreemption(t *testing.T) {
    if runtime.Compiler == "gccgo" {
        t.Skip("gccgo does not implement preemption")
    }
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    // Test that goroutines are preempted at function calls.
    for g := 0; g < 2; g++ {
        for i := 0; i < N; i++ {
            for atomic.LoadUint32(&x) != uint32(g) {
            }
            atomic.StoreUint32(&x, uint32(1-g))

func TestPreemptionGC(t *testing.T) {
    if runtime.Compiler == "gccgo" {
        t.Skip("gccgo does not implement preemption")
    }
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    // Test that pending GC preempts running goroutines.
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
    for i := 0; i < P; i++ {
        go func() {
            for atomic.LoadUint32(&stop) == 0 {
    for i := 0; i < N; i++ {
        runtime.GC()
    }
    atomic.StoreUint32(&stop, 1)
}

func TestAsyncPreempt(t *testing.T) {
    if !runtime.PreemptMSupported {
        t.Skip("asynchronous preemption not supported on this platform")
    }
    output := runTestProg(t, "testprog", "AsyncPreempt")
    want := "OK\n"
    if output != want {
        t.Fatalf("want %s, got %s\n", want, output)
    }
}

func TestGCFairness(t *testing.T) {
    output := runTestProg(t, "testprog", "GCFairness")
    want := "OK\n"
    if output != want {
        t.Fatalf("want %s, got %s\n", want, output)
    }
}

func TestGCFairness2(t *testing.T) {
    output := runTestProg(t, "testprog", "GCFairness2")
    want := "OK\n"
    if output != want {
        t.Fatalf("want %s, got %s\n", want, output)
    }
}

func TestNumGoroutine(t *testing.T) {
    output := runTestProg(t, "testprog", "NumGoroutine")
    want := "1\n"
    if output != want {
        t.Fatalf("want %q, got %q", want, output)
    }

    buf := make([]byte, 1<<20)
    // Try up to 10 times for a match before giving up.
    // This is a fundamentally racy check but it's important
    // to notice if NumGoroutine and Stack are _always_ out of sync.
    for i := 0; ; i++ {
        // Give goroutines about to exit a chance to exit.
        // The NumGoroutine and Stack below need to see
        // the same state of the world, so anything we can do
        // to keep it quiet is good.
        runtime.Gosched()
        n := runtime.NumGoroutine()
        buf = buf[:runtime.Stack(buf, true)]
        nstk := strings.Count(string(buf), "goroutine ")
        t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)

func TestPingPongHog(t *testing.T) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    if testing.Short() {
        t.Skip("skipping in -short mode")
    }
    if race.Enabled {
        // The race detector randomizes the scheduler,
        // which causes this test to fail (#38266).
        t.Skip("skipping in -race mode")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
    done := make(chan bool)
    hogChan, lightChan := make(chan bool), make(chan bool)
    hogCount, lightCount := 0, 0
    run := func(limit int, counter *int, wake chan bool) {
        for i := 0; i < limit; i++ {
            *counter++
        }
    // Start two co-scheduled hog goroutines.
    for i := 0; i < 2; i++ {
        go run(1e6, &hogCount, hogChan)
    }
    // Start two co-scheduled light goroutines.
    for i := 0; i < 2; i++ {
        go run(1e3, &lightCount, lightChan)
    }
    // Start goroutine pairs and wait for a few preemption rounds.
    hogChan <- true
    lightChan <- true
    time.Sleep(100 * time.Millisecond)
    // Check that hogCount and lightCount are within a factor of
    // 5, which indicates that both pairs of goroutines handed off
    // the P within a time-slice to their buddy. We can use a
    // fairly large factor here to make this robust: if the
    // scheduler isn't working right, the gap should be ~1000X.
    const factor = 5
    if hogCount > lightCount*factor || lightCount > hogCount*factor {
        t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
    }
}

func BenchmarkPingPongHog(b *testing.B) {
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
    stop, done := make(chan bool), make(chan bool)
    // Ping-pong b.N times
    ping, pong := make(chan bool), make(chan bool)
        for j := 0; j < b.N; j++ {
        for i := 0; i < b.N; i++ {
    ping <- true // Start ping-pong

    <-ping // Let last ponger exit
    <-done // Make sure goroutines exit
}

var padData [128]uint64
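
// padData seeds a large local array in stackGrowthRecursive below, giving
// every call frame roughly 1KB so the recursion reliably forces stack growth.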

func stackGrowthRecursive(i int) {
    var pad [128]uint64
    pad = padData
    for j := range pad {
        if pad[j] != 0 {
            return
        }
    }
    if i != 0 {
        stackGrowthRecursive(i - 1)
    }
}

func TestPreemptSplitBig(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping in -short mode")
    }
    if runtime.Compiler == "gccgo" {
        t.Skip("gccgo does not implement preemption")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    stop := make(chan int)
    go big(stop)
    for i := 0; i < 3; i++ {
        time.Sleep(10 * time.Microsecond) // let big start running
        runtime.GC()
    }

func big(stop chan int) int {
    // delay so that gc is sure to have asked for a preemption
    for i := 0; i < 1e9; i++ {
    // call bigframe, which used to miss the preemption in its prologue.
    bigframe(stop)

    // check if we've been asked to stop.

func bigframe(stop chan int) int {
    // Not splitting the stack will overflow.
    // small will notice that it needs a stack split and will
    // catch the overflow.
    var x [8192]byte
    return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
    // Keep small from being a leaf function, which might
    // make it not do any stack check at all.
    nonleaf(stop)

func nonleaf(stop chan int) bool {
    // do something that won't be inlined:

func TestSchedLocalQueue(t *testing.T) {
    runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
    runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
    if runtime.NumCPU() == 1 {
        // Takes too long and does not trigger the race.
        t.Skip("skipping on uniprocessor")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
    // If runtime triggers a forced GC during this test then it will deadlock,
    // since the goroutines can't be stopped/preempted during spin wait.
    defer debug.SetGCPercent(debug.SetGCPercent(-1))
    // SetGCPercent waits until the mark phase is over, but the runtime
    // also preempts at the start of the sweep phase, so make sure that's
    // done too. See #45867.
    runtime.GC()
    runtime.RunSchedLocalQueueEmptyTest(iters)

func benchmarkStackGrowth(b *testing.B, rec int) {
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            stackGrowthRecursive(rec)
        }
    })
}

func BenchmarkStackGrowth(b *testing.B) {
    benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
    benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
    benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
    benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
    c := make(chan bool)
    var f func(n int)
    f = func(n int) {
        if n == 0 {
            c <- true
            return
        }
        go f(n - 1)
    }
    for i := 0; i < procs; i++ {
        go f(b.N / procs)
    }
    for i := 0; i < procs; i++ {
        <-c
    }
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
    for i := 0; i < b.N; i++ {
        var wg sync.WaitGroup
        wg.Add(N)
        for i := 0; i < N; i++ {
                b.Logf("bad") // just to capture b

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
    var wg sync.WaitGroup
    var count int32
    for i := 0; i < targetThreadCount; i++ {
        wg.Add(1)
        go func() {
            atomic.AddInt32(&count, 1)
            for atomic.LoadInt32(&count) < int32(targetThreadCount) {
                // spin until all threads started
            }
            // spin a bit more to ensure they are all running on separate CPUs.
            doWork(time.Millisecond)
            wg.Done()
        }()
    }
    wg.Wait()
}

func doWork(dur time.Duration) {
    start := time.Now()
    for time.Since(start) < dur {
    }
}
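
// warmupScheduler is intended to run before timing starts; for example,
// BenchmarkCreateGoroutinesSingle below calls
// warmupScheduler(runtime.GOMAXPROCS(0)) so that work stealing is already
// possible when the benchmark loop begins.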

// BenchmarkCreateGoroutinesSingle creates many goroutines, all from a single
// producer (the main benchmark goroutine).
//
// Compared to BenchmarkCreateGoroutines, this causes different behavior in the
// scheduler because Ms are much more likely to need to steal work from the
// main P rather than having work in the local run queue.
func BenchmarkCreateGoroutinesSingle(b *testing.B) {
    // Since we are interested in stealing behavior, warm the scheduler to
    // get all the Ps running first.
    warmupScheduler(runtime.GOMAXPROCS(0))
    var wg sync.WaitGroup
    wg.Add(b.N)
    for i := 0; i < b.N; i++ {
        go func() {
            wg.Done()
        }()
    }
    wg.Wait()
}

func BenchmarkClosureCall(b *testing.B) {
    for i := 0; i < b.N; i++ {
        sum += i + off1 + off2

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
    if runtime.GOMAXPROCS(0) == 1 {
        b.Skip("skipping: GOMAXPROCS=1")
    }
    wakeDelay := 5 * time.Microsecond
    for _, delay := range []time.Duration{
        0,
        1 * time.Microsecond,
        2 * time.Microsecond,
        5 * time.Microsecond,
        10 * time.Microsecond,
        20 * time.Microsecond,
        50 * time.Microsecond,
        100 * time.Microsecond,
    } {
        b.Run(delay.String(), func(b *testing.B) {
            // Start two goroutines, which alternate between being
            // sender and receiver in the following protocol:
            //
            // - The receiver spins for `delay` and then does a
            // blocking receive on a channel.
            //
            // - The sender spins for `delay+wakeDelay` and then
            // sends to the same channel. (The addition of
            // `wakeDelay` improves the probability that the
            // receiver will be blocking when the send occurs when
            // the goroutines execute in parallel.)
            //
            // In each iteration of the benchmark, each goroutine
            // acts once as sender and once as receiver, so each
            // goroutine spins for delay twice.
            //
            // BenchmarkWakeupParallel is used to estimate how
            // efficiently the scheduler parallelizes goroutines in
            // the presence of blocking:
            //
            // - If both goroutines are executed on the same core,
            // an increase in delay by N will increase the time per
            // iteration by 4*N, because all 4 delays are
            // serialized.
            //
            // - Otherwise, an increase in delay by N will increase
            // the time per iteration by 2*N, and the time per
            // iteration is 2 * (runtime overhead + chan
            // send/receive pair + delay + wakeDelay). This allows
            // the runtime overhead, including the time it takes
            // for the unblocked goroutine to be scheduled, to be
            // estimated.
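            //
            // For example (illustrative numbers only): with delay = 10µs and
            // wakeDelay = 5µs, a serialized schedule costs roughly
            // 2*10µs + 2*15µs = 50µs per iteration, while a parallel schedule
            // costs roughly 2*(overhead + send/receive + 15µs), so the
            // difference estimates the overhead.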
            ping, pong := make(chan struct{}), make(chan struct{})
            start := make(chan struct{})
            done := make(chan struct{})
                for i := 0; i < b.N; i++ {
                    spin(delay + wakeDelay)
                for i := 0; i < b.N; i++ {

                    spin(delay + wakeDelay)

func BenchmarkWakeupParallelSpinning(b *testing.B) {
    benchmarkWakeupParallel(b, func(d time.Duration) {
        end := time.Now().Add(d)
        for time.Now().Before(end) {
            // do nothing
        }
    })
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
    if sysNanosleep == nil {
        b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
    }
    benchmarkWakeupParallel(b, func(d time.Duration) {
        sysNanosleep(d)
    })
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
    // matmult is O(N**3) but testing expects O(b.N),
    // so we need to take cube root of b.N
    n := int(math.Cbrt(float64(b.N))) + 1
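    // For example, with b.N = 1000 this multiplies 11x11 matrices
    // (Cbrt(1000) = 10, plus 1), keeping the O(n**3) work proportional to b.N.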
    A := makeMatrix(n)
    B := makeMatrix(n)
    C := makeMatrix(n)
    matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
    m := make(Matrix, n)
    for i := 0; i < n; i++ {
        m[i] = make([]float64, n)
        for j := 0; j < n; j++ {
            m[i][j] = float64(i*n + j)
        }
    }
    return m
}

func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
    di := i1 - i0
    dj := j1 - j0
    dk := k1 - k0
    if di >= dj && di >= dk && di >= threshold {
        // divide in two by y axis
        mi := (i0 + i1) / 2
        done1 := make(chan struct{}, 1)
        go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
        matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
        <-done1
    } else if dj >= dk && dj >= threshold {
        // divide in two by x axis
        mj := (j0 + j1) / 2
        done1 := make(chan struct{}, 1)
        go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
        matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
        <-done1
    } else if dk >= threshold {
        // divide in two by "k" axis
        // deliberately not parallel because of data races
        mk := (k0 + k1) / 2
        matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
        matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
    } else {
        // the matrices are small enough, compute directly
        for i := i0; i < i1; i++ {
            for j := j0; j < j1; j++ {
                for k := k0; k < k1; k++ {
                    C[i][j] += A[i][k] * B[k][j]
                }
            }
        }
    }
    if done != nil {
        done <- struct{}{}
    }
}

func TestStealOrder(t *testing.T) {
    runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no threads on wasm yet")
    }
    e, i := runtime.LockOSCounts()
    if e != 0 || i != 0 {
        t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
    }
    runtime.LockOSThread()
    runtime.LockOSThread()
    runtime.UnlockOSThread()
    e, i = runtime.LockOSCounts()
    if e != 1 || i != 0 {
        t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
    }
    runtime.UnlockOSThread()
    e, i = runtime.LockOSCounts()
    if e != 0 || i != 0 {
        t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
    }

func TestLockOSThreadExit(t *testing.T) {
    testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
    output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
    want := "OK\n"
    if output != want {
        t.Errorf("want %q, got %q", want, output)
    }
    output = runTestProg(t, prog, "LockOSThreadAlt")
    if output != want {
        t.Errorf("want %q, got %q", want, output)
    }
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
    want := "OK\n"
    skip := "unshare not permitted\n"
    output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
    if output == skip {
        t.Skip("unshare syscall not permitted on this system")
    } else if output != want {
        t.Errorf("want %q, got %q", want, output)
    }
}

func TestLockOSThreadTemplateThreadRace(t *testing.T) {
    testenv.MustHaveGoRun(t)
    exe, err := buildTestProg(t, "testprog")
    if err != nil {
        t.Fatal(err)
    }
    iterations := 100
    if testing.Short() {
        // Reduce run time to ~100ms, with much lower probability of
        // catching the race.
        iterations = 5
    }
    for i := 0; i < iterations; i++ {
        want := "OK\n"
        output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace")
        if output != want {
            t.Fatalf("run %d: want %q, got %q", i, want, output)
        }
    }
}

// fakeSyscall emulates a system call.
func fakeSyscall(duration time.Duration) {
    runtime.Entersyscall()
    for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
    }
    runtime.Exitsyscall()
}
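
// Entersyscall and Exitsyscall are runtime internals exported for testing;
// the spin loop above keeps the thread "in a syscall" for the requested
// duration so the scheduler's syscall preemption path can be exercised.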

// Check that a goroutine will be preempted when it makes short system calls.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
    if runtime.GOARCH == "wasm" {
        t.Skip("no preemption on wasm yet")
    }
    defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    iterations := 10
    if testing.Short() {
        iterations = 1
    }
    const (
        maxDuration = 5 * time.Second
    )
    for i := 0; i < iterations; i++ {
        c := make(chan bool, nroutines)
        for g := 0; g < nroutines; g++ {
            go func(stop *uint32) {
                for atomic.LoadUint32(stop) == 0 {
                    fakeSyscall(syscallDuration)
                }
            }(&stop)
        }
        // wait until all goroutines have started.
        for g := 0; g < nroutines; g++ {
            <-c
        }
        atomic.StoreUint32(&stop, 1)
        // wait until all goroutines have finished.
        for g := 0; g < nroutines; g++ {
            <-c
        }
        duration := time.Since(start)
        if duration > maxDuration {
            t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
        }
    }
}

func TestPreemptionAfterSyscall(t *testing.T) {
    if runtime.GOOS == "plan9" {
        testenv.SkipFlaky(t, 41015)
    }
    for _, i := range []time.Duration{10, 100, 1000} {
        d := i * time.Microsecond
        t.Run(fmt.Sprint(d), func(t *testing.T) {
            testPreemptionAfterSyscall(t, d)
        })
    }
}

func TestGetgThreadSwitch(t *testing.T) {
    runtime.RunGetgThreadSwitchTest()
}

// TestNetpollBreak tests that netpollBreak can break a netpoll.
// This test is not particularly safe since the call to netpoll
// will pick up any stray files that are ready, but it should work
// OK as long as it is not run in parallel.
func TestNetpollBreak(t *testing.T) {
    if runtime.GOMAXPROCS(0) == 1 {
        t.Skip("skipping: GOMAXPROCS=1")
    }
    // Make sure that netpoll is initialized.
    runtime.NetpollGenericInit()
    start := time.Now()
    c := make(chan bool, 2)
    go func() {
        c <- true
        runtime.Netpoll(10 * time.Second.Nanoseconds())
        c <- true
    }()
    <-c
    // Loop because the break might get eaten by the scheduler.
    // Break twice to break both the netpoll we started and the
    // scheduler netpoll.
loop:
    for {
        runtime.NetpollBreak()
        runtime.NetpollBreak()
        select {
        case <-c:
            break loop
        default:
        }
    }
    if dur := time.Since(start); dur > 5*time.Second {
        t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
    }
}

// TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value
// doesn't cause a crash at startup. See issue 38474.
func TestBigGOMAXPROCS(t *testing.T) {
    output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024")
    // Ignore error conditions on small machines.
    for _, errstr := range []string{
        "failed to create new OS thread",
        "cannot allocate memory",
    } {
        if strings.Contains(output, errstr) {
            t.Skipf("failed to create 1024 threads")
        }
    }
    if !strings.Contains(output, "unknown function: NonexistentTest") {
        t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
    }
}