// libgo/go/runtime/proc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/race"
	"internal/testenv"
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

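// stop signals perpetuumMobile to shut down. Until then, every invocation
// of perpetuumMobile spawns a replacement for itself, so the scheduler
// always has at least one runnable goroutine.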
var stop = make(chan bool, 1)

func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}

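// TestStopTheWorldDeadlock runs GC and GOMAXPROCS changes (both of which
// must stop the world) concurrently with a constant churn of new
// goroutines, checking that the stop-the-world operations complete
// instead of deadlocking.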
func TestStopTheWorldDeadlock(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}

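// TestYieldProgress and TestYieldLockedProgress check that a goroutine
// spinning in runtime.Gosched does not starve the rest of the program,
// with the spinner either unlocked or locked to its OS thread.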
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

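// TestYieldLocked checks that a goroutine that is locked to its OS thread
// can still yield and sleep repeatedly; the thread lock is intentionally
// never released.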
func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices, there are no records for localhost,
				// see https://golang.org/issues/14486.
				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil { // only close the listener if it was actually opened
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

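// TestBlockLocked checks that a goroutine locked to an OS thread can block
// on channel sends repeatedly and still be rescheduled each time.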
func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

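// TestTimerFairness checks that a pending timer eventually fires even while
// two goroutines keep channel c permanently ready: the test returns only
// once the timer case is selected.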
func TestTimerFairness(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

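// TestTimerFairness2 is a variant of TestTimerFairness in which the
// competing goroutines also mix in zero-length reads (system calls) and
// both send and receive on c; each goroutine must still see its timer fire.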
func TestTimerFairness2(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("gccgo does not implement preemption")
	}
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("gccgo does not implement preemption")
	}
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestAsyncPreempt(t *testing.T) {
	if !runtime.PreemptMSupported {
		t.Skip("asynchronous preemption not supported on this platform")
	}
	output := runTestProg(t, "testprog", "AsyncPreempt")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}

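// TestPingPongHog checks scheduler fairness between a pair of goroutines
// exchanging long bursts of work (the hogs) and a pair exchanging short
// bursts (the light goroutines) on a single P: neither pair should be able
// to starve the other, which the factor-of-5 check below enforces.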
func TestPingPongHog(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	if race.Enabled {
		// The race detector randomizes the scheduler,
		// which causes this test to fail (#38266).
		t.Skip("skipping in -race mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	const factor = 5
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()
	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}

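// stackGrowthRecursive recurses i times with a 128-word array in every
// frame, forcing repeated stack growth; the copy from the global padData
// apparently keeps the array (and hence the large frame) live.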
var padData [128]uint64

func stackGrowthRecursive(i int) {
	var pad [128]uint64
	pad = padData
	for j := range pad {
		if pad[j] != 0 {
			return
		}
	}
	if i != 0 {
		stackGrowthRecursive(i - 1)
	}
}

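// TestPreemptSplitBig checks that a goroutine is preempted by GC even while
// it repeatedly enters functions with large frames (see big and bigframe
// below, which exercise the stack-split prologue). It is skipped under
// gccgo, which does not implement this preemption.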
func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	t.Skip("gccgo does not implement preemption")
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		default:
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	// SetGCPercent waits until the mark phase is over, but the runtime
	// also preempts at the start of the sweep phase, so make sure that's
	// done too. See #45867.
	runtime.GC()

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(time.Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

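// doWork spins, burning CPU without yielding, until dur has elapsed.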
func doWork(dur time.Duration) {
	start := time.Now()
	for time.Since(start) < dur {
	}
}

// BenchmarkCreateGoroutinesSingle creates many goroutines, all from a single
// producer (the main benchmark goroutine).
//
// Compared to BenchmarkCreateGoroutines, this causes different behavior in the
// scheduler because Ms are much more likely to need to steal work from the
// main P rather than having work in the local run queue.
func BenchmarkCreateGoroutinesSingle(b *testing.B) {
	// Since we are interested in stealing behavior, warm the scheduler to
	// get all the Ps running first.
	warmupScheduler(runtime.GOMAXPROCS(0))
	b.ResetTimer()

	var wg sync.WaitGroup
	wg.Add(b.N)
	for i := 0; i < b.N; i++ {
		go func() {
			wg.Done()
		}()
	}
	wg.Wait()
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			// blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			// sends to the same channel. (The addition of
			// `wakeDelay` improves the probability that the
			// receiver will be blocking when the send occurs when
			// the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			// an increase in delay by N will increase the time per
			// iteration by 4*N, because all 4 delays are
			// serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			// the time per iteration by 2*N, and the time per
			// iteration is 2 * (runtime overhead + chan
			// send/receive pair + delay + wakeDelay). This allows
			// the runtime overhead, including the time it takes
			// for the unblocked goroutine to be scheduled, to be
			// estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}

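// matmult adds A*B into C over the half-open ranges [i0,i1), [j0,j1) and
// [k0,k1). A dimension longer than threshold is split in half; splits along
// i and j compute the two halves in parallel, while splits along k stay
// serial because both halves would write the same elements of C. If done is
// non-nil, it receives a value when the call completes.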
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no threads on wasm yet")
	}

	go func() {
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadAvoidsStatePropagation(t *testing.T) {
	want := "OK\n"
	skip := "unshare not permitted\n"
	output := runTestProg(t, "testprog", "LockOSThreadAvoidsStatePropagation", "GOMAXPROCS=1")
	if output == skip {
		t.Skip("unshare syscall not permitted on this system")
	} else if output != want {
		t.Errorf("want %q, got %q", want, output)
	}
}

func TestLockOSThreadTemplateThreadRace(t *testing.T) {
	testenv.MustHaveGoRun(t)

	exe, err := buildTestProg(t, "testprog")
	if err != nil {
		t.Fatal(err)
	}

	iterations := 100
	if testing.Short() {
		// Reduce run time to ~100ms, with much lower probability of
		// catching issues.
		iterations = 5
	}
	for i := 0; i < iterations; i++ {
		want := "OK\n"
		output := runBuiltTestProg(t, exe, "LockOSThreadTemplateThreadRace")
		if output != want {
			t.Fatalf("run %d: want %q, got %q", i, want, output)
		}
	}
}

// fakeSyscall emulates a system call.
//go:nosplit
func fakeSyscall(duration time.Duration) {
	runtime.Entersyscall()
	for start := runtime.Nanotime(); runtime.Nanotime()-start < int64(duration); {
	}
	runtime.Exitsyscall()
}

// Check that a goroutine will be preempted if it is calling short system calls.
func testPreemptionAfterSyscall(t *testing.T, syscallDuration time.Duration) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no preemption on wasm yet")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))

	iterations := 10
	if testing.Short() {
		iterations = 1
	}
	const (
		maxDuration = 5 * time.Second
		nroutines   = 8
	)

	for i := 0; i < iterations; i++ {
		c := make(chan bool, nroutines)
		stop := uint32(0)

		start := time.Now()
		for g := 0; g < nroutines; g++ {
			go func(stop *uint32) {
				c <- true
				for atomic.LoadUint32(stop) == 0 {
					fakeSyscall(syscallDuration)
				}
				c <- true
			}(&stop)
		}
		// wait until all goroutines have started.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		atomic.StoreUint32(&stop, 1)
		// wait until all goroutines have finished.
		for g := 0; g < nroutines; g++ {
			<-c
		}
		duration := time.Since(start)

		if duration > maxDuration {
			t.Errorf("timeout exceeded: %v (%v)", duration, maxDuration)
		}
	}
}

func TestPreemptionAfterSyscall(t *testing.T) {
	if runtime.GOOS == "plan9" {
		testenv.SkipFlaky(t, 41015)
	}

	for _, i := range []time.Duration{10, 100, 1000} {
		d := i * time.Microsecond
		t.Run(fmt.Sprint(d), func(t *testing.T) {
			testPreemptionAfterSyscall(t, d)
		})
	}
}

func TestGetgThreadSwitch(t *testing.T) {
	runtime.RunGetgThreadSwitchTest()
}

// TestNetpollBreak tests that netpollBreak can break a netpoll.
// This test is not particularly safe since the call to netpoll
// will pick up any stray files that are ready, but it should work
// OK as long as it is not run in parallel.
func TestNetpollBreak(t *testing.T) {
	if runtime.GOMAXPROCS(0) == 1 {
		t.Skip("skipping: GOMAXPROCS=1")
	}

	// Make sure that netpoll is initialized.
	runtime.NetpollGenericInit()

	start := time.Now()
	c := make(chan bool, 2)
	go func() {
		c <- true
		runtime.Netpoll(10 * time.Second.Nanoseconds())
		c <- true
	}()
	<-c
	// Loop because the break might get eaten by the scheduler.
	// Break twice to break both the netpoll we started and the
	// scheduler netpoll.
loop:
	for {
		runtime.Usleep(100)
		runtime.NetpollBreak()
		runtime.NetpollBreak()
		select {
		case <-c:
			break loop
		default:
		}
	}
	if dur := time.Since(start); dur > 5*time.Second {
		t.Errorf("netpollBreak did not interrupt netpoll: slept for: %v", dur)
	}
}

// TestBigGOMAXPROCS tests that setting GOMAXPROCS to a large value
// doesn't cause a crash at startup. See issue 38474.
func TestBigGOMAXPROCS(t *testing.T) {
	t.Parallel()
	output := runTestProg(t, "testprog", "NonexistentTest", "GOMAXPROCS=1024")
	// Ignore error conditions on small machines.
	for _, errstr := range []string{
		"failed to create new OS thread",
		"cannot allocate memory",
	} {
		if strings.Contains(output, errstr) {
			t.Skipf("failed to create 1024 threads")
		}
	}
	if !strings.Contains(output, "unknown function: NonexistentTest") {
		t.Errorf("output:\n%s\nwanted:\nunknown function: NonexistentTest", output)
	}
}