[official-gcc.git] / libgo / go / runtime / proc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math"
	"net"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"
)

var stop = make(chan bool, 1)

func perpetuumMobile() {
	select {
	case <-stop:
	default:
		go perpetuumMobile()
	}
}
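
// TestStopTheWorldDeadlock runs concurrent GC cycles and GOMAXPROCS changes
// while perpetuumMobile keeps spawning goroutines, checking that repeatedly
// stopping the world does not deadlock the scheduler.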
func TestStopTheWorldDeadlock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping during short test")
	}
	maxprocs := runtime.GOMAXPROCS(3)
	compl := make(chan bool, 2)
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GC()
		}
		compl <- true
	}()
	go func() {
		for i := 0; i != 1000; i += 1 {
			runtime.GOMAXPROCS(3)
		}
		compl <- true
	}()
	go perpetuumMobile()
	<-compl
	<-compl
	stop <- true
	runtime.GOMAXPROCS(maxprocs)
}
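
// TestYieldProgress and TestYieldLockedProgress check that a goroutine
// busy-looping on runtime.Gosched (optionally locked to its OS thread)
// still observes channel sends from other goroutines.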
func TestYieldProgress(t *testing.T) {
	testYieldProgress(false)
}

func TestYieldLockedProgress(t *testing.T) {
	testYieldProgress(true)
}

func testYieldProgress(locked bool) {
	c := make(chan bool)
	cack := make(chan bool)
	go func() {
		if locked {
			runtime.LockOSThread()
		}
		for {
			select {
			case <-c:
				cack <- true
				return
			default:
				runtime.Gosched()
			}
		}
	}()
	time.Sleep(10 * time.Millisecond)
	c <- true
	<-cack
}

func TestYieldLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			runtime.Gosched()
			time.Sleep(time.Millisecond)
		}
		c <- true
		// runtime.UnlockOSThread() is deliberately omitted
	}()
	<-c
}

func TestGoroutineParallelism(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		P = 3
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		done := make(chan bool)
		x := uint32(0)
		for p := 0; p < P; p++ {
			// Test that all P goroutines are scheduled at the same time
			go func(p int) {
				for i := 0; i < 3; i++ {
					expected := uint32(P*i + p)
					for atomic.LoadUint32(&x) != expected {
					}
					atomic.StoreUint32(&x, expected+1)
				}
				done <- true
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}

// Test that all runnable goroutines are scheduled at the same time.
func TestGoroutineParallelism2(t *testing.T) {
	//testGoroutineParallelism2(t, false, false)
	testGoroutineParallelism2(t, true, false)
	testGoroutineParallelism2(t, false, true)
	testGoroutineParallelism2(t, true, true)
}

func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
	if runtime.NumCPU() == 1 {
		// Takes too long, too easy to deadlock, etc.
		t.Skip("skipping on uniprocessor")
	}
	P := 4
	N := 10
	if testing.Short() {
		N = 3
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted.
	// Disable GC for this test (see issue #10958).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	for try := 0; try < N; try++ {
		if load {
			// Create P goroutines and wait until they all run.
			// When we run the actual test below, worker threads
			// running the goroutines will start parking.
			done := make(chan bool)
			x := uint32(0)
			for p := 0; p < P; p++ {
				go func() {
					if atomic.AddUint32(&x, 1) == uint32(P) {
						done <- true
						return
					}
					for atomic.LoadUint32(&x) != uint32(P) {
					}
				}()
			}
			<-done
		}
		if netpoll {
			// Enable netpoller, affects scheduler behavior.
			laddr := "localhost:0"
			if runtime.GOOS == "android" {
				// On some Android devices, there are no records for localhost,
				// see https://golang.org/issues/14486.
				// Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
				laddr = "127.0.0.1:0"
			}
			ln, err := net.Listen("tcp", laddr)
			if err == nil {
				defer ln.Close() // yup, defer in a loop
			}
		}
		done := make(chan bool)
		x := uint32(0)
		// Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
		for p := 0; p < P/2; p++ {
			go func(p int) {
				for p2 := 0; p2 < 2; p2++ {
					go func(p2 int) {
						for i := 0; i < 3; i++ {
							expected := uint32(P*i + p*2 + p2)
							for atomic.LoadUint32(&x) != expected {
							}
							atomic.StoreUint32(&x, expected+1)
						}
						done <- true
					}(p2)
				}
			}(p)
		}
		for p := 0; p < P; p++ {
			<-done
		}
	}
}
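
// TestBlockLocked checks that a goroutine locked to its OS thread can still
// block on and be woken from channel operations.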
func TestBlockLocked(t *testing.T) {
	const N = 10
	c := make(chan bool)
	go func() {
		runtime.LockOSThread()
		for i := 0; i < N; i++ {
			c <- true
		}
		runtime.UnlockOSThread()
	}()
	for i := 0; i < N; i++ {
		<-c
	}
}

func TestTimerFairness(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			for {
				select {
				case c <- true:
				case <-done:
					return
				}
			}
		}()
	}

	timer := time.After(20 * time.Millisecond)
	for {
		select {
		case <-c:
		case <-timer:
			close(done)
			return
		}
	}
}

func TestTimerFairness2(t *testing.T) {
	done := make(chan bool)
	c := make(chan bool)
	for i := 0; i < 2; i++ {
		go func() {
			timer := time.After(20 * time.Millisecond)
			var buf [1]byte
			for {
				syscall.Read(0, buf[0:0])
				select {
				case c <- true:
				case <-c:
				case <-timer:
					done <- true
					return
				}
			}
		}()
	}
	<-done
	<-done
}

// The function is used to test preemption at split stack checks.
// Declaring a var avoids inlining at the call site.
var preempt = func() int {
	var a [128]int
	sum := 0
	for _, v := range a {
		sum += v
	}
	return sum
}

func TestPreemption(t *testing.T) {
	t.Skip("gccgo does not implement preemption")
	// Test that goroutines are preempted at function calls.
	N := 5
	if testing.Short() {
		N = 2
	}
	c := make(chan bool)
	var x uint32
	for g := 0; g < 2; g++ {
		go func(g int) {
			for i := 0; i < N; i++ {
				for atomic.LoadUint32(&x) != uint32(g) {
					preempt()
				}
				atomic.StoreUint32(&x, uint32(1-g))
			}
			c <- true
		}(g)
	}
	<-c
	<-c
}

func TestPreemptionGC(t *testing.T) {
	t.Skip("gccgo does not implement preemption")
	// Test that pending GC preempts running goroutines.
	P := 5
	N := 10
	if testing.Short() {
		P = 3
		N = 2
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
	var stop uint32
	for i := 0; i < P; i++ {
		go func() {
			for atomic.LoadUint32(&stop) == 0 {
				preempt()
			}
		}()
	}
	for i := 0; i < N; i++ {
		runtime.Gosched()
		runtime.GC()
	}
	atomic.StoreUint32(&stop, 1)
}

func TestGCFairness(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestGCFairness2(t *testing.T) {
	output := runTestProg(t, "testprog", "GCFairness2")
	want := "OK\n"
	if output != want {
		t.Fatalf("want %s, got %s\n", want, output)
	}
}

func TestNumGoroutine(t *testing.T) {
	output := runTestProg(t, "testprog", "NumGoroutine")
	want := "1\n"
	if output != want {
		t.Fatalf("want %q, got %q", want, output)
	}

	buf := make([]byte, 1<<20)

	// Try up to 10 times for a match before giving up.
	// This is a fundamentally racy check but it's important
	// to notice if NumGoroutine and Stack are _always_ out of sync.
	for i := 0; ; i++ {
		// Give goroutines about to exit a chance to exit.
		// The NumGoroutine and Stack below need to see
		// the same state of the world, so anything we can do
		// to keep it quiet is good.
		runtime.Gosched()

		n := runtime.NumGoroutine()
		buf = buf[:runtime.Stack(buf, true)]

		nstk := strings.Count(string(buf), "goroutine ")
		if n == nstk {
			break
		}
		if i >= 10 {
			t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
		}
	}
}
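
// TestPingPongHog checks scheduler fairness: a pair of goroutines doing a lot
// of work per handoff and a pair doing very little should still make progress
// within a small constant factor of each other.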
func TestPingPongHog(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
	done := make(chan bool)
	hogChan, lightChan := make(chan bool), make(chan bool)
	hogCount, lightCount := 0, 0

	run := func(limit int, counter *int, wake chan bool) {
		for {
			select {
			case <-done:
				return

			case <-wake:
				for i := 0; i < limit; i++ {
					*counter++
				}
				wake <- true
			}
		}
	}

	// Start two co-scheduled hog goroutines.
	for i := 0; i < 2; i++ {
		go run(1e6, &hogCount, hogChan)
	}

	// Start two co-scheduled light goroutines.
	for i := 0; i < 2; i++ {
		go run(1e3, &lightCount, lightChan)
	}

	// Start goroutine pairs and wait for a few preemption rounds.
	hogChan <- true
	lightChan <- true
	time.Sleep(100 * time.Millisecond)
	close(done)
	<-hogChan
	<-lightChan

	// Check that hogCount and lightCount are within a factor of
	// 5, which indicates that both pairs of goroutines handed off
	// the P within a time-slice to their buddy. We can use a
	// fairly large factor here to make this robust: if the
	// scheduler isn't working right, the gap should be ~1000X.
	const factor = 5
	if hogCount > lightCount*factor || lightCount > hogCount*factor {
		t.Fatalf("want hogCount/lightCount in [%v, %v]; got %d/%d = %g", 1.0/factor, factor, hogCount, lightCount, float64(hogCount)/float64(lightCount))
	}
}

func BenchmarkPingPongHog(b *testing.B) {
	if b.N == 0 {
		return
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	// Create a CPU hog
	stop, done := make(chan bool), make(chan bool)
	go func() {
		for {
			select {
			case <-stop:
				done <- true
				return
			default:
			}
		}
	}()

	// Ping-pong b.N times
	ping, pong := make(chan bool), make(chan bool)
	go func() {
		for j := 0; j < b.N; j++ {
			pong <- <-ping
		}
		close(stop)
		done <- true
	}()
	go func() {
		for i := 0; i < b.N; i++ {
			ping <- <-pong
		}
		done <- true
	}()

	b.ResetTimer()
	ping <- true // Start ping-pong
	<-stop
	b.StopTimer()
	<-ping // Let last ponger exit
	<-done // Make sure goroutines exit
	<-done
	<-done
}
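
// stackGrowthRecursive recurses i times; the 1KB pad array in each frame
// forces the stack to grow during the recursion.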
func stackGrowthRecursive(i int) {
	var pad [128]uint64
	if i != 0 && pad[0] == 0 {
		stackGrowthRecursive(i - 1)
	}
}

func TestPreemptSplitBig(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	t.Skip("gccgo does not implement preemption")
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	stop := make(chan int)
	go big(stop)
	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Microsecond) // let big start running
		runtime.GC()
	}
	close(stop)
}

func big(stop chan int) int {
	n := 0
	for {
		// delay so that gc is sure to have asked for a preemption
		for i := 0; i < 1e9; i++ {
			n++
		}

		// call bigframe, which used to miss the preemption in its prologue.
		bigframe(stop)

		// check if we've been asked to stop.
		select {
		case <-stop:
			return n
		default:
		}
	}
}

func bigframe(stop chan int) int {
	// not splitting the stack will overflow.
	// small will notice that it needs a stack split and will
	// catch the overflow.
	var x [8192]byte
	return small(stop, &x)
}

func small(stop chan int, x *[8192]byte) int {
	for i := range x {
		x[i] = byte(i)
	}
	sum := 0
	for i := range x {
		sum += int(x[i])
	}

	// keep small from being a leaf function, which might
	// make it not do any stack check at all.
	nonleaf(stop)

	return sum
}

func nonleaf(stop chan int) bool {
	// do something that won't be inlined:
	select {
	case <-stop:
		return true
	default:
		return false
	}
}

func TestSchedLocalQueue(t *testing.T) {
	runtime.RunSchedLocalQueueTest()
}

func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.RunSchedLocalQueueStealTest()
}

func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
		t.Skip("skipping on uniprocessor")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	// If runtime triggers a forced GC during this test then it will deadlock,
	// since the goroutines can't be stopped/preempted during spin wait.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	iters := int(1e5)
	if testing.Short() {
		iters = 1e2
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			stackGrowthRecursive(rec)
		}
	})
}

func BenchmarkStackGrowth(b *testing.B) {
	benchmarkStackGrowth(b, 10)
}

func BenchmarkStackGrowthDeep(b *testing.B) {
	benchmarkStackGrowth(b, 1024)
}

func BenchmarkCreateGoroutines(b *testing.B) {
	benchmarkCreateGoroutines(b, 1)
}

func BenchmarkCreateGoroutinesParallel(b *testing.B) {
	benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
}

func benchmarkCreateGoroutines(b *testing.B, procs int) {
	c := make(chan bool)
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			c <- true
			return
		}
		go f(n - 1)
	}
	for i := 0; i < procs; i++ {
		go f(b.N / procs)
	}
	for i := 0; i < procs; i++ {
		<-c
	}
}

func BenchmarkCreateGoroutinesCapture(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		const N = 4
		var wg sync.WaitGroup
		wg.Add(N)
		for i := 0; i < N; i++ {
			i := i
			go func() {
				if i >= N {
					b.Logf("bad") // just to capture b
				}
				wg.Done()
			}()
		}
		wg.Wait()
	}
}

func BenchmarkClosureCall(b *testing.B) {
	sum := 0
	off1 := 1
	for i := 0; i < b.N; i++ {
		off2 := 2
		func() {
			sum += i + off1 + off2
		}()
	}
	_ = sum
}

func benchmarkWakeupParallel(b *testing.B, spin func(time.Duration)) {
	if runtime.GOMAXPROCS(0) == 1 {
		b.Skip("skipping: GOMAXPROCS=1")
	}

	wakeDelay := 5 * time.Microsecond
	for _, delay := range []time.Duration{
		0,
		1 * time.Microsecond,
		2 * time.Microsecond,
		5 * time.Microsecond,
		10 * time.Microsecond,
		20 * time.Microsecond,
		50 * time.Microsecond,
		100 * time.Microsecond,
	} {
		b.Run(delay.String(), func(b *testing.B) {
			if b.N == 0 {
				return
			}
			// Start two goroutines, which alternate between being
			// sender and receiver in the following protocol:
			//
			// - The receiver spins for `delay` and then does a
			//   blocking receive on a channel.
			//
			// - The sender spins for `delay+wakeDelay` and then
			//   sends to the same channel. (The addition of
			//   `wakeDelay` improves the probability that the
			//   receiver will be blocking when the send occurs when
			//   the goroutines execute in parallel.)
			//
			// In each iteration of the benchmark, each goroutine
			// acts once as sender and once as receiver, so each
			// goroutine spins for delay twice.
			//
			// BenchmarkWakeupParallel is used to estimate how
			// efficiently the scheduler parallelizes goroutines in
			// the presence of blocking:
			//
			// - If both goroutines are executed on the same core,
			//   an increase in delay by N will increase the time per
			//   iteration by 4*N, because all 4 delays are
			//   serialized.
			//
			// - Otherwise, an increase in delay by N will increase
			//   the time per iteration by 2*N, and the time per
			//   iteration is 2 * (runtime overhead + chan
			//   send/receive pair + delay + wakeDelay). This allows
			//   the runtime overhead, including the time it takes
			//   for the unblocked goroutine to be scheduled, to be
			//   estimated.
			ping, pong := make(chan struct{}), make(chan struct{})
			start := make(chan struct{})
			done := make(chan struct{})
			go func() {
				<-start
				for i := 0; i < b.N; i++ {
					// sender
					spin(delay + wakeDelay)
					ping <- struct{}{}
					// receiver
					spin(delay)
					<-pong
				}
				done <- struct{}{}
			}()
			go func() {
				for i := 0; i < b.N; i++ {
					// receiver
					spin(delay)
					<-ping
					// sender
					spin(delay + wakeDelay)
					pong <- struct{}{}
				}
				done <- struct{}{}
			}()
			b.ResetTimer()
			start <- struct{}{}
			<-done
			<-done
		})
	}
}

func BenchmarkWakeupParallelSpinning(b *testing.B) {
	benchmarkWakeupParallel(b, func(d time.Duration) {
		end := time.Now().Add(d)
		for time.Now().Before(end) {
			// do nothing
		}
	})
}

// sysNanosleep is defined by OS-specific files (such as runtime_linux_test.go)
// to sleep for the given duration. If nil, dependent tests are skipped.
// The implementation should invoke a blocking system call and not
// call time.Sleep, which would deschedule the goroutine.
var sysNanosleep func(d time.Duration)

func BenchmarkWakeupParallelSyscall(b *testing.B) {
	if sysNanosleep == nil {
		b.Skipf("skipping on %v; sysNanosleep not defined", runtime.GOOS)
	}
	benchmarkWakeupParallel(b, func(d time.Duration) {
		sysNanosleep(d)
	})
}

type Matrix [][]float64

func BenchmarkMatmult(b *testing.B) {
	b.StopTimer()
	// matmult is O(N**3) but testing expects O(b.N),
	// so we need to take cube root of b.N
	n := int(math.Cbrt(float64(b.N))) + 1
	A := makeMatrix(n)
	B := makeMatrix(n)
	C := makeMatrix(n)
	b.StartTimer()
	matmult(nil, A, B, C, 0, n, 0, n, 0, n, 8)
}

func makeMatrix(n int) Matrix {
	m := make(Matrix, n)
	for i := 0; i < n; i++ {
		m[i] = make([]float64, n)
		for j := 0; j < n; j++ {
			m[i][j] = float64(i*n + j)
		}
	}
	return m
}
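
// matmult computes C += A*B over the index ranges [i0,i1) x [j0,j1) with
// inner dimension [k0,k1), recursively splitting the largest dimension in
// half and running one half in a new goroutine until every dimension drops
// below threshold; the k axis is split sequentially because parallel halves
// would race on C.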
func matmult(done chan<- struct{}, A, B, C Matrix, i0, i1, j0, j1, k0, k1, threshold int) {
	di := i1 - i0
	dj := j1 - j0
	dk := k1 - k0
	if di >= dj && di >= dk && di >= threshold {
		// divide in two by y axis
		mi := i0 + di/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, mi, j0, j1, k0, k1, threshold)
		matmult(nil, A, B, C, mi, i1, j0, j1, k0, k1, threshold)
		<-done1
	} else if dj >= dk && dj >= threshold {
		// divide in two by x axis
		mj := j0 + dj/2
		done1 := make(chan struct{}, 1)
		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
		<-done1
	} else if dk >= threshold {
		// divide in two by "k" axis
		// deliberately not parallel because of data races
		mk := k0 + dk/2
		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
	} else {
		// the matrices are small enough, compute directly
		for i := i0; i < i1; i++ {
			for j := j0; j < j1; j++ {
				for k := k0; k < k1; k++ {
					C[i][j] += A[i][k] * B[k][j]
				}
			}
		}
	}
	if done != nil {
		done <- struct{}{}
	}
}

func TestStealOrder(t *testing.T) {
	runtime.RunStealOrderTest()
}

func TestLockOSThreadNesting(t *testing.T) {
	go func() {
		e, i := runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
		runtime.LockOSThread()
		runtime.LockOSThread()
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 1 || i != 0 {
			t.Errorf("want locked counts 1, 0; got %d, %d", e, i)
			return
		}
		runtime.UnlockOSThread()
		e, i = runtime.LockOSCounts()
		if e != 0 || i != 0 {
			t.Errorf("want locked counts 0, 0; got %d, %d", e, i)
			return
		}
	}()
}

func TestLockOSThreadExit(t *testing.T) {
	testLockOSThreadExit(t, "testprog")
}

func testLockOSThreadExit(t *testing.T, prog string) {
	output := runTestProg(t, prog, "LockOSThreadMain", "GOMAXPROCS=1")
	want := "OK\n"
	if output != want {
		t.Errorf("want %s, got %s\n", want, output)
	}

	output = runTestProg(t, prog, "LockOSThreadAlt")
	if output != want {
		t.Errorf("want %s, got %s\n", want, output)
	}
}