2017-03-02 Richard Biener <rguenther@suse.de>
[official-gcc.git] / libgo / go / runtime / chan_test.go
blobb96af8af5d78433e79745d2870b910f98205136c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime_test
import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)
// TestChan exercises the core channel semantics — blocking and
// non-blocking send/receive, close behavior, FIFO ordering, and
// len/cap — for a range of channel capacities.
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two pending receivers.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the pending sender.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}
	}
}
// TestNonblockRecvRace races a non-blocking receive (which must find the
// buffered value) against close of the same channel.
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	} else {
		if runtime.GOARCH == "s390" {
			// Test uses too much address space on 31-bit S390.
			t.Skip("skipping long test on s390")
		}
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // capture per-goroutine copy (pre-Go 1.22 loop-var semantics)
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered chan a goroutine must never
							// be matched with its own send.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}
// TestSelectStress hammers four channels of different capacities with
// dedicated senders/receivers plus one select-based sender and one
// select-based receiver, checking the whole construct never deadlocks.
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil // done with this chan; nil channel drops out of select
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
// TestChanSendInterface checks blocking and non-blocking sends of an
// interface value (pointer boxed in interface{}) on a buffered channel.
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}
// TestPseudoRandomSend checks that when several cases of a select are
// ready, the runtime picks among them pseudorandomly rather than always
// choosing the same case.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
// TestMultiConsumer runs many workers draining one work channel into one
// result channel and verifies no value is lost or duplicated.
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}
538 func TestShrinkStackDuringBlockedSend(t *testing.T) {
539 // make sure that channel operations still work when we are
540 // blocked on a channel send and we shrink the stack.
541 // NOTE: this test probably won't fail unless stack1.go:stackDebug
542 // is set to >= 1.
543 const n = 10
544 c := make(chan int)
545 done := make(chan struct{})
547 go func() {
548 for i := 0; i < n; i++ {
549 c <- i
550 // use lots of stack, briefly.
551 stackGrowthRecursive(20)
553 done <- struct{}{}
556 for i := 0; i < n; i++ {
557 x := <-c
558 if x != i {
559 t.Errorf("bad channel read: want %d, got %d", i, x)
561 // Waste some time so sender can finish using lots of stack
562 // and block in channel send.
563 time.Sleep(1 * time.Millisecond)
564 // trigger GC which will shrink the stack of the sender.
565 runtime.GC()
567 <-done
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
599 var selectSink interface{}
601 func TestSelectStackAdjust(t *testing.T) {
602 // Test that channel receive slots that contain local stack
603 // pointers are adjusted correctly by stack shrinking.
604 c := make(chan *int)
605 d := make(chan *int)
606 ready1 := make(chan bool)
607 ready2 := make(chan bool)
609 f := func(ready chan bool, dup bool) {
610 // Temporarily grow the stack to 10K.
611 stackGrowthRecursive((10 << 10) / (128 * 8))
613 // We're ready to trigger GC and stack shrink.
614 ready <- true
616 val := 42
617 var cx *int
618 cx = &val
620 var c2 chan *int
621 var d2 chan *int
622 if dup {
623 c2 = c
624 d2 = d
627 // Receive from d. cx won't be affected.
628 select {
629 case cx = <-c:
630 case <-c2:
631 case <-d:
632 case <-d2:
635 // Check that pointer in cx was adjusted correctly.
636 if cx != &val {
637 t.Error("cx no longer points to val")
638 } else if val != 42 {
639 t.Error("val changed")
640 } else {
641 *cx = 43
642 if val != 43 {
643 t.Error("changing *cx failed to change val")
646 ready <- true
649 go f(ready1, false)
650 go f(ready2, true)
652 // Let the goroutines get into the select.
653 <-ready1
654 <-ready2
655 time.Sleep(10 * time.Millisecond)
657 // Force concurrent GC a few times.
658 var before, after runtime.MemStats
659 runtime.ReadMemStats(&before)
660 for i := 0; i < 100; i++ {
661 selectSink = new([1 << 20]byte)
662 runtime.ReadMemStats(&after)
663 if after.NumGC-before.NumGC >= 2 {
664 goto done
667 t.Fatal("failed to trigger concurrent GC")
668 done:
669 selectSink = nil
671 // Wake selects.
672 close(d)
673 <-ready1
674 <-ready2
// BenchmarkChanNonblocking measures a failed non-blocking receive.
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}
// BenchmarkSelectUncontended measures select over two per-goroutine
// buffered channels (no cross-goroutine contention).
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectSyncContended measures select over shared unbuffered
// channels with a dedicated sender goroutine per worker.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done) // stop the sender goroutines
}
// BenchmarkSelectAsyncContended measures select over two shared
// buffered channels under contention.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectNonblock measures non-blocking (default-case) selects
// on both empty and ready channels.
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
// BenchmarkChanUncontended measures buffered sends/receives on a
// per-goroutine channel (no contention).
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
// BenchmarkChanContended measures buffered sends/receives on a single
// channel shared by all workers.
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
807 func benchmarkChanSync(b *testing.B, work int) {
808 const CallsPerSched = 1000
809 procs := 2
810 N := int32(b.N / CallsPerSched / procs * procs)
811 c := make(chan bool, procs)
812 myc := make(chan int)
813 for p := 0; p < procs; p++ {
814 go func() {
815 for {
816 i := atomic.AddInt32(&N, -1)
817 if i < 0 {
818 break
820 for g := 0; g < CallsPerSched; g++ {
821 if i%2 == 0 {
822 <-myc
823 localWork(work)
824 myc <- 0
825 localWork(work)
826 } else {
827 myc <- 0
828 localWork(work)
829 <-myc
830 localWork(work)
834 c <- true
837 for p := 0; p < procs; p++ {
842 func BenchmarkChanSync(b *testing.B) {
843 benchmarkChanSync(b, 0)
846 func BenchmarkChanSyncWork(b *testing.B) {
847 benchmarkChanSync(b, 1000)
// benchmarkChanProdCons measures a producer/consumer pipeline over a
// channel of capacity chanSize, with localWork units of busy work per
// message. A value of 0 on myc is the termination signal.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0 // tell one consumer to stop
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for all producers and consumers.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
892 func BenchmarkChanProdCons0(b *testing.B) {
893 benchmarkChanProdCons(b, 0, 0)
896 func BenchmarkChanProdCons10(b *testing.B) {
897 benchmarkChanProdCons(b, 10, 0)
900 func BenchmarkChanProdCons100(b *testing.B) {
901 benchmarkChanProdCons(b, 100, 0)
904 func BenchmarkChanProdConsWork0(b *testing.B) {
905 benchmarkChanProdCons(b, 0, 100)
908 func BenchmarkChanProdConsWork10(b *testing.B) {
909 benchmarkChanProdCons(b, 10, 100)
912 func BenchmarkChanProdConsWork100(b *testing.B) {
913 benchmarkChanProdCons(b, 100, 100)
// BenchmarkSelectProdCons is a producer/consumer pipeline where each
// side goes through a select with never-firing timer/close cases.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0 // tell one consumer to stop
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for all producers and consumers.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
// BenchmarkChanCreation measures creating a buffered channel and doing
// one send/receive on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}
// BenchmarkChanSem measures using a buffered channel of empty structs
// as a counting semaphore.
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
// BenchmarkChanPopular measures many goroutines selecting on one
// popular channel plus a private one; only the private channels fire.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
1024 var (
1025 alwaysFalse = false
1026 workSink = 0
1029 func localWork(w int) {
1030 foo := 0
1031 for i := 0; i < w; i++ {
1032 foo /= (foo + 1)
1034 if alwaysFalse {
1035 workSink += foo