// gcc-mirror import note: 2016-08-05 Vladimir Makarov <vmakarov@redhat.com>
// Source: libgo/go/runtime/chan_test.go
// blob 4bd061dbc7b9cb242f7cce0f6e0593e27c181d1a
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)
// TestChan exercises core channel semantics across a range of capacities:
// blocking receive on empty, blocking send on full, non-blocking select,
// receive from closed channels, FIFO ordering, concurrent producers and
// consumers, and len/cap accounting.
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two receiver goroutines started above.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the sender goroutine.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			// Every value 0..L-1 must have been received exactly P times.
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}
	}
}
201 func TestNonblockRecvRace(t *testing.T) {
202 n := 10000
203 if testing.Short() {
204 n = 100
205 } else {
206 if runtime.GOARCH == "s390" {
207 // Test uses too much address space on 31-bit S390.
208 t.Skip("skipping long test on s390")
211 for i := 0; i < n; i++ {
212 c := make(chan int, 1)
213 c <- 1
214 go func() {
215 select {
216 case <-c:
217 default:
218 t.Fatal("chan is not ready")
221 close(c)
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		// Try to drain c1 so only c2 may remain ready.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		// Try to drain c1 so only the closed c2 may remain ready.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
301 func TestSelfSelect(t *testing.T) {
302 // Ensure that send/recv on the same chan in select
303 // does not crash nor deadlock.
304 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
305 for _, chanCap := range []int{0, 10} {
306 var wg sync.WaitGroup
307 wg.Add(2)
308 c := make(chan int, chanCap)
309 for p := 0; p < 2; p++ {
310 p := p
311 go func() {
312 defer wg.Done()
313 for i := 0; i < 1000; i++ {
314 if p == 0 || i%2 == 0 {
315 select {
316 case c <- p:
317 case v := <-c:
318 if chanCap == 0 && v == p {
319 t.Fatalf("self receive")
322 } else {
323 select {
324 case v := <-c:
325 if chanCap == 0 && v == p {
326 t.Fatalf("self receive")
328 case c <- p:
334 wg.Wait()
// TestSelectStress runs chaotic concurrent sends, receives and selects over
// four channels of different capacities and checks that the whole construct
// terminates without deadlocking.
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k // capture per-goroutine copy (pre-Go1.22 loop semantics)
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					// Done with this chan: nil it out so the select
					// stops picking it.
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
// TestChanSendInterface checks sends of interface values (both blocking and
// inside non-blocking selects) on a buffered channel of interface type.
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}
// TestPseudoRandomSend checks that when several cases of a select are ready,
// the chosen case is pseudorandom: over n iterations of a select between
// sending 0 and sending 1, both values must show up a non-trivial number
// of times.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
// TestMultiConsumer feeds niter values through a work queue consumed by
// nwork workers and checks that the count and sum of results match what
// was sent.
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}
533 func TestShrinkStackDuringBlockedSend(t *testing.T) {
534 // make sure that channel operations still work when we are
535 // blocked on a channel send and we shrink the stack.
536 // NOTE: this test probably won't fail unless stack1.go:stackDebug
537 // is set to >= 1.
538 const n = 10
539 c := make(chan int)
540 done := make(chan struct{})
542 go func() {
543 for i := 0; i < n; i++ {
544 c <- i
545 // use lots of stack, briefly.
546 stackGrowthRecursive(20)
548 done <- struct{}{}
551 for i := 0; i < n; i++ {
552 x := <-c
553 if x != i {
554 t.Errorf("bad channel read: want %d, got %d", i, x)
556 // Waste some time so sender can finish using lots of stack
557 // and block in channel send.
558 time.Sleep(1 * time.Millisecond)
559 // trigger GC which will shrink the stack of the sender.
560 runtime.GC()
562 <-done
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
594 var selectSink interface{}
596 func TestSelectStackAdjust(t *testing.T) {
597 // Test that channel receive slots that contain local stack
598 // pointers are adjusted correctly by stack shrinking.
599 c := make(chan *int)
600 d := make(chan *int)
601 ready1 := make(chan bool)
602 ready2 := make(chan bool)
604 f := func(ready chan bool, dup bool) {
605 // Temporarily grow the stack to 10K.
606 stackGrowthRecursive((10 << 10) / (128 * 8))
608 // We're ready to trigger GC and stack shrink.
609 ready <- true
611 val := 42
612 var cx *int
613 cx = &val
615 var c2 chan *int
616 var d2 chan *int
617 if dup {
618 c2 = c
619 d2 = d
622 // Receive from d. cx won't be affected.
623 select {
624 case cx = <-c:
625 case <-c2:
626 case <-d:
627 case <-d2:
630 // Check that pointer in cx was adjusted correctly.
631 if cx != &val {
632 t.Error("cx no longer points to val")
633 } else if val != 42 {
634 t.Error("val changed")
635 } else {
636 *cx = 43
637 if val != 43 {
638 t.Error("changing *cx failed to change val")
641 ready <- true
644 go f(ready1, false)
645 go f(ready2, true)
647 // Let the goroutines get into the select.
648 <-ready1
649 <-ready2
650 time.Sleep(10 * time.Millisecond)
652 // Force concurrent GC a few times.
653 var before, after runtime.MemStats
654 runtime.ReadMemStats(&before)
655 for i := 0; i < 100; i++ {
656 selectSink = new([1 << 20]byte)
657 runtime.ReadMemStats(&after)
658 if after.NumGC-before.NumGC >= 2 {
659 goto done
662 t.Fatal("failed to trigger concurrent GC")
663 done:
664 selectSink = nil
666 // Wake selects.
667 close(d)
668 <-ready1
669 <-ready2
// BenchmarkChanNonblocking measures a non-blocking receive that always hits
// the default case.
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}
// BenchmarkSelectUncontended measures a two-case select where each parallel
// worker owns its own pair of channels, so there is no contention.
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectSyncContended measures selects over shared unbuffered
// channels, with one sender goroutine spawned per parallel worker.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done) // stop the sender goroutines
}
// BenchmarkSelectAsyncContended measures selects over shared buffered
// channels contended by all parallel workers.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectNonblock measures non-blocking selects that always take the
// default case, for both receive and send, unbuffered and buffered.
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
// BenchmarkChanUncontended measures buffered sends/receives where each
// parallel worker owns its own channel.
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
// BenchmarkChanContended measures buffered sends/receives on a single
// channel shared by all parallel workers.
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
802 func benchmarkChanSync(b *testing.B, work int) {
803 const CallsPerSched = 1000
804 procs := 2
805 N := int32(b.N / CallsPerSched / procs * procs)
806 c := make(chan bool, procs)
807 myc := make(chan int)
808 for p := 0; p < procs; p++ {
809 go func() {
810 for {
811 i := atomic.AddInt32(&N, -1)
812 if i < 0 {
813 break
815 for g := 0; g < CallsPerSched; g++ {
816 if i%2 == 0 {
817 <-myc
818 localWork(work)
819 myc <- 0
820 localWork(work)
821 } else {
822 myc <- 0
823 localWork(work)
824 <-myc
825 localWork(work)
829 c <- true
832 for p := 0; p < procs; p++ {
837 func BenchmarkChanSync(b *testing.B) {
838 benchmarkChanSync(b, 0)
841 func BenchmarkChanSyncWork(b *testing.B) {
842 benchmarkChanSync(b, 1000)
// benchmarkChanProdCons measures producer/consumer throughput over a channel
// of capacity chanSize, with `localWork` units of busy work per message.
// A value of 0 on myc is the termination signal from producer to consumer.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends batches of 1s, then a terminating 0.
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives until the terminating 0.
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for both the producer and the consumer of each pair.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
887 func BenchmarkChanProdCons0(b *testing.B) {
888 benchmarkChanProdCons(b, 0, 0)
891 func BenchmarkChanProdCons10(b *testing.B) {
892 benchmarkChanProdCons(b, 10, 0)
895 func BenchmarkChanProdCons100(b *testing.B) {
896 benchmarkChanProdCons(b, 100, 0)
899 func BenchmarkChanProdConsWork0(b *testing.B) {
900 benchmarkChanProdCons(b, 0, 100)
903 func BenchmarkChanProdConsWork10(b *testing.B) {
904 benchmarkChanProdCons(b, 10, 100)
907 func BenchmarkChanProdConsWork100(b *testing.B) {
908 benchmarkChanProdCons(b, 100, 100)
// BenchmarkSelectProdCons is the select-based variant of the producer/consumer
// benchmark: each channel op goes through a select with idle timer/close cases.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for both the producer and the consumer of each pair.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
// BenchmarkChanCreation measures allocating a fresh buffered channel and
// doing one send/receive round trip on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}
// BenchmarkChanSem measures using a buffered channel of empty structs as a
// counting semaphore.
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
// BenchmarkChanPopular measures waking n goroutines that each select on a
// private channel plus one popular shared channel.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
var (
	// alwaysFalse is never true; it prevents the compiler from optimizing
	// away the busy work in localWork.
	alwaysFalse = false
	// workSink is the dead store target guarded by alwaysFalse.
	workSink = 0
)
1024 func localWork(w int) {
1025 foo := 0
1026 for i := 0; i < w; i++ {
1027 foo /= (foo + 1)
1029 if alwaysFalse {
1030 workSink += foo