1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}
		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}
		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}
		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}
		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}
		{
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}
	}
}

func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	if runtime.GOARCH == "s390" {
		// Test uses too much address space on 31-bit S390.
		t.Skip("skipping long test on s390")
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Fatal("chan is not ready")
			}
		}()
		close(c)
		<-c
	}
}

// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
// (An illustrative sketch of this non-blocking select follows the function.)
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

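// Illustrative only (not part of the original file): the shape of the
// non-blocking select that the test above races against channel operations.
// With a default case, select never blocks; it commits to whichever case is
// ready at the single moment the cases are evaluated:
//
//	select {
//	case <-c1:
//		// c1 was ready
//	case <-c2:
//		// c2 was ready
//	default:
//		// must be unreachable while c1 or c2 is kept ready
//	}
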
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Fatalf("self receive")
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Fatalf("self receive")
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	// (A sketch of the nil-channel trick the select goroutines rely on
	// follows this function.)
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		// Send N values on each chan from a single select, disabling a
		// case once it has sent its N values.
		for i := 0; i < 4*N; i++ {
			// ...
		}
		wg.Done()
	}()
	go func() {
		// Receive N values on each chan from a single select, likewise.
		for i := 0; i < 4*N; i++ {
			// ...
		}
		wg.Done()
	}()
	wg.Wait()
}

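// Sketch (an assumption, for illustration; names n3 and c3 are hypothetical):
// once one of the select goroutines above has moved N values through a
// channel, it can disable that case by setting its local copy of the channel
// to nil. Operations on a nil channel block forever, so a nil-channel case
// can never fire:
//
//	if n3 == N {
//		c3 = nil // the case "c3 <- 0" is now permanently not ready
//	}
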
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					q <- v
				} else {
					r <- v
				}
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			// (stackGrowthRecursive is sketched after this function.)
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

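// stackGrowthRecursive is defined elsewhere in the runtime tests; it is not
// part of this file. A minimal equivalent (an assumption, for illustration
// only) pads each frame so that enough recursion forces stack growth:
//
//	func stackGrowthRecursive(i int) {
//		var pad [128]uint64 // keeps each frame large
//		if i != 0 && pad[0] == 0 {
//			stackGrowthRecursive(i - 1)
//		}
//	}
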
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

var selectSink interface{}

func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val
		// ...

		// Receive from d. cx won't be affected.
		// ...

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, true)
	go f(ready2, false)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	// ...
}

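// The benchmarks below can be run on their own with the standard tooling,
// for example:
//
//	go test runtime -run NONE -bench 'Chan|Select'
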
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}

func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}

func localWork(w int) {
	foo := 0
	for i := 0; i < w; i++ {
		foo /= (foo + 1)
	}
	// ... (sink foo so the loop is not optimized away)
}