runtime: scan register backing store on ia64
[official-gcc.git] / libgo / go / runtime / chan_test.go
blob29fb321c926b7d2314b11ac3b21202c3e51ebc9e
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime_test
7 import (
8 "internal/testenv"
9 "math"
10 "runtime"
11 "sync"
12 "sync/atomic"
13 "testing"
14 "time"
// TestChan exercises the basic channel contract across capacities 0..N-1:
// empty-receive blocks, full-send blocks, non-blocking select cases do not
// block, closed channels drain in FIFO order then yield zero values,
// close unblocks receivers, FIFO ordering is preserved for both receive
// forms, concurrent senders/receivers account for every value, and
// len/cap report correctly.
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two receiver goroutines started above.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the sender goroutine started above.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			// Every value 0..L-1 must have been seen exactly P times.
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}
	}
}
// TestNonblockRecvRace races a non-blocking receive against close on a
// buffered channel that already holds a value: the select must always
// find the channel ready (either via the buffered value or the close).
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	} else {
		if runtime.GOARCH == "s390" {
			// Test uses too much address space on 31-bit S390.
			t.Skip("skipping long test on s390")
		}
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		// Possibly drain c1 so the select's only guaranteed-ready
		// channel becomes c2.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		// Possibly drain c1 so the select's only guaranteed-ready
		// channel becomes the closed c2.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // capture per-goroutine copy
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered chan a goroutine must
							// never receive its own send.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					// Done with this chan; disable its case.
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
440 func TestSelectFairness(t *testing.T) {
441 const trials = 10000
442 if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
443 testenv.SkipFlaky(t, 22047)
445 c1 := make(chan byte, trials+1)
446 c2 := make(chan byte, trials+1)
447 for i := 0; i < trials+1; i++ {
448 c1 <- 1
449 c2 <- 2
451 c3 := make(chan byte)
452 c4 := make(chan byte)
453 out := make(chan byte)
454 done := make(chan byte)
455 var wg sync.WaitGroup
456 wg.Add(1)
457 go func() {
458 defer wg.Done()
459 for {
460 var b byte
461 select {
462 case b = <-c3:
463 case b = <-c4:
464 case b = <-c1:
465 case b = <-c2:
467 select {
468 case out <- b:
469 case <-done:
470 return
474 cnt1, cnt2 := 0, 0
475 for i := 0; i < trials; i++ {
476 switch b := <-out; b {
477 case 1:
478 cnt1++
479 case 2:
480 cnt2++
481 default:
482 t.Fatalf("unexpected value %d on channel", b)
485 // If the select in the goroutine is fair,
486 // cnt1 and cnt2 should be about the same value.
487 // With 10,000 trials, the expected margin of error at
488 // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)).
489 r := float64(cnt1) / trials
490 e := math.Abs(r - 0.5)
491 t.Log(cnt1, cnt2, r, e)
492 if e > 4.4172/(2*math.Sqrt(trials)) {
493 t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
495 close(done)
496 wg.Wait()
// TestChanSendInterface checks that sending interface values (pointers
// boxed in interface{}) through blocking and non-blocking select sends
// does not crash.
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}
// TestPseudoRandomSend checks that when two send cases of a select are
// simultaneously ready, the runtime picks between them pseudorandomly
// rather than deterministically: after n selects between sending 0 and 1,
// both values must each account for more than a tenth of the results.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
// TestMultiConsumer feeds niter values through a work channel shared by
// nwork consumer goroutines and verifies that the count and sum of the
// forwarded results exactly match what was sent.
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}
599 func TestShrinkStackDuringBlockedSend(t *testing.T) {
600 // make sure that channel operations still work when we are
601 // blocked on a channel send and we shrink the stack.
602 // NOTE: this test probably won't fail unless stack1.go:stackDebug
603 // is set to >= 1.
604 const n = 10
605 c := make(chan int)
606 done := make(chan struct{})
608 go func() {
609 for i := 0; i < n; i++ {
610 c <- i
611 // use lots of stack, briefly.
612 stackGrowthRecursive(20)
614 done <- struct{}{}
617 for i := 0; i < n; i++ {
618 x := <-c
619 if x != i {
620 t.Errorf("bad channel read: want %d, got %d", i, x)
622 // Waste some time so sender can finish using lots of stack
623 // and block in channel send.
624 time.Sleep(1 * time.Millisecond)
625 // trigger GC which will shrink the stack of the sender.
626 runtime.GC()
628 <-done
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
660 var selectSink interface{}
662 func TestSelectStackAdjust(t *testing.T) {
663 // Test that channel receive slots that contain local stack
664 // pointers are adjusted correctly by stack shrinking.
665 c := make(chan *int)
666 d := make(chan *int)
667 ready1 := make(chan bool)
668 ready2 := make(chan bool)
670 f := func(ready chan bool, dup bool) {
671 // Temporarily grow the stack to 10K.
672 stackGrowthRecursive((10 << 10) / (128 * 8))
674 // We're ready to trigger GC and stack shrink.
675 ready <- true
677 val := 42
678 var cx *int
679 cx = &val
681 var c2 chan *int
682 var d2 chan *int
683 if dup {
684 c2 = c
685 d2 = d
688 // Receive from d. cx won't be affected.
689 select {
690 case cx = <-c:
691 case <-c2:
692 case <-d:
693 case <-d2:
696 // Check that pointer in cx was adjusted correctly.
697 if cx != &val {
698 t.Error("cx no longer points to val")
699 } else if val != 42 {
700 t.Error("val changed")
701 } else {
702 *cx = 43
703 if val != 43 {
704 t.Error("changing *cx failed to change val")
707 ready <- true
710 go f(ready1, false)
711 go f(ready2, true)
713 // Let the goroutines get into the select.
714 <-ready1
715 <-ready2
716 time.Sleep(10 * time.Millisecond)
718 // Force concurrent GC a few times.
719 var before, after runtime.MemStats
720 runtime.ReadMemStats(&before)
721 for i := 0; i < 100; i++ {
722 selectSink = new([1 << 20]byte)
723 runtime.ReadMemStats(&after)
724 if after.NumGC-before.NumGC >= 2 {
725 goto done
728 t.Fatal("failed to trigger concurrent GC")
729 done:
730 selectSink = nil
732 // Wake selects.
733 close(d)
734 <-ready1
735 <-ready2
738 type struct0 struct{}
740 func BenchmarkMakeChan(b *testing.B) {
741 b.Run("Byte", func(b *testing.B) {
742 var x chan byte
743 for i := 0; i < b.N; i++ {
744 x = make(chan byte, 8)
746 close(x)
748 b.Run("Int", func(b *testing.B) {
749 var x chan int
750 for i := 0; i < b.N; i++ {
751 x = make(chan int, 8)
753 close(x)
755 b.Run("Ptr", func(b *testing.B) {
756 var x chan *byte
757 for i := 0; i < b.N; i++ {
758 x = make(chan *byte, 8)
760 close(x)
762 b.Run("Struct", func(b *testing.B) {
763 b.Run("0", func(b *testing.B) {
764 var x chan struct0
765 for i := 0; i < b.N; i++ {
766 x = make(chan struct0, 8)
768 close(x)
770 b.Run("32", func(b *testing.B) {
771 var x chan struct32
772 for i := 0; i < b.N; i++ {
773 x = make(chan struct32, 8)
775 close(x)
777 b.Run("40", func(b *testing.B) {
778 var x chan struct40
779 for i := 0; i < b.N; i++ {
780 x = make(chan struct40, 8)
782 close(x)
// BenchmarkChanNonblocking measures a non-blocking receive (select with
// default) on a channel that is never ready.
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}
// BenchmarkSelectUncontended measures a two-case select where each
// parallel worker ping-pongs a token between its own pair of buffered
// channels, so there is no cross-goroutine contention.
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectSyncContended measures contended selects over shared
// unbuffered channels, with one sender goroutine spawned per parallel
// worker; all senders are shut down via done when the benchmark ends.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}
// BenchmarkSelectAsyncContended measures contended selects over two
// shared buffered channels sized to GOMAXPROCS, with all parallel
// workers ping-ponging tokens between them.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
// BenchmarkSelectNonblock measures four consecutive non-blocking select
// operations per iteration: receive/send on unbuffered channels (never
// ready for the default-free case) and on capacity-1 channels.
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
// BenchmarkChanUncontended measures buffered channel send/receive with a
// private channel per parallel worker (no contention).
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
// BenchmarkChanContended is like BenchmarkChanUncontended but all
// parallel workers share one buffered channel, so every operation
// contends on the same lock.
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
917 func benchmarkChanSync(b *testing.B, work int) {
918 const CallsPerSched = 1000
919 procs := 2
920 N := int32(b.N / CallsPerSched / procs * procs)
921 c := make(chan bool, procs)
922 myc := make(chan int)
923 for p := 0; p < procs; p++ {
924 go func() {
925 for {
926 i := atomic.AddInt32(&N, -1)
927 if i < 0 {
928 break
930 for g := 0; g < CallsPerSched; g++ {
931 if i%2 == 0 {
932 <-myc
933 localWork(work)
934 myc <- 0
935 localWork(work)
936 } else {
937 myc <- 0
938 localWork(work)
939 <-myc
940 localWork(work)
944 c <- true
947 for p := 0; p < procs; p++ {
952 func BenchmarkChanSync(b *testing.B) {
953 benchmarkChanSync(b, 0)
956 func BenchmarkChanSyncWork(b *testing.B) {
957 benchmarkChanSync(b, 1000)
// benchmarkChanProdCons runs GOMAXPROCS producer/consumer goroutine
// pairs over a channel of capacity chanSize; each side spins localWork
// iterations per value to model per-item processing. A value of 0 on
// myc tells a consumer to stop; c collects completion signals.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
1002 func BenchmarkChanProdCons0(b *testing.B) {
1003 benchmarkChanProdCons(b, 0, 0)
1006 func BenchmarkChanProdCons10(b *testing.B) {
1007 benchmarkChanProdCons(b, 10, 0)
1010 func BenchmarkChanProdCons100(b *testing.B) {
1011 benchmarkChanProdCons(b, 100, 0)
1014 func BenchmarkChanProdConsWork0(b *testing.B) {
1015 benchmarkChanProdCons(b, 0, 100)
1018 func BenchmarkChanProdConsWork10(b *testing.B) {
1019 benchmarkChanProdCons(b, 10, 100)
1022 func BenchmarkChanProdConsWork100(b *testing.B) {
1023 benchmarkChanProdCons(b, 100, 100)
// BenchmarkSelectProdCons is the producer/consumer benchmark with each
// channel operation wrapped in a select alongside never-firing timer and
// close cases, measuring select overhead on the hot path.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
// BenchmarkChanCreation measures allocating a capacity-1 channel and
// performing one send/receive round trip on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}
// BenchmarkChanSem measures using a buffered channel of empty structs as
// a counting semaphore (acquire via send, release via receive).
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
// BenchmarkChanPopular has n goroutines each selecting on one popular
// channel (c, never ready) plus a private channel; the main loop wakes
// every goroutine through its private channel each iteration, stressing
// the popular channel's long wait queue.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
1134 var (
1135 alwaysFalse = false
1136 workSink = 0
1139 func localWork(w int) {
1140 foo := 0
1141 for i := 0; i < w; i++ {
1142 foo /= (foo + 1)
1144 if alwaysFalse {
1145 workSink += foo