// Source: libgo/go/sync/waitgroup_test.go (gofrontend copy of the Go
// standard library's sync.WaitGroup tests).
1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package sync_test
import (
	"internal/race"
	"runtime"
	. "sync"
	"sync/atomic"
	"testing"
)
// testWaitGroup exercises two WaitGroups used as a reusable barrier:
// n goroutines signal arrival on wg1 and then block on wg2 until the
// main goroutine releases them one Done at a time.
func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
	n := 16
	wg1.Add(n)
	wg2.Add(n)
	exited := make(chan bool, n)
	for i := 0; i != n; i++ {
		go func(i int) {
			wg1.Done()
			wg2.Wait()
			exited <- true
		}(i)
	}
	wg1.Wait()
	for i := 0; i != n; i++ {
		select {
		case <-exited:
			// No goroutine may pass wg2.Wait before all n Dones are in.
			t.Fatal("WaitGroup released group too soon")
		default:
		}
		wg2.Done()
	}
	for i := 0; i != n; i++ {
		<-exited // Will block if barrier fails to unlock someone.
	}
}
41 func TestWaitGroup(t *testing.T) {
42 wg1 := &WaitGroup{}
43 wg2 := &WaitGroup{}
45 // Run the same test a few times to ensure barrier is in a proper state.
46 for i := 0; i != 8; i++ {
47 testWaitGroup(t, wg1, wg2)
51 func knownRacy(t *testing.T) {
52 if race.Enabled {
53 t.Skip("skipping known-racy test under the race detector")
// TestWaitGroupMisuse verifies that calling Done more times than Add
// panics with the documented message.
func TestWaitGroupMisuse(t *testing.T) {
	defer func() {
		err := recover()
		if err != "sync: negative WaitGroup counter" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	wg := &WaitGroup{}
	wg.Add(1)
	wg.Done()
	wg.Done() // Counter goes negative here; must panic.
	t.Fatal("Should panic")
}
71 func TestWaitGroupMisuse2(t *testing.T) {
72 knownRacy(t)
73 if testing.Short() {
74 t.Skip("skipping flaky test in short mode; see issue 11443")
76 if runtime.NumCPU() <= 2 {
77 t.Skip("NumCPU<=2, skipping: this test requires parallelism")
79 defer func() {
80 err := recover()
81 if err != "sync: negative WaitGroup counter" &&
82 err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
83 err != "sync: WaitGroup is reused before previous Wait has returned" {
84 t.Fatalf("Unexpected panic: %#v", err)
86 }()
87 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
88 done := make(chan interface{}, 2)
89 // The detection is opportunistically, so we want it to panic
90 // at least in one run out of a million.
91 for i := 0; i < 1e6; i++ {
92 var wg WaitGroup
93 wg.Add(1)
94 go func() {
95 defer func() {
96 done <- recover()
97 }()
98 wg.Wait()
99 }()
100 go func() {
101 defer func() {
102 done <- recover()
104 wg.Add(1) // This is the bad guy.
105 wg.Done()
107 wg.Done()
108 for j := 0; j < 2; j++ {
109 if err := <-done; err != nil {
110 panic(err)
114 t.Fatal("Should panic")
117 func TestWaitGroupMisuse3(t *testing.T) {
118 knownRacy(t)
119 if runtime.NumCPU() <= 1 {
120 t.Skip("NumCPU==1, skipping: this test requires parallelism")
122 defer func() {
123 err := recover()
124 if err != "sync: negative WaitGroup counter" &&
125 err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
126 err != "sync: WaitGroup is reused before previous Wait has returned" {
127 t.Fatalf("Unexpected panic: %#v", err)
130 defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
131 done := make(chan interface{}, 2)
132 // The detection is opportunistically, so we want it to panic
133 // at least in one run out of a million.
134 for i := 0; i < 1e6; i++ {
135 var wg WaitGroup
136 wg.Add(1)
137 go func() {
138 defer func() {
139 done <- recover()
141 wg.Done()
143 go func() {
144 defer func() {
145 done <- recover()
147 wg.Wait()
148 // Start reusing the wg before waiting for the Wait below to return.
149 wg.Add(1)
150 go func() {
151 wg.Done()
153 wg.Wait()
155 wg.Wait()
156 for j := 0; j < 2; j++ {
157 if err := <-done; err != nil {
158 panic(err)
162 t.Fatal("Should panic")
// TestWaitGroupRace checks that Wait does not wake up spuriously: after
// Wait returns, both spawned goroutines must have finished their
// atomic increments.
func TestWaitGroupRace(t *testing.T) {
	// Run this test for about 1ms.
	for i := 0; i < 1000; i++ {
		wg := &WaitGroup{}
		n := new(int32)
		// spawn goroutine 1
		wg.Add(1)
		go func() {
			atomic.AddInt32(n, 1)
			wg.Done()
		}()
		// spawn goroutine 2
		wg.Add(1)
		go func() {
			atomic.AddInt32(n, 1)
			wg.Done()
		}()
		// Wait for goroutine 1 and 2
		wg.Wait()
		if atomic.LoadInt32(n) != 2 {
			t.Fatal("Spurious wakeup from Wait")
		}
	}
}
// TestWaitGroupAlign embeds a WaitGroup after a single byte so its
// internal state starts at an odd offset, checking that WaitGroup
// works regardless of the alignment of its containing struct.
func TestWaitGroupAlign(t *testing.T) {
	type X struct {
		x  byte
		wg WaitGroup
	}
	var x X
	x.wg.Add(1)
	go func(x *X) {
		x.wg.Done()
	}(&x)
	x.wg.Wait()
}
// BenchmarkWaitGroupUncontended measures Add/Done/Wait on a WaitGroup
// private to each goroutine; the padding keeps each instance on its
// own cache line so there is no false sharing between workers.
func BenchmarkWaitGroupUncontended(b *testing.B) {
	type PaddedWaitGroup struct {
		WaitGroup
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var wg PaddedWaitGroup
		for pb.Next() {
			wg.Add(1)
			wg.Done()
			wg.Wait()
		}
	})
}
// benchmarkWaitGroupAddDone measures contended Add/Done pairs on a
// shared WaitGroup, with localWork iterations of busywork per pair to
// vary the contention level.
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
	var wg WaitGroup
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			wg.Add(1)
			// Busywork between Add and Done; kept live via the
			// final read of foo so it is not optimized away.
			for i := 0; i < localWork; i++ {
				foo *= 2
				foo /= 2
			}
			wg.Done()
		}
		_ = foo
	})
}
234 func BenchmarkWaitGroupAddDone(b *testing.B) {
235 benchmarkWaitGroupAddDone(b, 0)
238 func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
239 benchmarkWaitGroupAddDone(b, 100)
// benchmarkWaitGroupWait measures Wait on a WaitGroup whose counter is
// always zero (Wait returns immediately), with localWork iterations of
// busywork per call to vary the duty cycle.
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
	var wg WaitGroup
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			wg.Wait()
			// Busywork kept live via the final read of foo.
			for i := 0; i < localWork; i++ {
				foo *= 2
				foo /= 2
			}
		}
		_ = foo
	})
}
257 func BenchmarkWaitGroupWait(b *testing.B) {
258 benchmarkWaitGroupWait(b, 0)
261 func BenchmarkWaitGroupWaitWork(b *testing.B) {
262 benchmarkWaitGroupWait(b, 100)
265 func BenchmarkWaitGroupActuallyWait(b *testing.B) {
266 b.ReportAllocs()
267 b.RunParallel(func(pb *testing.PB) {
268 for pb.Next() {
269 var wg WaitGroup
270 wg.Add(1)
271 go func() {
272 wg.Done()
274 wg.Wait()