[official-gcc.git] / libgo / go / sync / waitgroup.go
blob 92cc57d2cc87eaf9490203c3e3a45d5ca39081a6
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
// goroutines to wait for. Then each of the goroutines
// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
type WaitGroup struct {
	m       Mutex
	counter int32
	waiters int32
	sema    *uint32
}
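
// A minimal usage sketch of the pattern described in the doc comment above
// (illustrative only; it is not part of the package, and a real example would
// normally live in a *_test.go file; the function name is hypothetical):
// each goroutine is registered with Add before it starts, reports completion
// with Done, and the waiter blocks in Wait.
func exampleWaitGroupUsage() {
	var wg WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // Register the goroutine before starting it.
		go func() {
			defer wg.Done() // Signal completion when the work is finished.
			// ... do work ...
		}()
	}
	wg.Wait() // Block until the counter drops back to zero.
}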

// WaitGroup creates a new semaphore each time the old semaphore
// is released. This is to avoid the following race:
//
// G1: Add(1)
// G1: go G2()
// G1: Wait() // Context switch after Unlock() and before Semacquire().
// G2: Done() // Release semaphore: sema == 1, waiters == 0. G1 doesn't run yet.
// G3: Wait() // Finds counter == 0, waiters == 0, doesn't block.
// G3: Add(1) // Makes counter == 1, waiters == 0.
// G3: go G4()
// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.
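
// A sketch of the reuse pattern behind the race traced above (illustrative
// only, not part of the package; the function name is hypothetical): the same
// WaitGroup is reused for a second round of goroutines, so a semaphore
// released to wake an earlier Wait must not be allowed to satisfy a later one.
func exampleWaitGroupReuse() {
	var wg WaitGroup

	wg.Add(1) // First round (G1's Add in the trace above).
	go func() { wg.Done() }()
	wg.Wait() // Woken by the semaphore released in Done.

	wg.Add(1) // Second round reuses the same WaitGroup (G3 in the trace).
	go func() { wg.Done() }()
	wg.Wait() // Must block until the second Done, not return early.
}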

// Add adds delta, which may be negative, to the WaitGroup counter.
// If the counter becomes zero, all goroutines blocked on Wait are released.
// If the counter goes negative, Add panics.
//
// Note that calls with a positive delta that occur when the counter is zero
// must happen before a Wait. Calls with a negative delta, or calls with a
// positive delta that start when the counter is greater than zero, may happen
// at any time.
// Typically this means the calls to Add should execute before the statement
// creating the goroutine or other event to be waited for.
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
	if raceenabled {
		_ = wg.m.state // trigger nil deref early
		if delta < 0 {
			// Synchronize decrements with Wait.
			raceReleaseMerge(unsafe.Pointer(wg))
		}
		raceDisable()
		defer raceEnable()
	}
	v := atomic.AddInt32(&wg.counter, int32(delta))
	if raceenabled {
		if delta > 0 && v == int32(delta) {
			// The first increment must be synchronized with Wait.
			// Need to model this as a read, because there can be
			// several concurrent wg.counter transitions from 0.
			raceRead(unsafe.Pointer(&wg.sema))
		}
	}
	if v < 0 {
		panic("sync: negative WaitGroup counter")
	}
	if v > 0 || atomic.LoadInt32(&wg.waiters) == 0 {
		return
	}
	wg.m.Lock()
	if atomic.LoadInt32(&wg.counter) == 0 {
		for i := int32(0); i < wg.waiters; i++ {
			runtime_Semrelease(wg.sema)
		}
		wg.waiters = 0
		wg.sema = nil
	}
	wg.m.Unlock()
}
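
// A sketch of the placement rule from Add's doc comment (illustrative only,
// not part of the package; the function name is hypothetical): the Add that
// raises the counter from zero must happen before the corresponding Wait, so
// it belongs in the creating goroutine, before the go statement, rather than
// inside the new goroutine.
func exampleAddPlacement() {
	var wg WaitGroup

	// Correct: the counter is already 1 by the time Wait below can run,
	// so Wait cannot observe zero and return before the work is done.
	wg.Add(1)
	go func() {
		defer wg.Done()
		// ... do work ...
	}()

	// Incorrect (racy) would be calling wg.Add(1) inside the goroutine:
	// Wait could then find the counter still at zero and return immediately.

	wg.Wait()
}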

// Done decrements the WaitGroup counter.
func (wg *WaitGroup) Done() {
	wg.Add(-1)
}

// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
	if raceenabled {
		_ = wg.m.state // trigger nil deref early
		raceDisable()
	}
	if atomic.LoadInt32(&wg.counter) == 0 {
		if raceenabled {
			raceEnable()
			raceAcquire(unsafe.Pointer(wg))
		}
		return
	}
	wg.m.Lock()
	w := atomic.AddInt32(&wg.waiters, 1)
	// This code is racing with the unlocked path in Add above.
	// The code above modifies counter and then reads waiters.
	// We must modify waiters and then read counter (the opposite order)
	// to avoid missing an Add.
	if atomic.LoadInt32(&wg.counter) == 0 {
		atomic.AddInt32(&wg.waiters, -1)
		if raceenabled {
			raceEnable()
			raceAcquire(unsafe.Pointer(wg))
			raceDisable()
		}
		wg.m.Unlock()
		if raceenabled {
			raceEnable()
		}
		return
	}
	if raceenabled && w == 1 {
		// Wait must be synchronized with the first Add.
		// Need to model this as a write to race with the read in Add.
		// As a consequence, can do the write only for the first waiter,
		// otherwise concurrent Waits will race with each other.
		raceWrite(unsafe.Pointer(&wg.sema))
	}
	if wg.sema == nil {
		wg.sema = new(uint32)
	}
	s := wg.sema
	wg.m.Unlock()
	runtime_Semacquire(s)
	if raceenabled {
		raceEnable()
		raceAcquire(unsafe.Pointer(wg))
	}
}