runtime: copy Go 1.7 runtime semaphore code
[official-gcc.git] / libgo / go / runtime / sema.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
package runtime

// Export temporarily for gccgo's C code to call:
//go:linkname semacquire runtime.semacquire
//go:linkname semrelease runtime.semrelease

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
// Asynchronous semaphore for sync.Mutex.

type semaRoot struct {
	lock  mutex
	head  *sudog
	tail  *sudog
	nwait uint32 // Number of waiters. Read w/o the lock.
}
// Prime to not correlate with any user patterns.
const semTabSize = 251

var semtable [semTabSize]struct {
	root semaRoot
	pad  [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
}
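// Note (added for clarity): the pad field rounds each entry up to a full
// cache line, so contended semaRoots in neighboring slots do not false-share.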
//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname net_runtime_Semacquire net.runtime_Semacquire
func net_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

//go:linkname net_runtime_Semrelease net.runtime_Semrelease
func net_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}
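// readyWithTime records the wakeup time for block profiling (when armed by
// the caller via releasetime) and readies the sudog's goroutine.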
func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}
// Called from runtime.
func semacquire(addr *uint32, profile bool) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semroot(addr)
	t0 := int64(0)
	s.releasetime = 0
	if profile && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	for {
		lock(&root.lock)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		atomic.Xadd(&root.nwait, 1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			atomic.Xadd(&root.nwait, -1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s)
		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync, 4)
		if cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3)
	}
	releaseSudog(s)
}
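// An illustrative interleaving (added commentary, not from the original
// file) of the missed-wakeup race that the nwait handshake above prevents:
//
//	acquirer                        releaser
//	--------                        --------
//	cansemacquire fails (count 0)
//	                                atomic.Xadd(addr, 1)
//	                                sees nwait == 0, returns early
//	atomic.Xadd(&root.nwait, 1)
//	cansemacquire succeeds
//
// Because the acquirer re-checks cansemacquire after raising nwait, a
// release that raced ahead of the enqueue is never lost: the acquirer
// either consumes the count or is visibly queued before it sleeps.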
func semrelease(addr *uint32) {
	root := semroot(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if atomic.Load(&root.nwait) == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lock(&root.lock)
	if atomic.Load(&root.nwait) == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s := root.head
	for ; s != nil; s = s.next {
		if s.elem == unsafe.Pointer(addr) {
			atomic.Xadd(&root.nwait, -1)
			root.dequeue(s)
			break
		}
	}
	unlock(&root.lock)
	if s != nil {
		readyWithTime(s, 5)
	}
}
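// semroot hashes a semaphore address into its semtable slot. The low three
// bits are shifted off first since they carry little entropy for aligned
// addresses.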
func semroot(addr *uint32) *semaRoot {
	return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
}
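// cansemacquire tries to decrement the semaphore count without blocking.
// It reports false if the count is zero; otherwise it retries the CAS
// until it claims one unit.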
func cansemacquire(addr *uint32) bool {
	for {
		v := atomic.Load(addr)
		if v == 0 {
			return false
		}
		if atomic.Cas(addr, v, v-1) {
			return true
		}
	}
}
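// queue appends s, waiting on addr, to the tail of root's doubly-linked
// waiter list. The caller must hold root.lock.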
func (root *semaRoot) queue(addr *uint32, s *sudog) {
	s.g = getg()
	s.elem = unsafe.Pointer(addr)
	s.next = nil
	s.prev = root.tail
	if root.tail != nil {
		root.tail.next = s
	} else {
		root.head = s
	}
	root.tail = s
}
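// dequeue unlinks s from root's waiter list and clears its links.
// The caller must hold root.lock.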
func (root *semaRoot) dequeue(s *sudog) {
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		root.tail = s.prev
	}
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		root.head = s.next
	}
	s.elem = nil
	s.next = nil
	s.prev = nil
}
// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters.
	lock mutex
	head *sudog
	tail *sudog
}
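// An illustrative sketch of the ticket protocol as sync.Cond is assumed to
// drive it (added commentary, not part of the original file; c.L is the
// Cond's locker in this sketch):
//
//	t := notifyListAdd(l) // take a ticket while holding c.L
//	c.L.Unlock()
//	notifyListWait(l, t)  // park until a notification covers ticket t
//	c.L.Lock()
//
// Tickets make Signal wake waiters in arrival order: notifyListNotifyOne
// wakes the goroutine holding ticket l.notify, the oldest waiter.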
// less reports whether a < b, treating a and b as running counts that may
// wrap around the 32-bit range, whose "unwrapped" difference is always
// less than 2^31.
func less(a, b uint32) bool {
	return int32(a-b) < 0
}
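// For example, less(1, 2) is true; and across wraparound,
// less(0xffffffff, 0x00000001) is true because int32(0xffffffff-0x00000001)
// is negative, so a ticket issued just before the counter wraps still
// orders before one issued just after.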
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	return atomic.Xadd(&l.wait, 1) - 1
}
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lock(&l.lock)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, "semacquire", traceEvGoBlockCond, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	releaseSudog(s)
}
// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lock(&l.lock)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, atomic.Load(&l.wait))
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		readyWithTime(s, 4)
		s = next
	}
}
// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	lock(&l.lock)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == atomic.Load(&l.wait) {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	atomic.Store(&l.notify, t+1)
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}
//go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr) {
	if sz != unsafe.Sizeof(notifyList{}) {
		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
		throw("bad notifyList size")
	}
}