// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
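
// As a rough sketch (not part of the original source, and much simpler than the
// real sync.Mutex algorithm), a higher-level lock built on these primitives
// keeps its own fast-path state and only falls back to the semaphore when it
// has to sleep:
//
//	func lockSlow(state, sema *uint32) {
//		for {
//			if atomic.Cas(state, 0, 1) {
//				return // acquired without sleeping
//			}
//			semacquire(sema, true) // sleep; a later semrelease pairs with this
//		}
//	}
//
//	func unlockSlow(state, sema *uint32) {
//		atomic.Store(state, 0)
//		semrelease(sema) // wake one sleeper, or pre-pay the next semacquire if none is parked yet
//	}
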
package runtime

// Export temporarily for gccgo's C code to call:
//go:linkname semacquire runtime.semacquire
//go:linkname semrelease runtime.semrelease

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Asynchronous semaphore for sync.Mutex.

type semaRoot struct {
	lock  mutex
	head  *sudog
	tail  *sudog
	nwait uint32 // Number of waiters. Read w/o the lock.
}

// Prime to not correlate with any user patterns.
const semTabSize = 251

var semtable [semTabSize]struct {
	root semaRoot
	pad  [sys.CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
}

//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname net_runtime_Semacquire net.runtime_Semacquire
func net_runtime_Semacquire(addr *uint32) {
	semacquire(addr, true)
}

//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

//go:linkname net_runtime_Semrelease net.runtime_Semrelease
func net_runtime_Semrelease(addr *uint32) {
	semrelease(addr)
}

func readyWithTime(s *sudog, traceskip int) {
	if s.releasetime != 0 {
		s.releasetime = cputicks()
	}
	goready(s.g, traceskip)
}

// Called from runtime.
func semacquire(addr *uint32, profile bool) {
	gp := getg()
	if gp != gp.m.curg {
		throw("semacquire not on the G stack")
	}

	// Easy case.
	if cansemacquire(addr) {
		return
	}

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	s := acquireSudog()
	root := semroot(addr)
	t0 := int64(0)
	s.releasetime = 0
	if profile && blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	for {
		lock(&root.lock)
		// Add ourselves to nwait to disable "easy case" in semrelease.
		atomic.Xadd(&root.nwait, 1)
		// Check cansemacquire to avoid missed wakeup.
		if cansemacquire(addr) {
			atomic.Xadd(&root.nwait, -1)
			unlock(&root.lock)
			break
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		root.queue(addr, s)
		goparkunlock(&root.lock, "semacquire", traceEvGoBlockSync, 4)
		if cansemacquire(addr) {
			break
		}
	}
	if s.releasetime > 0 {
		blockevent(s.releasetime-t0, 3)
	}
	releaseSudog(s)
}

func semrelease(addr *uint32) {
	root := semroot(addr)
	atomic.Xadd(addr, 1)

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if atomic.Load(&root.nwait) == 0 {
		return
	}

	// Harder case: search for a waiter and wake it.
	lock(&root.lock)
	if atomic.Load(&root.nwait) == 0 {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		unlock(&root.lock)
		return
	}
	s := root.head
	for ; s != nil; s = s.next {
		if s.elem == unsafe.Pointer(addr) {
			atomic.Xadd(&root.nwait, -1)
			root.dequeue(s)
			break
		}
	}
	unlock(&root.lock)
	if s != nil {
		readyWithTime(s, 5)
	}
}
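
// A worked interleaving (illustrative, not part of the original source) showing
// how the ordering above pairs a sleeping acquirer with its wakeup:
//
//	acquirer                              releaser
//	--------                              --------
//	cansemacquire fails (*addr == 0)
//	atomic.Xadd(&root.nwait, 1)
//	cansemacquire fails again; parks
//	                                      atomic.Xadd(addr, 1)
//	                                      atomic.Load(&root.nwait) == 1
//	                                      slow path: dequeues and wakes acquirer
//	cansemacquire succeeds; returns
//
// If semrelease instead loaded nwait before incrementing *addr, this schedule
// would become possible: the releaser reads nwait == 0 before the acquirer has
// registered, the acquirer then bumps nwait, re-checks cansemacquire while
// *addr is still 0, and parks, and only afterwards does the releaser increment
// *addr and return without waking anyone. That lost wakeup is exactly what the
// "must happen after the xadd" comment in semrelease rules out.
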
func semroot(addr *uint32) *semaRoot {
	return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
}
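
// As an illustration (not from the original source): for an 8-byte-aligned
// address, ((addr+8)>>3)%semTabSize is one bucket past (addr>>3)%semTabSize,
// so neighbouring semaphore words land on different roots, while the prime
// table size keeps larger, regular strides in user data from folding onto a
// single root (see the semTabSize comment above).
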
func cansemacquire(addr *uint32) bool {
	for {
		v := atomic.Load(addr)
		if v == 0 {
			return false
		}
		if atomic.Cas(addr, v, v-1) {
			return true
		}
	}
}

func (root *semaRoot) queue(addr *uint32, s *sudog) {
	s.g = getg()
	s.elem = unsafe.Pointer(addr)
	s.next = nil
	s.prev = root.tail
	if root.tail != nil {
		root.tail.next = s
	} else {
		root.head = s
	}
	root.tail = s
}

func (root *semaRoot) dequeue(s *sudog) {
	if s.next != nil {
		s.next.prev = s.prev
	} else {
		root.tail = s.prev
	}
	if s.prev != nil {
		s.prev.next = s.next
	} else {
		root.head = s.next
	}
	s.elem = nil
	s.next = nil
	s.prev = nil
}

// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
type notifyList struct {
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	wait uint32

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	notify uint32

	// List of parked waiters.
	lock mutex
	head *sudog
	tail *sudog
}

// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
func less(a, b uint32) bool {
	return int32(a-b) < 0
}
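
// A quick worked example of the wraparound handling (illustrative): with
// a = 0xFFFFFFFF and b = 1, a-b wraps to 0xFFFFFFFE, and int32(0xFFFFFFFE) is
// -2, so less(a, b) reports true. That is the right answer: b is a ticket
// issued two increments after a, even though it compares numerically smaller.
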
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32 {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	return atomic.Xadd(&l.wait, 1) - 1
}
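
// A minimal sketch (assumed, not taken from the sync package's actual source)
// of how a sync.Cond-style Wait uses the two hooks above through their
// runtime_notifyList* linknames:
//
//	func (c *Cond) Wait() {
//		t := runtime_notifyListAdd(&c.notify) // take a ticket before unlocking
//		c.L.Unlock()
//		runtime_notifyListWait(&c.notify, t) // park until ticket t is notified
//		c.L.Lock()
//	}
//
// Taking the ticket before releasing c.L is what makes a Signal that races with
// Wait pair up correctly: the waiter either finds its ticket already notified
// and returns immediately, or parks and is later found by its ticket number.
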
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32) {
	lock(&l.lock)

	// Return right away if this ticket has already been notified.
	if less(t, l.notify) {
		unlock(&l.lock)
		return
	}

	// Enqueue itself.
	s := acquireSudog()
	s.g = getg()
	s.ticket = t
	s.releasetime = 0
	t0 := int64(0)
	if blockprofilerate > 0 {
		t0 = cputicks()
		s.releasetime = -1
	}
	if l.tail == nil {
		l.head = s
	} else {
		l.tail.next = s
	}
	l.tail = s
	goparkunlock(&l.lock, "semacquire", traceEvGoBlockCond, 3)
	if t0 != 0 {
		blockevent(s.releasetime-t0, 2)
	}
	releaseSudog(s)
}

// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	lock(&l.lock)
	s := l.head
	l.head = nil
	l.tail = nil

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	atomic.Store(&l.notify, atomic.Load(&l.wait))
	unlock(&l.lock)

	// Go through the local list and ready all waiters.
	for s != nil {
		next := s.next
		s.next = nil
		readyWithTime(s, 4)
		s = next
	}
}

// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if atomic.Load(&l.wait) == atomic.Load(&l.notify) {
		return
	}

	lock(&l.lock)

	// Re-check under the lock if we need to do anything.
	t := l.notify
	if t == atomic.Load(&l.wait) {
		unlock(&l.lock)
		return
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	atomic.Store(&l.notify, t+1)
	for p, s := (*sudog)(nil), l.head; s != nil; p, s = s, s.next {
		if s.ticket == t {
			n := s.next
			if p != nil {
				p.next = n
			} else {
				l.head = n
			}
			if n == nil {
				l.tail = p
			}
			unlock(&l.lock)
			s.next = nil
			readyWithTime(s, 4)
			return
		}
	}
	unlock(&l.lock)
}
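
// A matching sketch (again assumed, not the sync package's actual source) of
// the notifying side:
//
//	func (c *Cond) Signal()    { runtime_notifyListNotifyOne(&c.notify) }
//	func (c *Cond) Broadcast() { runtime_notifyListNotifyAll(&c.notify) }
//
// Neither call requires the caller to hold c.L; the fast-path comparison of
// wait and notify above is what lets a Signal or Broadcast with no pending
// waiters return without touching the runtime-side lock.
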
//go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr) {
	if sz != unsafe.Sizeof(notifyList{}) {
		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
		throw("bad notifyList size")
	}
}