// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
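//
// A sketch of the intended use (illustrative only, not code from this
// file): a sync.Mutex-style lock that loses the race for the lock word
// calls runtime_semacquire on its contention counter to sleep, and the
// unlocking goroutine calls runtime_semrelease on the same counter to
// wake exactly one sleeper, even if the release races ahead of the
// corresponding acquire.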
typedef struct SemaWaiter SemaWaiter;
	uint32 volatile* addr;
	int32 nrelease;	// -1 for acquire
typedef struct SemaRoot SemaRoot;
	// Number of waiters. Read w/o the lock.
	uint32 volatile nwait;
// Prime to not correlate with any user patterns.
#define SEMTABLESZ 251
	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
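// The pad above rounds each semtable entry up to a full cache line, so
// SemaRoots for unrelated addresses that hash to adjacent slots do not
// false-share a cache line.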
static struct semtable semtable[SEMTABLESZ];
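// semroot maps a semaphore address to its SemaRoot. Shifting the address
// right by 3 drops low-order bits that carry little entropy for aligned
// words before hashing into the prime-sized table.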
semroot(uint32 volatile *addr)
	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
semdequeue(SemaRoot *root, SemaWaiter *s)
	s->next->prev = s->prev;
	s->prev->next = s->next;
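// cansemacquire tries to take one unit of the semaphore without blocking:
// while the count is positive it attempts to CAS the count down by one,
// succeeding as soon as a CAS lands and failing once the count is (or
// becomes) zero.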
cansemacquire(uint32 volatile *addr)
	while((v = runtime_atomicload(addr)) > 0)
		if(runtime_cas(addr, v, v-1))
static void readyWithTime(SudoG* s, int traceskip __attribute__ ((unused))) {
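	// A nonzero releasetime means this waiter asked for block profiling;
	// record the wakeup time before the goroutine is made runnable.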
	if (s->releasetime != 0) {
		s->releasetime = runtime_cputicks();
runtime_semacquire(uint32 volatile *addr, bool profile)
	SemaWaiter s;	// Needs to be allocated on the stack, otherwise the garbage collector could deallocate it
	if(cansemacquire(addr))
	// increment waiter count
	// try cansemacquire one more time, return if succeeded
	// enqueue itself as a waiter
	// (waiter descriptor is dequeued by signaler)
	root = semroot(addr);
	if(profile && runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
	// Add ourselves to nwait to disable "easy case" in semrelease.
	runtime_xadd(&root->nwait, 1);
	// Check cansemacquire to avoid missed wakeup.
	if(cansemacquire(addr)) {
		runtime_xadd(&root->nwait, -1);
		runtime_unlock(root);
	// Any semrelease after the cansemacquire knows we're waiting
	// (we set nwait above), so go to sleep.
	semqueue(root, addr, &s);
	runtime_parkunlock(root, "semacquire");
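	// Woken by semrelease. The wakeup does not hand over the count, so
	// re-check cansemacquire; another acquirer may have consumed the
	// count first, in which case the wait is retried.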
	if(cansemacquire(addr)) {
		runtime_blockevent(s.releasetime - t0, 3);
runtime_semrelease(uint32 volatile *addr)
	root = semroot(addr);
	runtime_xadd(addr, 1);
	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if(runtime_atomicload(&root->nwait) == 0)
	// Harder case: search for a waiter and wake it.
	if(runtime_atomicload(&root->nwait) == 0) {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		runtime_unlock(root);
	for(s = root->head; s; s = s->next) {
		if(s->addr == addr) {
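			// Found a waiter parked on this address: remove it from the
			// list while holding the lock, then wake it after the unlock
			// below.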
			runtime_xadd(&root->nwait, -1);
	runtime_unlock(root);
		s->releasetime = runtime_cputicks();
// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
void net_runtime_Semacquire(uint32 *addr)
	__asm__ (GOSYM_PREFIX "net.runtime_Semacquire");
void net_runtime_Semacquire(uint32 *addr)
	runtime_semacquire(addr, true);
void net_runtime_Semrelease(uint32 *addr)
	__asm__ (GOSYM_PREFIX "net.runtime_Semrelease");
void net_runtime_Semrelease(uint32 *addr)
	runtime_semrelease(addr);
func runtime_Semacquire(addr *uint32) {
	runtime_semacquire(addr, true);
func runtime_Semrelease(addr *uint32) {
	runtime_semrelease(addr);
typedef struct SyncSema SyncSema;
func runtime_Syncsemcheck(size uintptr) {
	if(size != sizeof(SyncSema)) {
		runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
		runtime_throw("bad SyncSema size");
// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
func runtime_Syncsemacquire(s *SyncSema) {
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
	if(s->head && s->head->nrelease > 0) {
		// have pending release, consume it
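		// The releaser parked with nrelease set to the number of acquires
		// it must satisfy; the releaser itself is woken only once that
		// count reaches zero.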
		if(s->head->nrelease == 0) {
			s->head = wake->next;
		runtime_ready(wake->g);
		runtime_parkunlock(s, "semacquire");
		runtime_blockevent(w.releasetime - t0, 2);
// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
func runtime_Syncsemrelease(s *SyncSema, n uint32) {
	w.nrelease = (int32)n;
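	// w represents the n releases being handed out; it parks below only
	// if some of them remain after waking the already-queued acquirers.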
	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
		// have pending acquire, satisfy it
		s->head = wake->next;
		if(wake->releasetime)
			wake->releasetime = runtime_cputicks();
		runtime_ready(wake->g);
		runtime_parkunlock(s, "semrelease");
// notifyList is a ticket-based notification list used to implement sync.Cond.
// It must be kept in sync with the sync package.
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
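	//
	// For example, with notify == 5 and wait == 8, tickets 5, 6 and 7 are
	// outstanding: notifyOne wakes ticket 5 and advances notify to 6,
	// while notifyAll advances notify all the way to 8.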
	// List of parked waiters.
// less reports whether a < b, treating a and b as running counts that may
// wrap around the 32-bit range, under the assumption that their "unwrapped"
// difference is always less than 2^31.
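// For example, less(0xFFFFFFFF, 0) is true: a-b wraps to 0xFFFFFFFF, which
// is -1 when reinterpreted as an int32.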
static bool less(uint32 a, uint32 b) {
	return (int32)(a-b) < 0;
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) (r uint32) {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	r = runtime_xadd(&l->wait, 1) - 1;
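	// r is this caller's ticket: the value of wait before the increment.
	// The caller later passes it to notifyListWait.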
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32) {
	runtime_lock(&l->lock);
	// Return right away if this ticket has already been notified.
	if (less(t, l->notify)) {
		runtime_unlock(&l->lock);
	runtime_memclr(&s, sizeof(s));
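	// s is a stack-allocated SudoG; the notifier unlinks it from the list
	// before readying this goroutine, so it never outlives this frame.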
	if (runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
	if (l->tail == nil) {
	runtime_parkunlock(&l->lock, "semacquire");
		runtime_blockevent(s.releasetime-t0, 2);
// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
	// Pull the list out into a local variable, waiters will be readied
	runtime_lock(&l->lock);
	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	runtime_atomicstore(&l->notify, runtime_atomicload(&l->wait));
	runtime_unlock(&l->lock);
	// Go through the local list and ready all waiters.
		SudoG* next = s->next;
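		// Save the link before readying s: once readied, the waiter may
		// return and reuse its stack-allocated SudoG.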
// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func runtime_notifyListNotifyOne(l *notifyList) {
	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
	runtime_lock(&l->lock);
	// Re-check under the lock if we need to do anything.
	if (t == runtime_atomicload(&l->wait)) {
		runtime_unlock(&l->lock);
	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	runtime_atomicstore(&l->notify, t+1);
	for (p = nil, s = l->head; s != nil; p = s, s = s->next) {
		if (s->ticket == t) {
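			// Unlink the matching waiter while holding the lock, then
			// ready it after unlocking so it can immediately reuse its
			// stack-allocated SudoG.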
			runtime_unlock(&l->lock);
	runtime_unlock(&l->lock);
//go:linkname notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(sz uintptr) {
	if (sz != sizeof(notifyList)) {
		runtime_printf("runtime: bad notifyList size\n");
		runtime_throw("bad notifyList size");