// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
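// As a sketch of the intended use (illustrative only, not part of this
// file): the paper above builds a mutex from exactly this primitive, with
// an atomic fast path and semacquire/semrelease only under contention.
// The names Mu, mu_lock and mu_unlock below are hypothetical.
//
//	typedef struct { uint32 volatile key; uint32 volatile sema; } Mu;
//
//	static void mu_lock(Mu *m) {
//		if(runtime_xadd(&m->key, 1) == 1)
//			return;	// 0 -> 1: lock was free, we own it
//		runtime_semacquire(&m->sema, false);	// contended: sleep
//	}
//
//	static void mu_unlock(Mu *m) {
//		if(runtime_xadd(&m->key, -1) == 0)
//			return;	// no waiters
//		runtime_semrelease(&m->sema);	// wake exactly one sleeper
//	}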
package sync
#include "runtime.h"
#include "arch.h"

typedef struct SemaWaiter SemaWaiter;
struct SemaWaiter
{
	uint32 volatile*	addr;
	G*	g;
	int64	releasetime;
	int32	nrelease;	// -1 for acquire
	SemaWaiter*	prev;
	SemaWaiter*	next;
};
typedef struct SemaRoot SemaRoot;
struct SemaRoot
{
	Lock;
	SemaWaiter*	head;
	SemaWaiter*	tail;
	// Number of waiters. Read w/o the lock.
	uint32 volatile	nwait;
};
// Prime to not correlate with any user patterns.
#define SEMTABLESZ 251

struct semtable
{
	SemaRoot;
	uint8 pad[CacheLineSize-sizeof(SemaRoot)];
};
static struct semtable semtable[SEMTABLESZ];
static SemaRoot*
semroot(uint32 volatile *addr)
{
	return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
}
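// Distinct semaphore addresses may hash to the same SemaRoot; that is
// harmless, it only means semrelease may scan unrelated waiters on the
// shared list. A worked example (numbers illustrative): 8-byte-aligned
// addresses 2008 and 2016 give 2008>>3 = 251 -> bucket 251%251 = 0 and
// 2016>>3 = 252 -> bucket 1. The >>3 discards low bits that are usually
// zero due to alignment and would otherwise bias the bucket choice.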
// semqueue appends s to the wait list rooted at root.
static void
semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
{
	s->g = runtime_g();
	s->addr = addr;
	s->next = nil;
	s->prev = root->tail;
	if(root->tail)
		root->tail->next = s;
	else
		root->head = s;
	root->tail = s;
}

// semdequeue unlinks s from the doubly-linked wait list.
static void
semdequeue(SemaRoot *root, SemaWaiter *s)
{
	if(s->next)
		s->next->prev = s->prev;
	else
		root->tail = s->prev;
	if(s->prev)
		s->prev->next = s->next;
	else
		root->head = s->next;
	s->prev = nil;
	s->next = nil;
}
// cansemacquire attempts to decrement the semaphore count without
// blocking; it fails only when the count is zero.
static int32
cansemacquire(uint32 volatile *addr)
{
	uint32 v;

	while((v = runtime_atomicload(addr)) > 0)
		if(runtime_cas(addr, v, v-1))
			return 1;
	return 0;
}
static void readyWithTime(SudoG* s, int traceskip __attribute__ ((unused))) {
	if (s->releasetime != 0) {
		s->releasetime = runtime_cputicks();
	}
	runtime_ready(s->g);
}
void
runtime_semacquire(uint32 volatile *addr, bool profile)
{
	SemaWaiter s;	// Needs to be allocated on stack, otherwise garbage collector could deallocate it
	SemaRoot *root;
	int64 t0;

	// Easy case.
	if(cansemacquire(addr))
		return;

	// Harder case:
	//	increment waiter count
	//	try cansemacquire one more time, return if succeeded
	//	enqueue itself as a waiter
	//	sleep
	//	(waiter descriptor is dequeued by signaler)
	root = semroot(addr);
	t0 = 0;
	s.releasetime = 0;
	if(profile && runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		s.releasetime = -1;
	}
	for(;;) {
		runtime_lock(root);
		// Add ourselves to nwait to disable "easy case" in semrelease.
		runtime_xadd(&root->nwait, 1);
		// Check cansemacquire to avoid missed wakeup.
		if(cansemacquire(addr)) {
			runtime_xadd(&root->nwait, -1);
			runtime_unlock(root);
			return;
		}
		// Any semrelease after the cansemacquire knows we're waiting
		// (we set nwait above), so go to sleep.
		semqueue(root, addr, &s);
		runtime_parkunlock(root, "semacquire");
		if(cansemacquire(addr)) {
			if(t0)
				runtime_blockevent(s.releasetime - t0, 3);
			return;
		}
	}
}
void
runtime_semrelease(uint32 volatile *addr)
{
	SemaWaiter *s;
	SemaRoot *root;

	root = semroot(addr);
	runtime_xadd(addr, 1);

	// Easy case: no waiters?
	// This check must happen after the xadd, to avoid a missed wakeup
	// (see loop in semacquire).
	if(runtime_atomicload(&root->nwait) == 0)
		return;

	// Harder case: search for a waiter and wake it.
	runtime_lock(root);
	if(runtime_atomicload(&root->nwait) == 0) {
		// The count is already consumed by another goroutine,
		// so no need to wake up another goroutine.
		runtime_unlock(root);
		return;
	}
	for(s = root->head; s; s = s->next) {
		if(s->addr == addr) {
			runtime_xadd(&root->nwait, -1);
			semdequeue(root, s);
			break;
		}
	}
	runtime_unlock(root);
	if(s) {
		if(s->releasetime)
			s->releasetime = runtime_cputicks();
		runtime_ready(s->g);
	}
}
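// Why the nwait protocol above cannot miss a wakeup (a sketch, not from
// the source). Take goroutine A in semacquire and B in semrelease, with
// the count initially 0:
//
//	A: cansemacquire fails		// count is 0
//	B: xadd(addr, 1)		// count becomes 1
//	B: load nwait, sees 0		// easy case: B returns
//	A: xadd(&root->nwait, 1)
//	A: cansemacquire succeeds	// consumes B's increment, A never sleeps
//
// If instead A's nwait increment lands before B's load, B takes the slow
// path and finds A on the list. Either way the release is paired with an
// acquire, which is exactly what the re-check after incrementing nwait buys.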
// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
void net_runtime_Semacquire(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");

void net_runtime_Semacquire(uint32 *addr)
{
	runtime_semacquire(addr, true);
}

void net_runtime_Semrelease(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");

void net_runtime_Semrelease(uint32 *addr)
{
	runtime_semrelease(addr);
}
func runtime_Semacquire(addr *uint32) {
	runtime_semacquire(addr, true);
}

func runtime_Semrelease(addr *uint32) {
	runtime_semrelease(addr);
}
typedef struct SyncSema SyncSema;
struct SyncSema
{
	Lock;
	SemaWaiter*	head;
	SemaWaiter*	tail;
};

func runtime_Syncsemcheck(size uintptr) {
	if(size != sizeof(SyncSema)) {
		runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
		runtime_throw("bad SyncSema size");
	}
}
// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
func runtime_Syncsemacquire(s *SyncSema) {
	SemaWaiter w, *wake;
	int64 t0;

	w.g = runtime_g();
	w.nrelease = -1;
	w.next = nil;
	w.releasetime = 0;
	t0 = 0;
	if(runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		w.releasetime = -1;
	}

	runtime_lock(s);
	if(s->head && s->head->nrelease > 0) {
		// have pending release, consume it
		wake = nil;
		s->head->nrelease--;
		if(s->head->nrelease == 0) {
			wake = s->head;
			s->head = wake->next;
			if(s->head == nil)
				s->tail = nil;
		}
		runtime_unlock(s);
		if(wake)
			runtime_ready(wake->g);
	} else {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(s, "semacquire");
		if(t0)
			runtime_blockevent(w.releasetime - t0, 2);
	}
}
// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
func runtime_Syncsemrelease(s *SyncSema, n uint32) {
	SemaWaiter w, *wake;

	w.g = runtime_g();
	w.nrelease = (int32)n;
	w.next = nil;
	w.releasetime = 0;

	runtime_lock(s);
	while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
		// have pending acquire, satisfy it
		wake = s->head;
		s->head = wake->next;
		if(s->head == nil)
			s->tail = nil;
		if(wake->releasetime)
			wake->releasetime = runtime_cputicks();
		runtime_ready(wake->g);
		w.nrelease--;
	}
	if(w.nrelease > 0) {
		// enqueue itself
		if(s->tail == nil)
			s->head = &w;
		else
			s->tail->next = &w;
		s->tail = &w;
		runtime_parkunlock(s, "semarelease");
	} else
		runtime_unlock(s);
}
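// An illustrative trace (not from the source) of the direct-handoff
// semantics: Syncsemrelease(s, 2) with no acquirers queued parks the
// releaser with nrelease = 2. The first Syncsemacquire finds
// head->nrelease > 0, decrements it to 1 and returns immediately; the
// second decrements it to 0, dequeues the releaser and readies it. A
// releaser therefore stays blocked until all n of its permits have been
// consumed by pairing acquirers.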
// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
typedef struct notifyList notifyList;
struct notifyList
{
	// wait is the ticket number of the next waiter. It is atomically
	// incremented outside the lock.
	uint32 wait;

	// notify is the ticket number of the next waiter to be notified. It can
	// be read outside the lock, but is only written to with lock held.
	//
	// Both wait & notify can wrap around, and such cases will be correctly
	// handled as long as their "unwrapped" difference is bounded by 2^31.
	// For this not to be the case, we'd need to have 2^31+ goroutines
	// blocked on the same condvar, which is currently not possible.
	uint32 notify;

	// List of parked waiters.
	Lock lock;
	SudoG* head;
	SudoG* tail;
};
// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
static bool less(uint32 a, uint32 b) {
	return (int32)(a-b) < 0;
}
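// A worked example of the wraparound behavior: less(0xFFFFFFFF, 1)
// computes 0xFFFFFFFF - 1 = 0xFFFFFFFE, which reinterpreted as int32 is
// -2, so the result is true: a ticket handed out just before the counter
// wrapped sorts before one handed out just after. This stays correct as
// long as the unwrapped distance between a and b is below 2^31.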
// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) (r uint32) {
	// This may be called concurrently, for example, when called from
	// sync.Cond.Wait while holding a RWMutex in read mode.
	r = runtime_xadd(&l->wait, 1) - 1;
}
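// How sync.Cond drives this ticket machinery (caller-side sketch; see
// sync/cond.go in the standard library):
//
//	Wait:      t := notifyListAdd(l)	// under c.L
//	           unlock c.L
//	           notifyListWait(l, t)		// parks unless t already notified
//	           lock c.L
//	Signal:    notifyListNotifyOne(l)
//	Broadcast: notifyListNotifyAll(l)
//
// Taking the ticket before releasing c.L is what makes a Signal that
// lands between Add and Wait still count: Wait sees less(t, l->notify)
// and returns without parking.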
// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32) {
	SudoG s;
	int64 t0;

	runtime_lock(&l->lock);

	// Return right away if this ticket has already been notified.
	if (less(t, l->notify)) {
		runtime_unlock(&l->lock);
		return;
	}

	// Enqueue itself.
	runtime_memclr(&s, sizeof(s));
	s.g = runtime_g();
	s.ticket = t;
	s.releasetime = 0;
	t0 = 0;
	if (runtime_blockprofilerate > 0) {
		t0 = runtime_cputicks();
		s.releasetime = -1;
	}
	if (l->tail == nil) {
		l->head = &s;
	} else {
		l->tail->link = &s;
	}
	l->tail = &s;
	runtime_parkunlock(&l->lock, "semacquire");
	if (t0 != 0) {
		runtime_blockevent(s.releasetime-t0, 2);
	}
}
// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList) {
	SudoG *s;

	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
		return;
	}

	// Pull the list out into a local variable, waiters will be readied
	// outside the lock.
	runtime_lock(&l->lock);
	s = l->head;
	l->head = nil;
	l->tail = nil;

	// Update the next ticket to be notified. We can set it to the current
	// value of wait because any previous waiters are already in the list
	// or will notice that they have already been notified when trying to
	// add themselves to the list.
	runtime_atomicstore(&l->notify, runtime_atomicload(&l->wait));
	runtime_unlock(&l->lock);

	// Go through the local list and ready all waiters.
	while (s != nil) {
		SudoG* next = s->link;
		s->link = nil;
		readyWithTime(s, 4);
		s = next;
	}
}
// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func runtime_notifyListNotifyOne(l *notifyList) {
	uint32 t;
	SudoG *p;
	SudoG *s;

	// Fast-path: if there are no new waiters since the last notification
	// we don't need to acquire the lock at all.
	if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
		return;
	}

	runtime_lock(&l->lock);

	// Re-check under the lock if we need to do anything.
	t = l->notify;
	if (t == runtime_atomicload(&l->wait)) {
		runtime_unlock(&l->lock);
		return;
	}

	// Update the next notify ticket number, and try to find the G that
	// needs to be notified. If it hasn't made it to the list yet we won't
	// find it, but it won't park itself once it sees the new notify number.
	runtime_atomicstore(&l->notify, t+1);
	for (p = nil, s = l->head; s != nil; p = s, s = s->link) {
		if (s->ticket == t) {
			// Unlink s and ready it outside the lock.
			SudoG *n = s->link;
			if (p != nil)
				p->link = n;
			else
				l->head = n;
			if (n == nil)
				l->tail = p;
			runtime_unlock(&l->lock);
			s->link = nil;
			readyWithTime(s, 4);
			return;
		}
	}
	runtime_unlock(&l->lock);
}
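// If the scan above found no SudoG with ticket t, the chosen waiter has
// called notifyListAdd but not yet queued itself in notifyListWait. That
// is fine (a sketch of the reasoning, not from the source): we already
// stored t+1 into l->notify, so when that goroutine reaches
// notifyListWait it sees less(t, l->notify) and returns without parking.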
//go:linkname notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(sz uintptr) {
	if (sz != sizeof(notifyList)) {
		runtime_printf("runtime: bad notifyList size\n");
		runtime_throw("bad notifyList size");
	}
}