// File: libgo/runtime/sema.goc (from the official GCC repository,
// blob b49c7b71051a24b1d9b906853c2fb3ccd4f92941).
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Semaphore implementation exposed to Go.
6 // Intended use is provide a sleep and wakeup
7 // primitive that can be used in the contended case
8 // of other synchronization primitives.
9 // Thus it targets the same goal as Linux's futex,
10 // but it has much simpler semantics.
12 // That is, don't think of these as semaphores.
13 // Think of them as a way to implement sleep and wakeup
14 // such that every sleep is paired with a single wakeup,
15 // even if, due to races, the wakeup happens before the sleep.
17 // See Mullender and Cox, ``Semaphores in Plan 9,''
18 // http://swtch.com/semaphore.pdf
20 package sync
21 #include "runtime.h"
22 #include "chan.h"
23 #include "arch.h"
25 typedef struct SemaWaiter SemaWaiter;
26 struct SemaWaiter
28         uint32 volatile*        addr;
29         G*      g;
30         int64   releasetime;
31         int32   nrelease;       // -1 for acquire
32         SemaWaiter*     prev;
33         SemaWaiter*     next;
36 typedef struct SemaRoot SemaRoot;
37 struct SemaRoot
39         Lock;
40         SemaWaiter*     head;
41         SemaWaiter*     tail;
42         // Number of waiters. Read w/o the lock.
43         uint32 volatile nwait;
46 // Prime to not correlate with any user patterns.
47 #define SEMTABLESZ 251
49 struct semtable
51         SemaRoot;
52         uint8 pad[CacheLineSize-sizeof(SemaRoot)];
54 static struct semtable semtable[SEMTABLESZ];
56 static SemaRoot*
57 semroot(uint32 volatile *addr)
59         return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
62 static void
63 semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
65         s->g = runtime_g();
66         s->addr = addr;
67         s->next = nil;
68         s->prev = root->tail;
69         if(root->tail)
70                 root->tail->next = s;
71         else
72                 root->head = s;
73         root->tail = s;
76 static void
77 semdequeue(SemaRoot *root, SemaWaiter *s)
79         if(s->next)
80                 s->next->prev = s->prev;
81         else
82                 root->tail = s->prev;
83         if(s->prev)
84                 s->prev->next = s->next;
85         else
86                 root->head = s->next;
87         s->prev = nil;
88         s->next = nil;
91 static int32
92 cansemacquire(uint32 volatile *addr)
94         uint32 v;
96         while((v = runtime_atomicload(addr)) > 0)
97                 if(runtime_cas(addr, v, v-1))
98                         return 1;
99         return 0;
102 static void readyWithTime(SudoG* s, int traceskip __attribute__ ((unused))) {
103         if (s->releasetime != 0) {
104                 s->releasetime = runtime_cputicks();
105         }
106         runtime_ready(s->g);
109 void
110 runtime_semacquire(uint32 volatile *addr, bool profile)
112         SemaWaiter s;   // Needs to be allocated on stack, otherwise garbage collector could deallocate it
113         SemaRoot *root;
114         int64 t0;
115         
116         // Easy case.
117         if(cansemacquire(addr))
118                 return;
120         // Harder case:
121         //      increment waiter count
122         //      try cansemacquire one more time, return if succeeded
123         //      enqueue itself as a waiter
124         //      sleep
125         //      (waiter descriptor is dequeued by signaler)
126         root = semroot(addr);
127         t0 = 0;
128         s.releasetime = 0;
129         if(profile && runtime_blockprofilerate > 0) {
130                 t0 = runtime_cputicks();
131                 s.releasetime = -1;
132         }
133         for(;;) {
135                 runtime_lock(root);
136                 // Add ourselves to nwait to disable "easy case" in semrelease.
137                 runtime_xadd(&root->nwait, 1);
138                 // Check cansemacquire to avoid missed wakeup.
139                 if(cansemacquire(addr)) {
140                         runtime_xadd(&root->nwait, -1);
141                         runtime_unlock(root);
142                         return;
143                 }
144                 // Any semrelease after the cansemacquire knows we're waiting
145                 // (we set nwait above), so go to sleep.
146                 semqueue(root, addr, &s);
147                 runtime_parkunlock(root, "semacquire");
148                 if(cansemacquire(addr)) {
149                         if(t0)
150                                 runtime_blockevent(s.releasetime - t0, 3);
151                         return;
152                 }
153         }
156 void
157 runtime_semrelease(uint32 volatile *addr)
159         SemaWaiter *s;
160         SemaRoot *root;
162         root = semroot(addr);
163         runtime_xadd(addr, 1);
165         // Easy case: no waiters?
166         // This check must happen after the xadd, to avoid a missed wakeup
167         // (see loop in semacquire).
168         if(runtime_atomicload(&root->nwait) == 0)
169                 return;
171         // Harder case: search for a waiter and wake it.
172         runtime_lock(root);
173         if(runtime_atomicload(&root->nwait) == 0) {
174                 // The count is already consumed by another goroutine,
175                 // so no need to wake up another goroutine.
176                 runtime_unlock(root);
177                 return;
178         }
179         for(s = root->head; s; s = s->next) {
180                 if(s->addr == addr) {
181                         runtime_xadd(&root->nwait, -1);
182                         semdequeue(root, s);
183                         break;
184                 }
185         }
186         runtime_unlock(root);
187         if(s) {
188                 if(s->releasetime)
189                         s->releasetime = runtime_cputicks();
190                 runtime_ready(s->g);
191         }
194 // TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
195 void net_runtime_Semacquire(uint32 *addr)
196   __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");
198 void net_runtime_Semacquire(uint32 *addr)
200         runtime_semacquire(addr, true);
203 void net_runtime_Semrelease(uint32 *addr)
204   __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");
206 void net_runtime_Semrelease(uint32 *addr)
208         runtime_semrelease(addr);
211 func runtime_Semacquire(addr *uint32) {
212         runtime_semacquire(addr, true);
215 func runtime_Semrelease(addr *uint32) {
216         runtime_semrelease(addr);
219 typedef struct SyncSema SyncSema;
220 struct SyncSema
222         Lock;
223         SemaWaiter*     head;
224         SemaWaiter*     tail;
227 func runtime_Syncsemcheck(size uintptr) {
228         if(size != sizeof(SyncSema)) {
229                 runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
230                 runtime_throw("bad SyncSema size");
231         }
234 // Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
235 func runtime_Syncsemacquire(s *SyncSema) {
236         SemaWaiter w, *wake;
237         int64 t0;
239         w.g = runtime_g();
240         w.nrelease = -1;
241         w.next = nil;
242         w.releasetime = 0;
243         t0 = 0;
244         if(runtime_blockprofilerate > 0) {
245                 t0 = runtime_cputicks();
246                 w.releasetime = -1;
247         }
249         runtime_lock(s);
250         if(s->head && s->head->nrelease > 0) {
251                 // have pending release, consume it
252                 wake = nil;
253                 s->head->nrelease--;
254                 if(s->head->nrelease == 0) {
255                         wake = s->head;
256                         s->head = wake->next;
257                         if(s->head == nil)
258                                 s->tail = nil;
259                 }
260                 runtime_unlock(s);
261                 if(wake)
262                         runtime_ready(wake->g);
263         } else {
264                 // enqueue itself
265                 if(s->tail == nil)
266                         s->head = &w;
267                 else
268                         s->tail->next = &w;
269                 s->tail = &w;
270                 runtime_parkunlock(s, "semacquire");
271                 if(t0)
272                         runtime_blockevent(w.releasetime - t0, 2);
273         }
276 // Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
277 func runtime_Syncsemrelease(s *SyncSema, n uint32) {
278         SemaWaiter w, *wake;
280         w.g = runtime_g();
281         w.nrelease = (int32)n;
282         w.next = nil;
283         w.releasetime = 0;
285         runtime_lock(s);
286         while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
287                 // have pending acquire, satisfy it
288                 wake = s->head;
289                 s->head = wake->next;
290                 if(s->head == nil)
291                         s->tail = nil;
292                 if(wake->releasetime)
293                         wake->releasetime = runtime_cputicks();
294                 runtime_ready(wake->g);
295                 w.nrelease--;
296         }
297         if(w.nrelease > 0) {
298                 // enqueue itself
299                 if(s->tail == nil)
300                         s->head = &w;
301                 else
302                         s->tail->next = &w;
303                 s->tail = &w;
304                 runtime_parkunlock(s, "semarelease");
305         } else
306                 runtime_unlock(s);
309 // notifyList is a ticket-based notification list used to implement sync.Cond.
311 // It must be kept in sync with the sync package.
312 typedef struct {
313         // wait is the ticket number of the next waiter. It is atomically
314         // incremented outside the lock.
315         uint32 wait;
317         // notify is the ticket number of the next waiter to be notified. It can
318         // be read outside the lock, but is only written to with lock held.
319         //
320         // Both wait & notify can wrap around, and such cases will be correctly
321         // handled as long as their "unwrapped" difference is bounded by 2^31.
322         // For this not to be the case, we'd need to have 2^31+ goroutines
323         // blocked on the same condvar, which is currently not possible.
324         uint32 notify;
326         // List of parked waiters.
327         Lock lock;
328         SudoG* head;
329         SudoG* tail;
330 } notifyList;
332 // less checks if a < b, considering a & b running counts that may overflow the
333 // 32-bit range, and that their "unwrapped" difference is always less than 2^31.
334 static bool less(uint32 a, uint32 b) {
335         return (int32)(a-b) < 0;
338 // notifyListAdd adds the caller to a notify list such that it can receive
339 // notifications. The caller must eventually call notifyListWait to wait for
340 // such a notification, passing the returned ticket number.
341 //go:linkname notifyListAdd sync.runtime_notifyListAdd
342 func runtime_notifyListAdd(l *notifyList) (r uint32) {
343         // This may be called concurrently, for example, when called from
344         // sync.Cond.Wait while holding a RWMutex in read mode.
345         r = runtime_xadd(&l->wait, 1) - 1;
348 // notifyListWait waits for a notification. If one has been sent since
349 // notifyListAdd was called, it returns immediately. Otherwise, it blocks.
350 //go:linkname notifyListWait sync.runtime_notifyListWait
351 func runtime_notifyListWait(l *notifyList, t uint32) {
352         SudoG s;
353         int64 t0;
355         runtime_lock(&l->lock);
357         // Return right away if this ticket has already been notified.
358         if (less(t, l->notify)) {
359                 runtime_unlock(&l->lock);
360                 return;
361         }
363         // Enqueue itself.
364         runtime_memclr(&s, sizeof(s));
365         s.g = runtime_g();
366         s.ticket = t;
367         s.releasetime = 0;
368         t0 = 0;
369         if (runtime_blockprofilerate > 0) {
370                 t0 = runtime_cputicks();
371                 s.releasetime = -1;
372         }
373         if (l->tail == nil) {
374                 l->head = &s;
375         } else {
376                 l->tail->link = &s;
377         }
378         l->tail = &s;
379         runtime_parkunlock(&l->lock, "semacquire");
380         if (t0 != 0) {
381                 runtime_blockevent(s.releasetime-t0, 2);
382         }
385 // notifyListNotifyAll notifies all entries in the list.
386 //go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
387 func runtime_notifyListNotifyAll(l *notifyList) {
388         SudoG *s;
390         // Fast-path: if there are no new waiters since the last notification
391         // we don't need to acquire the lock.
392         if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
393                 return;
394         }
396         // Pull the list out into a local variable, waiters will be readied
397         // outside the lock.
398         runtime_lock(&l->lock);
399         s = l->head;
400         l->head = nil;
401         l->tail = nil;
403         // Update the next ticket to be notified. We can set it to the current
404         // value of wait because any previous waiters are already in the list
405         // or will notice that they have already been notified when trying to
406         // add themselves to the list.
407         runtime_atomicstore(&l->notify, runtime_atomicload(&l->wait));
408         runtime_unlock(&l->lock);
410         // Go through the local list and ready all waiters.
411         while (s != nil) {
412                 SudoG* next = s->link;
413                 s->link = nil;
414                 readyWithTime(s, 4);
415                 s = next;
416         }
419 // notifyListNotifyOne notifies one entry in the list.
420 //go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
421 func runtime_notifyListNotifyOne(l *notifyList) {
422         uint32 t;
423         SudoG *p;
424         SudoG *s;
426         // Fast-path: if there are no new waiters since the last notification
427         // we don't need to acquire the lock at all.
428         if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
429                 return;
430         }
432         runtime_lock(&l->lock);
434         // Re-check under the lock if we need to do anything.
435         t = l->notify;
436         if (t == runtime_atomicload(&l->wait)) {
437                 runtime_unlock(&l->lock);
438                 return;
439         }
441         // Update the next notify ticket number, and try to find the G that
442         // needs to be notified. If it hasn't made it to the list yet we won't
443         // find it, but it won't park itself once it sees the new notify number.
444         runtime_atomicstore(&l->notify, t+1);
445         for (p = nil, s = l->head; s != nil; p = s, s = s->link) {
446                 if (s->ticket == t) {
447                         SudoG *n = s->link;
448                         if (p != nil) {
449                                 p->link = n;
450                         } else {
451                                 l->head = n;
452                         }
453                         if (n == nil) {
454                                 l->tail = p;
455                         }
456                         runtime_unlock(&l->lock);
457                         s->link = nil;
458                         readyWithTime(s, 4);
459                         return;
460                 }
461         }
462         runtime_unlock(&l->lock);
465 //go:linkname notifyListCheck sync.runtime_notifyListCheck
466 func runtime_notifyListCheck(sz uintptr) {
467         if (sz != sizeof(notifyList)) {
468                 runtime_printf("runtime: bad notifyList size\n");
469                 runtime_throw("bad notifyList size");
470         }