[official-gcc.git] / libgo / runtime / sema.goc
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Semaphore implementation exposed to Go.
// Intended use is to provide a sleep and wakeup
// primitive that can be used in the contended case
// of other synchronization primitives.
// Thus it targets the same goal as Linux's futex,
// but it has much simpler semantics.
//
// That is, don't think of these as semaphores.
// Think of them as a way to implement sleep and wakeup
// such that every sleep is paired with a single wakeup,
// even if, due to races, the wakeup happens before the sleep.
//
// See Mullender and Cox, ``Semaphores in Plan 9,''
// http://swtch.com/semaphore.pdf
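
// For orientation, a minimal sketch of the intended pairing (illustrative
// only, not part of this file; try_fast_path and has_waiters are
// hypothetical helpers of some higher-level primitive):
//
//        // lock: fall back to the semaphore only when contended
//        if(!try_fast_path(&mu))
//                runtime_semacquire(&mu.sema, false);    // sleep
//
//        // unlock: wake exactly one sleeper, if any
//        if(has_waiters(&mu))
//                runtime_semrelease(&mu.sema);           // wakeup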

package sync
#include "runtime.h"
#include "arch.h"

typedef struct SemaWaiter SemaWaiter;
struct SemaWaiter
{
        uint32 volatile*        addr;
        G*      g;
        int64   releasetime;
        int32   nrelease;       // -1 for acquire
        SemaWaiter*     prev;
        SemaWaiter*     next;
};

typedef struct SemaRoot SemaRoot;
struct SemaRoot
{
        Lock;
        SemaWaiter*     head;
        SemaWaiter*     tail;
        // Number of waiters. Read w/o the lock.
        uint32 volatile nwait;
};

// Prime to not correlate with any user patterns.
#define SEMTABLESZ 251

struct semtable
{
        SemaRoot;
        uint8 pad[CacheLineSize-sizeof(SemaRoot)];
};
static struct semtable semtable[SEMTABLESZ];

static SemaRoot*
semroot(uint32 volatile *addr)
{
        return &semtable[((uintptr)addr >> 3) % SEMTABLESZ];
}
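
// The shift drops low address bits that are constant for aligned
// semaphore words, and the prime table size decorrelates the rest.
// For example, addr == 0x1000 maps to (0x1000 >> 3) % 251 == 512 % 251,
// i.e. bucket 10.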

static void
semqueue(SemaRoot *root, uint32 volatile *addr, SemaWaiter *s)
{
        s->g = runtime_g();
        s->addr = addr;
        s->next = nil;
        s->prev = root->tail;
        if(root->tail)
                root->tail->next = s;
        else
                root->head = s;
        root->tail = s;
}

static void
semdequeue(SemaRoot *root, SemaWaiter *s)
{
        if(s->next)
                s->next->prev = s->prev;
        else
                root->tail = s->prev;
        if(s->prev)
                s->prev->next = s->next;
        else
                root->head = s->next;
        s->prev = nil;
        s->next = nil;
}

static int32
cansemacquire(uint32 volatile *addr)
{
        uint32 v;

        while((v = runtime_atomicload(addr)) > 0)
                if(runtime_cas(addr, v, v-1))
                        return 1;
        return 0;
}
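
// Note that cansemacquire is the lock-free fast path and never blocks:
// if two goroutines both observe v == 1, only one runtime_cas succeeds;
// the other rereads 0, returns 0, and its caller falls back to the slow
// (queueing) path.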

static void readyWithTime(SudoG* s, int traceskip __attribute__ ((unused))) {
        if (s->releasetime != 0) {
                s->releasetime = runtime_cputicks();
        }
        runtime_ready(s->g);
}

void
runtime_semacquire(uint32 volatile *addr, bool profile)
{
        SemaWaiter s;   // Needs to be allocated on stack, otherwise garbage collector could deallocate it
        SemaRoot *root;
        int64 t0;

        // Easy case.
        if(cansemacquire(addr))
                return;

        // Harder case:
        //      increment waiter count
        //      try cansemacquire one more time, return if succeeded
        //      enqueue itself as a waiter
        //      sleep
        //      (waiter descriptor is dequeued by signaler)
        root = semroot(addr);
        t0 = 0;
        s.releasetime = 0;
        if(profile && runtime_blockprofilerate > 0) {
                t0 = runtime_cputicks();
                s.releasetime = -1;
        }
        for(;;) {
                runtime_lock(root);
                // Add ourselves to nwait to disable "easy case" in semrelease.
                runtime_xadd(&root->nwait, 1);
                // Check cansemacquire to avoid missed wakeup.
                if(cansemacquire(addr)) {
                        runtime_xadd(&root->nwait, -1);
                        runtime_unlock(root);
                        return;
                }
                // Any semrelease after the cansemacquire knows we're waiting
                // (we set nwait above), so go to sleep.
                semqueue(root, addr, &s);
                runtime_parkunlock(root, "semacquire");
                if(cansemacquire(addr)) {
                        if(t0)
                                runtime_blockevent(s.releasetime - t0, 3);
                        return;
                }
        }
}
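
// The nwait increment plus the recheck above close the missed-wakeup
// window: a semrelease that completes before we increment nwait has
// already bumped *addr, so the recheck succeeds; one that observes
// nwait > 0 takes the locked path in semrelease, where our queued
// waiter becomes visible once we park.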

void
runtime_semrelease(uint32 volatile *addr)
{
        SemaWaiter *s;
        SemaRoot *root;

        root = semroot(addr);
        runtime_xadd(addr, 1);

        // Easy case: no waiters?
        // This check must happen after the xadd, to avoid a missed wakeup
        // (see loop in semacquire).
        if(runtime_atomicload(&root->nwait) == 0)
                return;

        // Harder case: search for a waiter and wake it.
        runtime_lock(root);
        if(runtime_atomicload(&root->nwait) == 0) {
                // The count is already consumed by another goroutine,
                // so no need to wake up another goroutine.
                runtime_unlock(root);
                return;
        }
        for(s = root->head; s; s = s->next) {
                if(s->addr == addr) {
                        runtime_xadd(&root->nwait, -1);
                        semdequeue(root, s);
                        break;
                }
        }
        runtime_unlock(root);
        if(s) {
                if(s->releasetime)
                        s->releasetime = runtime_cputicks();
                runtime_ready(s->g);
        }
}
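
// Note that a woken waiter does not own the count: runtime_semrelease
// increments *addr before waking anyone, so a third goroutine's
// cansemacquire may steal the unit first. This is why the loop in
// runtime_semacquire rechecks cansemacquire after each wakeup instead
// of returning unconditionally.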

// TODO(dvyukov): move to netpoll.goc once it's used by all OSes.
void net_runtime_Semacquire(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semacquire");

void net_runtime_Semacquire(uint32 *addr)
{
        runtime_semacquire(addr, true);
}

void net_runtime_Semrelease(uint32 *addr)
  __asm__ (GOSYM_PREFIX "net.runtime_Semrelease");

void net_runtime_Semrelease(uint32 *addr)
{
        runtime_semrelease(addr);
}

func runtime_Semacquire(addr *uint32) {
        runtime_semacquire(addr, true);
}

func runtime_Semrelease(addr *uint32) {
        runtime_semrelease(addr);
}

typedef struct SyncSema SyncSema;
struct SyncSema
{
        Lock;
        SemaWaiter*     head;
        SemaWaiter*     tail;
};

func runtime_Syncsemcheck(size uintptr) {
        if(size != sizeof(SyncSema)) {
                runtime_printf("bad SyncSema size: sync:%D runtime:%D\n", (int64)size, (int64)sizeof(SyncSema));
                runtime_throw("bad SyncSema size");
        }
}

// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
func runtime_Syncsemacquire(s *SyncSema) {
        SemaWaiter w, *wake;
        int64 t0;

        w.g = runtime_g();
        w.nrelease = -1;
        w.next = nil;
        w.releasetime = 0;
        t0 = 0;
        if(runtime_blockprofilerate > 0) {
                t0 = runtime_cputicks();
                w.releasetime = -1;
        }

        runtime_lock(s);
        if(s->head && s->head->nrelease > 0) {
                // have pending release, consume it
                wake = nil;
                s->head->nrelease--;
                if(s->head->nrelease == 0) {
                        wake = s->head;
                        s->head = wake->next;
                        if(s->head == nil)
                                s->tail = nil;
                }
                runtime_unlock(s);
                if(wake)
                        runtime_ready(wake->g);
        } else {
                // enqueue itself
                if(s->tail == nil)
                        s->head = &w;
                else
                        s->tail->next = &w;
                s->tail = &w;
                runtime_parkunlock(s, "semacquire");
                if(t0)
                        runtime_blockevent(w.releasetime - t0, 2);
        }
}

// Syncsemrelease waits for n pairing Syncsemacquire calls on the same semaphore s.
func runtime_Syncsemrelease(s *SyncSema, n uint32) {
        SemaWaiter w, *wake;

        w.g = runtime_g();
        w.nrelease = (int32)n;
        w.next = nil;
        w.releasetime = 0;

        runtime_lock(s);
        while(w.nrelease > 0 && s->head && s->head->nrelease < 0) {
                // have pending acquire, satisfy it
                wake = s->head;
                s->head = wake->next;
                if(s->head == nil)
                        s->tail = nil;
                if(wake->releasetime)
                        wake->releasetime = runtime_cputicks();
                runtime_ready(wake->g);
                w.nrelease--;
        }
        if(w.nrelease > 0) {
                // enqueue itself
                if(s->tail == nil)
                        s->head = &w;
                else
                        s->tail->next = &w;
                s->tail = &w;
                runtime_parkunlock(s, "semarelease");
        } else
                runtime_unlock(s);
}
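
// Together these form a synchronous rendezvous rather than a counting
// semaphore: a releaser with nrelease > 0 parks until n acquirers have
// consumed it, and an acquirer parks until a pending release is
// available, so every sleep is paired with a wakeup on the same SyncSema.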

// notifyList is a ticket-based notification list used to implement sync.Cond.
//
// It must be kept in sync with the sync package.
typedef struct {
        // wait is the ticket number of the next waiter. It is atomically
        // incremented outside the lock.
        uint32 wait;

        // notify is the ticket number of the next waiter to be notified. It can
        // be read outside the lock, but is only written to with lock held.
        //
        // Both wait & notify can wrap around, and such cases will be correctly
        // handled as long as their "unwrapped" difference is bounded by 2^31.
        // For this not to be the case, we'd need to have 2^31+ goroutines
        // blocked on the same condvar, which is currently not possible.
        uint32 notify;

        // List of parked waiters.
        Lock lock;
        SudoG* head;
        SudoG* tail;
} notifyList;

// less checks if a < b, considering a & b running counts that may overflow the
// 32-bit range, and that their "unwrapped" difference is always less than 2^31.
static bool less(uint32 a, uint32 b) {
        return (int32)(a-b) < 0;
}
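
// Worked example: if a == 0xFFFFFFFF and b == 0x00000001 (b has wrapped),
// then a-b == 0xFFFFFFFE, which as int32 is -2, so less(a, b) is true,
// matching the unwrapped order 2^32-1 < 2^32+1.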

// notifyListAdd adds the caller to a notify list such that it can receive
// notifications. The caller must eventually call notifyListWait to wait for
// such a notification, passing the returned ticket number.
//go:linkname notifyListAdd sync.runtime_notifyListAdd
func runtime_notifyListAdd(l *notifyList) (r uint32) {
        // This may be called concurrently, for example, when called from
        // sync.Cond.Wait while holding a RWMutex in read mode.
        r = runtime_xadd(&l->wait, 1) - 1;
}
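
// For orientation: on the Go side, sync.Cond.Wait is expected to perform
// roughly this sequence (a sketch, not code from this file):
//
//        t := runtime_notifyListAdd(l)   // take a ticket
//        L.Unlock()                      // release the user's Locker
//        runtime_notifyListWait(l, t)    // park until ticket t is notified
//        L.Lock()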

// notifyListWait waits for a notification. If one has been sent since
// notifyListAdd was called, it returns immediately. Otherwise, it blocks.
//go:linkname notifyListWait sync.runtime_notifyListWait
func runtime_notifyListWait(l *notifyList, t uint32) {
        SudoG s;
        int64 t0;

        runtime_lock(&l->lock);

        // Return right away if this ticket has already been notified.
        if (less(t, l->notify)) {
                runtime_unlock(&l->lock);
                return;
        }

        // Enqueue itself.
        runtime_memclr(&s, sizeof(s));
        s.g = runtime_g();
        s.ticket = t;
        s.releasetime = 0;
        t0 = 0;
        if (runtime_blockprofilerate > 0) {
                t0 = runtime_cputicks();
                s.releasetime = -1;
        }
        if (l->tail == nil) {
                l->head = &s;
        } else {
                l->tail->next = &s;
        }
        l->tail = &s;
        runtime_parkunlock(&l->lock, "semacquire");
        if (t0 != 0) {
                runtime_blockevent(s.releasetime-t0, 2);
        }
}

// notifyListNotifyAll notifies all entries in the list.
//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func runtime_notifyListNotifyAll(l *notifyList) {
        SudoG *s;

        // Fast-path: if there are no new waiters since the last notification
        // we don't need to acquire the lock.
        if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
                return;
        }

        // Pull the list out into a local variable; waiters will be readied
        // outside the lock.
        runtime_lock(&l->lock);
        s = l->head;
        l->head = nil;
        l->tail = nil;

        // Update the next ticket to be notified. We can set it to the current
        // value of wait because any previous waiters are already in the list
        // or will notice that they have already been notified when trying to
        // add themselves to the list.
        runtime_atomicstore(&l->notify, runtime_atomicload(&l->wait));
        runtime_unlock(&l->lock);

        // Go through the local list and ready all waiters.
        while (s != nil) {
                SudoG* next = s->next;
                s->next = nil;
                readyWithTime(s, 4);
                s = next;
        }
}

// notifyListNotifyOne notifies one entry in the list.
//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func runtime_notifyListNotifyOne(l *notifyList) {
        uint32 t;
        SudoG *p;
        SudoG *s;

        // Fast-path: if there are no new waiters since the last notification
        // we don't need to acquire the lock at all.
        if (runtime_atomicload(&l->wait) == runtime_atomicload(&l->notify)) {
                return;
        }

        runtime_lock(&l->lock);

        // Re-check under the lock if we need to do anything.
        t = l->notify;
        if (t == runtime_atomicload(&l->wait)) {
                runtime_unlock(&l->lock);
                return;
        }

        // Update the next notify ticket number, and try to find the G that
        // needs to be notified. If it hasn't made it to the list yet we won't
        // find it, but it won't park itself once it sees the new notify number.
        runtime_atomicstore(&l->notify, t+1);
        for (p = nil, s = l->head; s != nil; p = s, s = s->next) {
                if (s->ticket == t) {
                        SudoG *n = s->next;
                        if (p != nil) {
                                p->next = n;
                        } else {
                                l->head = n;
                        }
                        if (n == nil) {
                                l->tail = p;
                        }
                        runtime_unlock(&l->lock);
                        s->next = nil;
                        readyWithTime(s, 4);
                        return;
                }
        }
        runtime_unlock(&l->lock);
}

//go:linkname notifyListCheck sync.runtime_notifyListCheck
func runtime_notifyListCheck(sz uintptr) {
        if (sz != sizeof(notifyList)) {
                runtime_printf("runtime: bad notifyList size\n");
                runtime_throw("bad notifyList size");
        }
}