// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin netbsd openbsd plan9 windows

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime_semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime_semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime_semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
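//
// As a point of reference only, a minimal sketch of that contract on top
// of POSIX unnamed semaphores might look like the code below.  This is an
// illustrative assumption, not one of the real ports: Darwin lacks
// sem_init and uses Mach semaphores, and Windows uses event handles.
//
//	#include <semaphore.h>
//	#include <stdlib.h>
//	#include <time.h>
//
//	uintptr
//	runtime_semacreate(void)
//	{
//		sem_t *sem;
//
//		sem = malloc(sizeof *sem);
//		sem_init(sem, 0, 0);	// count 0, so the first acquire blocks
//		return (uintptr)sem;	// non-zero, as the contract requires
//	}
//
//	int32
//	runtime_semasleep(int64 ns)
//	{
//		sem_t *sem;
//		struct timespec ts;
//
//		sem = (sem_t*)runtime_m()->waitsema;
//		if(ns < 0) {
//			while(sem_wait(sem) != 0)
//				;	// retry on EINTR: ns < 0 must acquire
//			return 0;
//		}
//		clock_gettime(CLOCK_REALTIME, &ts);
//		ts.tv_sec += ns / 1000000000LL;
//		ts.tv_nsec += ns % 1000000000LL;
//		if(ts.tv_nsec >= 1000000000L) {
//			ts.tv_sec++;
//			ts.tv_nsec -= 1000000000L;
//		}
//		return sem_timedwait(sem, &ts) == 0 ? 0 : -1;
//	}
//
//	int32
//	runtime_semawakeup(M *mp)
//	{
//		sem_post((sem_t*)mp->waitsema);
//		return 0;
//	}
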
enum
{
	LOCKED = 1,

	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};

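// l->waitm packs two things into one word: the low LOCKED bit records
// whether the lock is held, and the remaining bits, when non-zero, are
// the address of the M at the head of the waiter list (M structures are
// word-aligned, so the low bit of a valid M pointer is always zero).
// For example, an M at address 0x1000 queued on a held lock is stored
// as 0x1001; masking with ~LOCKED recovers the pointer.
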
void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp(&l->waitm);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}

void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if(v == LOCKED) {
			if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}

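// Note that runtime_unlock never hands the lock to the M it wakes: the
// woken M resumes inside runtime_lock's outer loop and contends again.
// Since runtime_lock pushes waiters at the head of the list and
// runtime_unlock pops from the head, wakeup order is LIFO.  A usage
// sketch, with a hypothetical lock name:
//
//	static Lock examplelock;	// zero value is an unlocked Lock
//
//	void
//	touchsharedstate(void)
//	{
//		runtime_lock(&examplelock);
//		// ... read or write the state this lock protects ...
//		runtime_unlock(&examplelock);
//	}
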
// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->waitm = nil;
}

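// A Note is used roughly like this (a sketch; the names are hypothetical,
// not from the runtime).  One M clears the note and sleeps; another M
// wakes it exactly once per clear:
//
//	static Note done;
//
//	void
//	waiter(void)
//	{
//		runtime_noteclear(&done);
//		// ... hand &done to the worker, e.g. start it here ...
//		runtime_notesleep(&done);	// blocks until notewakeup
//	}
//
//	void
//	worker(void)
//	{
//		// ... do the work ...
//		runtime_notewakeup(&done);	// at most one wakeup per clear
//	}
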
void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp(&n->waitm);
	while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime_semawakeup(mp);
	}
}

void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	if(m->profilehz > 0)
		runtime_setprof(false);
	runtime_semasleep(-1);
	if(m->profilehz > 0)
		runtime_setprof(true);
}

void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime_casp(&n->waitm, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	if(m->profilehz > 0)
		runtime_setprof(false);
	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime_setprof(true);
			return;
		}

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived. Keep sleeping.
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime_setprof(true);

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp(&n->waitm);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp(&n->waitm, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
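
// The unregister loop above closes a narrow race.  A sketch of the
// interleaving it defends against:
//
//	sleeper M                          waker M
//	---------                          -------
//	semasleep(ns) times out
//	                                   notewakeup: CAS waitm m -> LOCKED
//	                                   semawakeup(m): post waitsema
//	sees waitm == LOCKED
//	semasleep(-1) consumes the post
//
// Without that final semasleep(-1), the posted count would leak into the
// M's next, unrelated wait on waitsema, making it return immediately.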