libgo/runtime/lock_sema.c

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime_semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime_semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime_semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
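// As an illustration only (not part of this file): a port to a system
// with POSIX semaphores could satisfy the contract above roughly as
// sketched below.  The direct use of <semaphore.h>, <stdlib.h>, and
// plain malloc is an assumption made for the sketch; the real
// implementations live in the OS-specific runtime sources, and a real
// port must also handle the timed case of runtime_semasleep (ns >= 0),
// e.g. with sem_timedwait.
//
//	uintptr
//	runtime_semacreate(void)
//	{
//		sem_t *sem;
//
//		sem = malloc(sizeof *sem);
//		sem_init(sem, 0, 0);	// count 0: the first wait blocks
//		return (uintptr)sem;	// non-zero, as required
//	}
//
//	int32
//	runtime_semasleep(int64 ns)
//	{
//		if(ns < 0) {
//			while(sem_wait((sem_t*)runtime_m()->waitsema) != 0)
//				;	// retry if interrupted by a signal
//			return 0;
//		}
//		...	// ns >= 0: timed wait; return -1 on timeout or interrupt
//	}
//
//	int32
//	runtime_semawakeup(M *mp)
//	{
//		return sem_post((sem_t*)mp->waitsema);
//	}
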
enum
{
	LOCKED = 1,

	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};

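// The low bit of l->key is the LOCKED flag; the remaining bits hold the
// head of the list of M's waiting for the lock, linked through
// m->nextwaitm.  Both pieces are updated together with a single
// compare-and-swap on the whole word.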
void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued. Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}

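// runtime_unlock releases the lock.  When no M is queued (key == LOCKED)
// it simply clears the word; otherwise it pops one waiting M off the
// list and wakes it with runtime_semawakeup.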
void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	for(;;) {
		v = (uintptr)runtime_atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			if(runtime_casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M. Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
}

// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}

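// n->key encodes the note state: 0 means cleared, LOCKED means the
// wakeup has already happened, and any other value is the M that is
// (or soon will be) sleeping on the note.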
void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp((void**)&n->key);
	while(!runtime_casp((void**)&n->key, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting. Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups! Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m. Wake it up.
		runtime_semawakeup(mp);
	}
}

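// runtime_notesleep blocks the calling M until the note is signaled.
// It registers m in n->key and sleeps on m->waitsema; if the wakeup
// already happened (n->key == LOCKED) it returns immediately.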
void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();

	/* For gccgo it's OK to sleep in non-g0, and it happens in
	   stoptheworld because we have not implemented preemption.

	if(runtime_g() != m->g0)
		runtime_throw("notesleep not on g0");
	*/

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup)
		if(n->key != LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued. Sleep.
	m->blocked = true;
	runtime_semasleep(-1);
	m->blocked = false;
}

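// notetsleep is the timed wait shared by runtime_notetsleep and
// runtime_notetsleepg.  It returns true if the note was signaled and
// false if the deadline passed first; on timeout it must unregister
// from n->key before returning so that a racing notewakeup cannot
// hand it the semaphore unexpectedly.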
static bool
notetsleep(Note *n, int64 ns, int64 deadline, M *mp)
{
	M *m;

	m = runtime_m();

	// Conceptually, deadline and mp are local variables.
	// They are passed as arguments so that the space for them
	// does not count against our nosplit stack sequence.

	// Register for wakeup on n->waitm.
	if(!runtime_casp((void**)&n->key, nil, m)) {  // must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return true;
	}

	if(ns < 0) {
		// Queued. Sleep.
		m->blocked = true;
		runtime_semasleep(-1);
		m->blocked = false;
		return true;
	}

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered. Sleep.
		m->blocked = true;
		if(runtime_semasleep(ns) >= 0) {
			m->blocked = false;
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true;
		}
		m->blocked = false;

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - runtime_nanotime();
		if(ns <= 0)
			break;
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp((void**)&n->key, mp, nil))
				return false;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			m->blocked = true;
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			m->blocked = false;
			return true;
		} else
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
	}
}

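// runtime_notetsleep waits on the note with a timeout.  It is normally
// called on g0 (or with m->gcing set); it allocates the M's semaphore
// if needed and delegates to notetsleep.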
bool
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	bool res;

	m = runtime_m();

	if(runtime_g() != m->g0 && !m->gcing)
		runtime_throw("notetsleep not on g0");

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	res = notetsleep(n, ns, 0, nil);
	return res;
}

// same as runtime_notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
bool
runtime_notetsleepg(Note *n, int64 ns)
{
	M *m;
	bool res;

	m = runtime_m();

	if(runtime_g() == m->g0)
		runtime_throw("notetsleepg on g0");

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	runtime_entersyscallblock();
	res = notetsleep(n, ns, 0, nil);
	runtime_exitsyscall();
	return res;
}
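
// Typical usage elsewhere in the runtime (illustrative only; the
// identifiers below are placeholders, not definitions from this file):
//
//	static Lock l;
//	static Note done;
//
//	runtime_lock(&l);
//	// ... touch state shared between M's ...
//	runtime_unlock(&l);
//
//	runtime_noteclear(&done);
//	// one M waits for the event:
//	runtime_notesleep(&done);
//	// another M signals it exactly once:
//	runtime_notewakeup(&done);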