[official-gcc.git] / libgo / runtime / lock_futex.c
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	runtime_futexsleep(uint32 *addr, uint32 val, int64 ns)
//		Atomically,
//			if(*addr == val) sleep
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	runtime_futexwakeup(uint32 *addr, uint32 cnt)
//		If any procs are sleeping on addr, wake up at most cnt.
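//
// As a rough illustration only (not part of this file; the real definitions
// are OS-specific and supplied elsewhere in the runtime), a Linux version
// built on the futex(2) syscall could look like the sketch below, assuming
// <linux/futex.h>, <sys/syscall.h>, <time.h> and <unistd.h>:
//
//	static void
//	futexsleep(uint32 *addr, uint32 val, int64 ns)
//	{
//		struct timespec ts, *tsp = NULL;
//
//		if(ns >= 0) {
//			ts.tv_sec = ns / 1000000000LL;
//			ts.tv_nsec = ns % 1000000000LL;
//			tsp = &ts;
//		}
//		// FUTEX_WAIT sleeps only while *addr == val and may wake
//		// spuriously, matching the contract described above.
//		syscall(SYS_futex, addr, FUTEX_WAIT, val, tsp, NULL, 0);
//	}
//
//	static void
//	futexwakeup(uint32 *addr, uint32 cnt)
//	{
//		syscall(SYS_futex, addr, FUTEX_WAKE, cnt, NULL, NULL, 0);
//	}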
enum
{
	MUTEX_UNLOCKED = 0,
	MUTEX_LOCKED = 1,
	MUTEX_SLEEPING = 2,

	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};
// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING.
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
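//
// In outline: runtime_lock first tries an xchg to MUTEX_LOCKED (uncontended
// fast path), then spins trying to CAS the key from MUTEX_UNLOCKED to its
// current wait value, and finally xchgs the key to MUTEX_SLEEPING and sleeps
// on the futex until runtime_unlock stores MUTEX_UNLOCKED and wakes a sleeper.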
void
runtime_lock(Lock *l)
{
	uint32 i, v, wait, spin;

	if(runtime_m()->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	v = runtime_xchg((uint32*)&l->key, MUTEX_LOCKED);
	if(v == MUTEX_UNLOCKED)
		return;

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex.  If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait = v;

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(;;) {
		// Try for lock, spinning.
		for(i = 0; i < spin; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_procyield(ACTIVE_SPIN_CNT);
		}

		// Try for lock, rescheduling.
		for(i = 0; i < PASSIVE_SPIN; i++) {
			while(l->key == MUTEX_UNLOCKED)
				if(runtime_cas((uint32*)&l->key, MUTEX_UNLOCKED, wait))
					return;
			runtime_osyield();
		}

		// Sleep.
		v = runtime_xchg((uint32*)&l->key, MUTEX_SLEEPING);
		if(v == MUTEX_UNLOCKED)
			return;
		wait = MUTEX_SLEEPING;
		runtime_futexsleep((uint32*)&l->key, MUTEX_SLEEPING, -1);
	}
}
void
runtime_unlock(Lock *l)
{
	uint32 v;

	v = runtime_xchg((uint32*)&l->key, MUTEX_UNLOCKED);
	if(v == MUTEX_UNLOCKED)
		runtime_throw("unlock of unlocked lock");
	if(v == MUTEX_SLEEPING)
		runtime_futexwakeup((uint32*)&l->key, 1);

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");
}
// One-time notifications.
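//
// A typical pattern (illustrative; real call sites live elsewhere in the
// runtime): the note is cleared once, one thread sleeps on it, and another
// thread wakes it at most once per clear.
//
//	Note n;
//	runtime_noteclear(&n);	// initialize before handing out &n
//	...
//	runtime_notesleep(&n);	// waiter: blocks until notewakeup has run
//	...
//	runtime_notewakeup(&n);	// signaler: a second wakeup without a clear throws
//
// runtime_notetsleep is the bounded variant: it returns whether n->key was
// set before the ns-nanosecond timeout expired.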
void
runtime_noteclear(Note *n)
{
	n->key = 0;
}
void
runtime_notewakeup(Note *n)
{
	uint32 old;

	old = runtime_xchg((uint32*)&n->key, 1);
	if(old != 0) {
		runtime_printf("notewakeup - double wakeup (%d)\n", old);
		runtime_throw("notewakeup - double wakeup");
	}
	runtime_futexwakeup((uint32*)&n->key, 1);
}
void
runtime_notesleep(Note *n)
{
	M *m = runtime_m();

	/* For gccgo it's OK to sleep in non-g0, and it happens in
	   stoptheworld because we have not implemented preemption.
	if(runtime_g() != runtime_m()->g0)
		runtime_throw("notesleep not on g0");
	*/
	while(runtime_atomicload((uint32*)&n->key) == 0) {
		m->blocked = true;
		runtime_futexsleep((uint32*)&n->key, 0, -1);
		m->blocked = false;
	}
}
static bool
notetsleep(Note *n, int64 ns, int64 deadline, int64 now)
{
	M *m = runtime_m();

	// Conceptually, deadline and now are local variables.
	// They are passed as arguments so that the space for them
	// does not count against our nosplit stack sequence.

	if(ns < 0) {
		while(runtime_atomicload((uint32*)&n->key) == 0) {
			m->blocked = true;
			runtime_futexsleep((uint32*)&n->key, 0, -1);
			m->blocked = false;
		}
		return true;
	}

	if(runtime_atomicload((uint32*)&n->key) != 0)
		return true;

	deadline = runtime_nanotime() + ns;
	for(;;) {
		m->blocked = true;
		runtime_futexsleep((uint32*)&n->key, 0, ns);
		m->blocked = false;
		if(runtime_atomicload((uint32*)&n->key) != 0)
			break;
		now = runtime_nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	return runtime_atomicload((uint32*)&n->key) != 0;
}
bool
runtime_notetsleep(Note *n, int64 ns)
{
	bool res;

	if(runtime_g() != runtime_m()->g0 && !runtime_m()->gcing)
		runtime_throw("notetsleep not on g0");

	res = notetsleep(n, ns, 0, 0);
	return res;
}
// Same as runtime_notetsleep, but called on user g (not g0).
// Calls only nosplit functions between entersyscallblock/exitsyscall.
bool
runtime_notetsleepg(Note *n, int64 ns)
{
	bool res;

	if(runtime_g() == runtime_m()->g0)
		runtime_throw("notetsleepg on g0");

	runtime_entersyscallblock();
	res = notetsleep(n, ns, 0, 0);
	runtime_exitsyscall();
	return res;
}