libgo/go/runtime/lock_sema.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname lock runtime.lock
//go:linkname unlock runtime.unlock
//go:linkname noteclear runtime.noteclear
//go:linkname notewakeup runtime.notewakeup
//go:linkname notesleep runtime.notesleep
//go:linkname notetsleep runtime.notetsleep
//go:linkname notetsleepg runtime.notetsleepg

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.

const (
	mutex_locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
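
// A mutex key encodes both the lock state and the wait queue: the low
// bit is the mutex_locked flag, and the remaining bits are a pointer to
// the m at the head of the list of waiting Ms, linked through
// m.nextwaitm.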
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&mutex_locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = v &^ mutex_locked
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|mutex_locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&mutex_locked == 0 {
					continue Loop
				}
			}
			if v&mutex_locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == mutex_locked {
			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ mutex_locked))
			if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	// 	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications.
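// A note's key holds its state: 0 means the note is cleared, a pointer
// to an m means that m is sleeping on the note, and mutex_locked means
// the note has been signaled by notewakeup.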
func noteclear(n *note) {
	n.key = 0
}
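
// notewakeup signals n, waking the registered sleeper if there is one.
// Calling it twice on the same note without an intervening noteclear
// is not allowed.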
func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, mutex_locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == mutex_locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}
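
// notesleep blocks the calling m, which must be running on g0, until
// the note is signaled by notewakeup.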
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}
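
// notetsleep_internal sleeps on n for at most ns nanoseconds (forever
// if ns < 0) and reports whether the note was signaled rather than
// timed out.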
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case mutex_locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}
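
// notetsleep is like notesleep but gives up after ns nanoseconds.
// It reports whether the note was signaled before the timeout.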
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}