// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build aix darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname lock runtime.lock
//go:linkname unlock runtime.unlock
//go:linkname noteclear runtime.noteclear
//go:linkname notewakeup runtime.notewakeup
//go:linkname notesleep runtime.notesleep
//go:linkname notetsleep runtime.notetsleep
//go:linkname notetsleepg runtime.notetsleepg
// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
const (
	mutex_locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
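
// lock acquires l. The low bit of l.key is the mutex_locked bit; when the
// lock is contended, the remaining bits hold a pointer to the M at the head
// of the list of waiting M's, chained through m.nextwaitm. A contending M
// spins briefly, yields, and finally queues itself and sleeps on its
// semaphore until unlock wakes it.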
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessor's, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&mutex_locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = v &^ mutex_locked
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|mutex_locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&mutex_locked == 0 {
					continue Loop
				}
			}
			if v&mutex_locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}
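
// unlock releases l. If no M's are queued, it simply clears the lock bit;
// otherwise it dequeues the M at the head of the waiter list and wakes it
// with semawakeup.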
//go:nowritebarrier
// We might not be holding a p in this code.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == mutex_locked {
			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ mutex_locked))
			if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications.
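// A note's key field encodes its state: 0 means cleared, mutex_locked means
// a wakeup has been posted, and any other value is a pointer to the M that
// is (or soon will be) sleeping on the note.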
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, mutex_locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == mutex_locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}
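
// notesleep blocks the calling M, which must be running on its g0 stack,
// until another M calls notewakeup on n.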
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	if *cgo_yield == nil {
		semasleep(-1)
	} else {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		const ns = 10e6
		for atomic.Loaduintptr(&n.key) == 0 {
			semasleep(ns)
			asmcgocall(*cgo_yield, nil)
		}
	}
	gp.m.blocked = false
}
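
// notetsleep_internal waits for a wakeup on n for at most ns nanoseconds
// (forever if ns < 0). It reports whether a wakeup was received before
// giving up.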
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		if *cgo_yield == nil {
			semasleep(-1)
		} else {
			// Sleep in arbitrary-but-moderate intervals to poll libc interceptors.
			const ns = 10e6
			for semasleep(ns) < 0 {
				asmcgocall(*cgo_yield, nil)
			}
		}
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case mutex_locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}
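
// notetsleep is like notesleep but gives up after ns nanoseconds. It reports
// whether the note was signaled before the timeout.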
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}