1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // +build dragonfly freebsd linux
10 "runtime/internal/atomic"
14 // For gccgo, while we still have C runtime code, use go:linkname to
15 // export some functions.
19 //go:linkname noteclear
20 //go:linkname notewakeup
21 //go:linkname notesleep
22 //go:linkname notetsleep
23 //go:linkname notetsleepg
25 // This implementation depends on OS-specific implementations of
27 // futexsleep(addr *uint32, val uint32, ns int64)
29 // if *addr == val { sleep }
30 // Might be woken up spuriously; that's allowed.
31 // Don't sleep longer than ns; ns < 0 means forever.
33 // futexwakeup(addr *uint32, cnt uint32)
34 // If any procs are sleeping on addr, wake up at most cnt.
46 // Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
47 // mutex_sleeping means that there is presumably at least one sleeping thread.
48 // Note that there can be spinning threads during all states - they do not
49 // affect mutex's state.
51 // We use the uintptr mutex.key and note.key as a uint32.
// key32 reinterprets a uintptr-typed lock/note key as a *uint32 so the
// 32-bit futex primitives (futexsleep/futexwakeup) can operate on it
// directly. See the file comment: "We use the uintptr mutex.key and
// note.key as a uint32."
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}
61 throw("runtime·lock: lock count")
65 // Speculative grab for lock.
66 v
:= atomic
.Xchg(key32(&l
.key
), mutex_locked
)
67 if v
== mutex_unlocked
{
71 // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
72 // depending on whether there is a thread sleeping
73 // on this mutex. If we ever change l->key from
74 // MUTEX_SLEEPING to some other value, we must be
75 // careful to change it back to MUTEX_SLEEPING before
76 // returning, to ensure that the sleeping thread gets
80 // On uniprocessors, no point spinning.
81 // On multiprocessors, spin for ACTIVE_SPIN attempts.
87 // Try for lock, spinning.
88 for i
:= 0; i
< spin
; i
++ {
89 for l
.key
== mutex_unlocked
{
90 if atomic
.Cas(key32(&l
.key
), mutex_unlocked
, wait
) {
94 procyield(active_spin_cnt
)
97 // Try for lock, rescheduling.
98 for i
:= 0; i
< passive_spin
; i
++ {
99 for l
.key
== mutex_unlocked
{
100 if atomic
.Cas(key32(&l
.key
), mutex_unlocked
, wait
) {
108 v
= atomic
.Xchg(key32(&l
.key
), mutex_sleeping
)
109 if v
== mutex_unlocked
{
112 wait
= mutex_sleeping
113 futexsleep(key32(&l
.key
), mutex_sleeping
, -1)
117 func unlock(l
*mutex
) {
118 v
:= atomic
.Xchg(key32(&l
.key
), mutex_unlocked
)
119 if v
== mutex_unlocked
{
120 throw("unlock of unlocked lock")
122 if v
== mutex_sleeping
{
123 futexwakeup(key32(&l
.key
), 1)
129 throw("runtime·unlock: lock count")
131 // if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
132 // gp.stackguard0 = stackPreempt
136 // One-time notifications.
137 func noteclear(n
*note
) {
141 func notewakeup(n
*note
) {
142 old
:= atomic
.Xchg(key32(&n
.key
), 1)
144 print("notewakeup - double wakeup (", old
, ")\n")
145 throw("notewakeup - double wakeup")
147 futexwakeup(key32(&n
.key
), 1)
150 func notesleep(n
*note
) {
153 throw("notesleep not on g0")
156 if *cgo_yield
!= nil {
157 // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
160 for atomic
.Load(key32(&n
.key
)) == 0 {
162 futexsleep(key32(&n
.key
), 0, ns
)
163 if *cgo_yield
!= nil {
164 asmcgocall(*cgo_yield
, nil)
170 // May run with m.p==nil if called from notetsleep, so write barriers
175 func notetsleep_internal(n
*note
, ns
int64) bool {
179 if *cgo_yield
!= nil {
180 // Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
183 for atomic
.Load(key32(&n
.key
)) == 0 {
185 futexsleep(key32(&n
.key
), 0, ns
)
186 if *cgo_yield
!= nil {
187 asmcgocall(*cgo_yield
, nil)
194 if atomic
.Load(key32(&n
.key
)) != 0 {
198 deadline
:= nanotime() + ns
200 if *cgo_yield
!= nil && ns
> 10e6
{
204 futexsleep(key32(&n
.key
), 0, ns
)
205 if *cgo_yield
!= nil {
206 asmcgocall(*cgo_yield
, nil)
209 if atomic
.Load(key32(&n
.key
)) != 0 {
218 return atomic
.Load(key32(&n
.key
)) != 0
221 func notetsleep(n
*note
, ns
int64) bool {
223 if gp
!= gp
.m
.g0
&& gp
.m
.preemptoff
!= "" {
224 throw("notetsleep not on g0")
227 return notetsleep_internal(n
, ns
)
230 // same as runtime·notetsleep, but called on user g (not g0)
231 // calls only nosplit functions between entersyscallblock/exitsyscall
232 func notetsleepg(n
*note
, ns
int64) bool {
235 throw("notetsleepg on g0")
239 ok
:= notetsleep_internal(n
, ns
)
// beforeIdle is a hook for ports that must run code before an M parks;
// futex-backed platforms have no such work, so it always reports false.
// NOTE(review): body reconstructed (return line was missing from the
// damaged source) — confirm against upstream.
func beforeIdle(int64) bool {
	return false
}
// checkTimeouts is a no-op here: per the futexsleep contract above
// ("Don't sleep longer than ns"), timeouts are enforced by the sleep
// primitive itself, so there is nothing to poll.
func checkTimeouts() {}