[official-gcc.git] / libgo / go / runtime / lock_futex.go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// export some functions.
//
//go:linkname lock
//go:linkname unlock
//go:linkname noteclear
//go:linkname notewakeup
//go:linkname notesleep
//go:linkname notetsleep
//go:linkname notetsleepg
// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
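//
// As an illustrative sketch only (these functions are not defined in this
// file; the real ones are supplied by the OS-specific runtime sources such
// as os_linux.go, and the names futex, _FUTEX_WAIT_PRIVATE,
// _FUTEX_WAKE_PRIVATE, timespec and setNsec are assumptions borrowed from
// the gc runtime), a Linux implementation roughly wraps the futex(2)
// syscall like this:
//
//	func futexsleep(addr *uint32, val uint32, ns int64) {
//		var tsp *timespec
//		if ns >= 0 {
//			var ts timespec
//			ts.setNsec(ns)
//			tsp = &ts
//		}
//		// FUTEX_WAIT sleeps only while *addr still equals val; spurious
//		// wakeups and timeouts are handled by the caller's retry loop.
//		futex(unsafe.Pointer(addr), _FUTEX_WAIT_PRIVATE, val, unsafe.Pointer(tsp), nil, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		// FUTEX_WAKE wakes at most cnt threads blocked on addr.
//		futex(unsafe.Pointer(addr), _FUTEX_WAKE_PRIVATE, cnt, nil, nil, 0)
//	}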
const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}
func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}
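// A minimal usage sketch, assuming a hypothetical runtime-internal mutex
// variable (schedLock below stands in for any mutex guarded this way):
// callers bracket the critical section with lock/unlock and must not block
// while holding it.
//
//	lock(&schedLock)
//	// ... read or modify the state guarded by schedLock ...
//	unlock(&schedLock)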
// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
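// An illustrative sketch of the one-time notification pattern (stopNote is
// hypothetical, not a variable in this file): the note is cleared once, one
// thread sleeps on it on g0, and another thread wakes it exactly once.
//
//	noteclear(&stopNote)
//	// ... another M eventually calls notewakeup(&stopNote) ...
//	notesleep(&stopNote) // blocks until the wakeup arrives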
// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}
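// An illustrative usage sketch for the timed variant (timerNote is
// hypothetical): a user goroutine waits up to 50ms; notetsleepg reports
// whether the wakeup arrived before the timeout.
//
//	if !notetsleepg(&timerNote, 50*1000*1000) { // ns
//		// timed out without a notewakeup
//	}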
func beforeIdle(int64) bool {
	return false
}

func checkTimeouts() {}