libgo: update to Go 1.11
libgo/go/runtime/lock_futex.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.

//go:linkname lock runtime.lock
//go:linkname unlock runtime.unlock
//go:linkname noteclear runtime.noteclear
//go:linkname notewakeup runtime.notewakeup
//go:linkname notesleep runtime.notesleep
//go:linkname notetsleep runtime.notetsleep
//go:linkname notetsleepg runtime.notetsleepg
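
// The general form of the directive links a local name to an external
// symbol. A sketch of how other packages use it (the example below is
// illustrative, not from this file):
//
//	//go:linkname runtimeNano runtime.nanotime
//	func runtimeNano() int64
//
// Here each function is simply linked to its own qualified name, which
// is enough to make gccgo export the symbol.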

// This implementation depends on OS-specific implementations of
//
//	futexsleep(addr *uint32, val uint32, ns int64)
//		Atomically,
//			if *addr == val { sleep }
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	futexwakeup(addr *uint32, cnt uint32)
//		If any procs are sleeping on addr, wake up at most cnt.
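
// On Linux these correspond to the futex(2) syscall. A user-space
// sketch, for illustration only (the constants _FUTEX_WAIT = 0 and
// _FUTEX_WAKE = 1 come from <linux/futex.h>, not from this package;
// the real implementations live in the OS-specific files such as
// os_linux.go and handle timeouts and spurious wakeups):
//
//	func futexsleep(addr *uint32, val uint32) {
//		// FUTEX_WAIT: sleep only if *addr still equals val; the 4th
//		// argument would be a *timespec timeout, 0 means wait forever.
//		syscall.Syscall6(syscall.SYS_FUTEX,
//			uintptr(unsafe.Pointer(addr)), _FUTEX_WAIT, uintptr(val), 0, 0, 0)
//	}
//
//	func futexwakeup(addr *uint32, cnt uint32) {
//		// FUTEX_WAKE: wake at most cnt waiters sleeping on addr.
//		syscall.Syscall6(syscall.SYS_FUTEX,
//			uintptr(unsafe.Pointer(addr)), _FUTEX_WAKE, uintptr(cnt), 0, 0, 0)
//	}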

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.
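
// Illustrative transition sequences, derived from lock and unlock below
// (a sketch, not part of the original file):
//
//	uncontended lock:     unlocked --Xchg--> locked
//	contended lock:       any state --Xchg--> sleeping, then futexsleep
//	unlock, no sleeper:   locked --Xchg--> unlocked
//	unlock with sleeper:  sleeping --Xchg--> unlocked, then futexwakeup(1)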

// We use the uintptr mutex.key and note.key as a uint32.
//go:nosplit
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := atomic.Xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if atomic.Cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = atomic.Xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	v := atomic.Xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	// 	gp.stackguard0 = stackPreempt
	// }
}
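
// Illustrative use (a sketch; sched.lock is a real runtime mutex, but
// the surrounding code is invented): runtime code brackets access to
// shared state with these primitives, roughly
//
//	lock(&sched.lock)
//	// ... read or modify scheduler state ...
//	unlock(&sched.lock)
//
// These locks serve the runtime itself; user code uses sync.Mutex,
// which is built on different, semaphore-based machinery.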

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := atomic.Xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	ns := int64(-1)
	if *cgo_yield != nil {
		// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
		ns = 10e6
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
	}
}
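
// Illustrative lifecycle of a one-time notification (a sketch, not from
// the original file; notesleep must run on g0):
//
//	var n note
//	noteclear(&n)  // arm the note
//	// ... hand &n to another M, which calls notewakeup(&n) when ready ...
//	notesleep(&n)  // block until the wakeup arrives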

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		if *cgo_yield != nil {
			// Sleep for an arbitrary-but-moderate interval to poll libc interceptors.
			ns = 10e6
		}
		for atomic.Load(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, ns)
			if *cgo_yield != nil {
				asmcgocall(*cgo_yield, nil)
			}
			gp.m.blocked = false
		}
		return true
	}

	if atomic.Load(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		if *cgo_yield != nil && ns > 10e6 {
			ns = 10e6
		}
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		gp.m.blocked = false
		if atomic.Load(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomic.Load(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns)
	exitsyscall()
	return ok
}
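
// Illustrative timed wait on a user g (a sketch, not from the original
// file); ns is in nanoseconds, so 100e6 is 100ms:
//
//	if notetsleepg(&n, 100e6) {
//		// woken by notewakeup within the deadline
//	} else {
//		// timed out
//	}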

func pauseSchedulerUntilCallback() bool {
	return false
}

func checkTimeouts() {}