1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package sync provides basic synchronization primitives such as mutual
6 // exclusion locks. Other than the Once and WaitGroup types, most are intended
7 // for use by low-level library routines. Higher-level synchronization is
8 // better done via channels and communication.
10 // Values containing the types defined in this package should not be copied.
// throw crashes the program with the given message.
// The implementation is supplied by the runtime via linkname;
// it never returns. Used here to report mutex state corruption.
func throw(string) // provided by runtime
// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
//
// A Mutex must not be copied after first use.
type Mutex struct {
	// state packs the lock bit, woken bit, starving bit, and the
	// waiter count (shifted by mutexWaiterShift) into one word so it
	// can be updated with a single atomic CAS/add.
	state int32
	// sema is the runtime semaphore used to park and wake waiters.
	sema uint32
}

// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	Lock()
	Unlock()
}
const (
	mutexLocked = 1 << iota // mutex is locked
	mutexWoken              // a waiter has been woken and is contending
	mutexStarving           // mutex is in starvation mode (direct handoff)
	mutexWaiterShift = iota // state >> mutexWaiterShift == number of waiters

	// Mutex fairness.
	//
	// Mutex can be in 2 modes of operations: normal and starvation.
	// In normal mode waiters are queued in FIFO order, but a woken up waiter
	// does not own the mutex and competes with new arriving goroutines over
	// the ownership. New arriving goroutines have an advantage -- they are
	// already running on CPU and there can be lots of them, so a woken up
	// waiter has good chances of losing. In such case it is queued at front
	// of the wait queue. If a waiter fails to acquire the mutex for more than 1ms,
	// it switches mutex to the starvation mode.
	//
	// In starvation mode ownership of the mutex is directly handed off from
	// the unlocking goroutine to the waiter at the front of the queue.
	// New arriving goroutines don't try to acquire the mutex even if it appears
	// to be unlocked, and don't try to spin. Instead they queue themselves at
	// the tail of the wait queue.
	//
	// If a waiter receives ownership of the mutex and sees that either
	// (1) it is the last waiter in the queue, or (2) it waited for less than 1 ms,
	// it switches mutex back to normal operation mode.
	//
	// Normal mode has considerably better performance as a goroutine can acquire
	// a mutex several times in a row even if there are blocked waiters.
	// Starvation mode is important to prevent pathological cases of tail latency.
	starvationThresholdNs = 1e6
)
70 // If the lock is already in use, the calling goroutine
71 // blocks until the mutex is available.
72 func (m
*Mutex
) Lock() {
73 // Fast path: grab unlocked mutex.
74 if atomic
.CompareAndSwapInt32(&m
.state
, 0, mutexLocked
) {
76 race
.Acquire(unsafe
.Pointer(m
))
80 // Slow path (outlined so that the fast path can be inlined)
84 // TryLock tries to lock m and reports whether it succeeded.
86 // Note that while correct uses of TryLock do exist, they are rare,
87 // and use of TryLock is often a sign of a deeper problem
88 // in a particular use of mutexes.
89 func (m
*Mutex
) TryLock() bool {
91 if old
&(mutexLocked|mutexStarving
) != 0 {
95 // There may be a goroutine waiting for the mutex, but we are
96 // running now and can try to grab the mutex before that
97 // goroutine wakes up.
98 if !atomic
.CompareAndSwapInt32(&m
.state
, old
, old|mutexLocked
) {
103 race
.Acquire(unsafe
.Pointer(m
))
108 func (m
*Mutex
) lockSlow() {
109 var waitStartTime
int64
115 // Don't spin in starvation mode, ownership is handed off to waiters
116 // so we won't be able to acquire the mutex anyway.
117 if old
&(mutexLocked|mutexStarving
) == mutexLocked
&& runtime_canSpin(iter
) {
118 // Active spinning makes sense.
119 // Try to set mutexWoken flag to inform Unlock
120 // to not wake other blocked goroutines.
121 if !awoke
&& old
&mutexWoken
== 0 && old
>>mutexWaiterShift
!= 0 &&
122 atomic
.CompareAndSwapInt32(&m
.state
, old
, old|mutexWoken
) {
131 // Don't try to acquire starving mutex, new arriving goroutines must queue.
132 if old
&mutexStarving
== 0 {
135 if old
&(mutexLocked|mutexStarving
) != 0 {
136 new += 1 << mutexWaiterShift
138 // The current goroutine switches mutex to starvation mode.
139 // But if the mutex is currently unlocked, don't do the switch.
140 // Unlock expects that starving mutex has waiters, which will not
141 // be true in this case.
142 if starving
&& old
&mutexLocked
!= 0 {
146 // The goroutine has been woken from sleep,
147 // so we need to reset the flag in either case.
148 if new&mutexWoken
== 0 {
149 throw("sync: inconsistent mutex state")
153 if atomic
.CompareAndSwapInt32(&m
.state
, old
, new) {
154 if old
&(mutexLocked|mutexStarving
) == 0 {
155 break // locked the mutex with CAS
157 // If we were already waiting before, queue at the front of the queue.
158 queueLifo
:= waitStartTime
!= 0
159 if waitStartTime
== 0 {
160 waitStartTime
= runtime_nanotime()
162 runtime_SemacquireMutex(&m
.sema
, queueLifo
, 1)
163 starving
= starving ||
runtime_nanotime()-waitStartTime
> starvationThresholdNs
165 if old
&mutexStarving
!= 0 {
166 // If this goroutine was woken and mutex is in starvation mode,
167 // ownership was handed off to us but mutex is in somewhat
168 // inconsistent state: mutexLocked is not set and we are still
169 // accounted as waiter. Fix that.
170 if old
&(mutexLocked|mutexWoken
) != 0 || old
>>mutexWaiterShift
== 0 {
171 throw("sync: inconsistent mutex state")
173 delta
:= int32(mutexLocked
- 1<<mutexWaiterShift
)
174 if !starving || old
>>mutexWaiterShift
== 1 {
175 // Exit starvation mode.
176 // Critical to do it here and consider wait time.
177 // Starvation mode is so inefficient, that two goroutines
178 // can go lock-step infinitely once they switch mutex
179 // to starvation mode.
180 delta
-= mutexStarving
182 atomic
.AddInt32(&m
.state
, delta
)
193 race
.Acquire(unsafe
.Pointer(m
))
198 // It is a run-time error if m is not locked on entry to Unlock.
200 // A locked Mutex is not associated with a particular goroutine.
201 // It is allowed for one goroutine to lock a Mutex and then
202 // arrange for another goroutine to unlock it.
203 func (m
*Mutex
) Unlock() {
206 race
.Release(unsafe
.Pointer(m
))
209 // Fast path: drop lock bit.
210 new := atomic
.AddInt32(&m
.state
, -mutexLocked
)
212 // Outlined slow path to allow inlining the fast path.
213 // To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
218 func (m
*Mutex
) unlockSlow(new int32) {
219 if (new+mutexLocked
)&mutexLocked
== 0 {
220 throw("sync: unlock of unlocked mutex")
222 if new&mutexStarving
== 0 {
225 // If there are no waiters or a goroutine has already
226 // been woken or grabbed the lock, no need to wake anyone.
227 // In starvation mode ownership is directly handed off from unlocking
228 // goroutine to the next waiter. We are not part of this chain,
229 // since we did not observe mutexStarving when we unlocked the mutex above.
230 // So get off the way.
231 if old
>>mutexWaiterShift
== 0 || old
&(mutexLocked|mutexWoken|mutexStarving
) != 0 {
234 // Grab the right to wake someone.
235 new = (old
- 1<<mutexWaiterShift
) | mutexWoken
236 if atomic
.CompareAndSwapInt32(&m
.state
, old
, new) {
237 runtime_Semrelease(&m
.sema
, false, 1)
243 // Starving mode: handoff mutex ownership to the next waiter, and yield
244 // our time slice so that the next waiter can start to run immediately.
245 // Note: mutexLocked is not set, the waiter will set it after wakeup.
246 // But mutex is still considered locked if mutexStarving is set,
247 // so new coming goroutines won't acquire it.
248 runtime_Semrelease(&m
.sema
, true, 1)