/*
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
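/*
 * Illustrative usage sketch, not part of the mutex implementation: a
 * statically defined mutex serializing updates to a shared counter. The
 * "example_lock", "example_count" and "example_bump_count" names are
 * hypothetical and exist only for this example.
 */
static DEFINE_MUTEX(example_lock);
static unsigned long example_count;

static void __maybe_unused example_bump_count(void)
{
	/* may sleep, so this must not be called from atomic context */
	mutex_lock(&example_lock);
	example_count++;
	mutex_unlock(&example_lock);
}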
static __used noinline void __sched
__mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif

	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */

	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
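/*
 * Illustrative sketch, not part of this file: the *_nested() variants let
 * lockdep accept legitimate nesting of two mutexes of the same lock class
 * by annotating the inner acquisition with a subclass. The "example_node"
 * structure and "example_lock_pair" helper below are hypothetical; the
 * mutexes are assumed to have been set up with mutex_init() and to always
 * nest in parent-then-child order.
 */
struct example_node {
	struct mutex		lock;
	struct example_node	*parent;
};

static void __maybe_unused example_lock_pair(struct example_node *parent,
					     struct example_node *child)
{
	mutex_lock(&parent->lock);
	/* same lock class, different subclass: legitimate nesting */
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... operate on both nodes here ... */

	mutex_unlock(&child->lock);
	mutex_unlock(&parent->lock);
}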
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
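/*
 * Illustrative sketch, not part of this file: an interruptible acquisition
 * on a path (e.g. a syscall) that must stay responsive to signals. The
 * "example_intr_lock", "example_intr_data" and "example_update_interruptible"
 * names are hypothetical.
 */
static DEFINE_MUTEX(example_intr_lock);
static int example_intr_data;

static int __maybe_unused example_update_interruptible(int val)
{
	int err;

	/* returns -EINTR if a signal arrives while we wait for the mutex */
	err = mutex_lock_interruptible(&example_intr_lock);
	if (err)
		return err;

	example_intr_data = val;
	mutex_unlock(&example_intr_lock);

	return 0;
}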
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
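/*
 * Illustrative sketch, not part of this file: because mutex_trylock()
 * follows the spin_trylock() convention (1 on success, 0 on contention),
 * a caller must treat 0 as "did not get the lock". The "example_try_lock",
 * "example_try_hits" and "example_try_bump" names are hypothetical.
 */
static DEFINE_MUTEX(example_try_lock);
static unsigned long example_try_hits;

static int __maybe_unused example_try_bump(void)
{
	if (!mutex_trylock(&example_try_lock))
		return 0;	/* contended: caller may retry or give up */

	example_try_hits++;
	mutex_unlock(&example_try_lock);

	return 1;
}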
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic counter to decrement
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
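/*
 * Illustrative sketch, not part of this file: the typical use of
 * atomic_dec_and_mutex_lock() is dropping a reference count where the
 * final put must do its teardown under a mutex. The "example_obj"
 * structure, "example_obj_lock" and "example_obj_put" names are
 * hypothetical.
 */
struct example_obj {
	atomic_t	refcount;
};

static DEFINE_MUTEX(example_obj_lock);

static void __maybe_unused example_obj_put(struct example_obj *obj)
{
	/* returns 1, with the mutex held, only when the count hits zero */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_obj_lock))
		return;

	/* last reference dropped: tear the object down under the mutex */
	/* ... teardown of *obj would go here ... */
	mutex_unlock(&example_obj_lock);
}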