/*
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/**
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
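/*
 * Usage sketch (illustration only, not part of this file): the two common
 * ways to obtain an initialized mutex - static definition via DEFINE_MUTEX()
 * and runtime initialization via mutex_init(). The names example_lock and
 * struct example_dev are hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);		/* statically defined, starts unlocked */

struct example_dev {
	struct mutex	lock;
};

static void example_dev_setup(struct example_dev *dev)
{
	mutex_init(&dev->lock);			/* runtime init; never call on a locked mutex */
}
#endif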
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
#endif
static void fastcall noinline __sched
__mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
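/*
 * Usage sketch (illustration only): the canonical critical-section pattern.
 * The same task that called mutex_lock() must call mutex_unlock(), the lock
 * must not be taken recursively, and neither call may be made from interrupt
 * context. example_lock and example_count are hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);
static int example_count;

static void example_increment(void)
{
	mutex_lock(&example_lock);	/* may sleep - process context only */
	example_count++;
	mutex_unlock(&example_lock);	/* release by the same task */
}
#endif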
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
					signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
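/*
 * Usage sketch (illustration only): when two mutexes of the same lock class
 * must legitimately be held at once (e.g. a parent/child pair), lockdep is
 * told about the intended nesting via a subclass. SINGLE_DEPTH_NESTING comes
 * from <linux/lockdep.h>; struct example_node is hypothetical.
 */
#if 0
struct example_node {
	struct mutex		lock;
	struct example_node	*parent;
};

static void example_lock_pair(struct example_node *node)
{
	mutex_lock(&node->parent->lock);
	mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
	/* ... work on both objects ... */
	mutex_unlock(&node->lock);
	mutex_unlock(&node->parent->lock);
}
#endif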
/*
 * Release the lock, slowpath:
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
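/*
 * Usage sketch (illustration only): mutex_lock_interruptible() returns 0 on
 * success and -EINTR if a signal arrived while sleeping, so callers must
 * check the return value and back out without touching the protected state.
 * example_lock is hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static int example_do_work(void)
{
	if (mutex_lock_interruptible(&example_lock))
		return -EINTR;		/* interrupted by a signal, lock not held */
	/* ... critical section ... */
	mutex_unlock(&example_lock);
	return 0;
}
#endif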
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the opposite of down_trylock()'s! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
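/*
 * Usage sketch (illustration only): mutex_trylock() follows the
 * spin_trylock() convention - nonzero means the lock was taken - which is
 * the opposite of down_trylock(). example_lock is hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static void example_poll(void)
{
	if (!mutex_trylock(&example_lock))
		return;			/* contended - do not touch shared state */
	/* ... critical section ... */
	mutex_unlock(&example_lock);
}
#endif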