 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define	MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)
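/*
 * Illustrative note (not part of the original file): the count field is used
 * as a tri-state,
 *
 *	 1	unlocked
 *	 0	locked, no waiters
 *	<0	locked, waiters sleeping on wait_list
 *
 * so MUTEX_SHOW_NO_WAITER() just asks whether the count is still
 * non-negative, i.e. whether no waiter has recorded itself yet. A minimal
 * sketch of the inverse check (example_mutex_has_sleepers is hypothetical):
 */
#if 0
static bool example_mutex_has_sleepers(struct mutex *lock)
{
	/* a negative count means at least one task parked itself to sleep */
	return atomic_read(&lock->count) < 0;
}
#endif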
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
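/*
 * Illustrative sketch (not part of the original file): callers normally do
 * not call __mutex_init() directly; they either define a mutex statically or
 * run the mutex_init() wrapper from <linux/mutex.h> before first use. The
 * names example_lock, example_dev and example_probe are hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);		/* statically defined, ready to use */

struct example_dev {
	struct mutex io_lock;
};

static int example_probe(struct example_dev *dev)
{
	/* a dynamically allocated mutex must be initialized before locking */
	mutex_init(&dev->io_lock);
	return 0;
}
#endif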
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
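/*
 * Illustrative sketch (not part of the original file): typical use of the API
 * documented above. example_lock and example_count are hypothetical; the
 * rules being shown are lock/unlock from the same task, no recursion, and no
 * freeing of the mutex while it is held.
 */
#if 0
static DEFINE_MUTEX(example_lock);
static unsigned long example_count;

static void example_bump(void)
{
	mutex_lock(&example_lock);	/* may sleep until the mutex is free */
	example_count++;		/* critical section */
	mutex_unlock(&example_lock);	/* released by the same task */
}
#endif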
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners from acquiring the mutex
 * more or less simultaneously, the spinners need to acquire a MCS lock
 * first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}
static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
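/*
 * Illustrative sketch (not part of the original file): each would-be spinner
 * queues itself with an mspin_node that lives on its own stack, so the
 * pattern used by the slowpath further down is essentially the following.
 */
#if 0
static void example_queued_section(struct mutex *m)
{
	struct mspin_node node;		/* per-task queue node, on the stack */

	mspin_lock(MLOCK(m), &node);	/* spin until the previous holder hands over */
	/* at most one task per mutex reaches this point at a time */
	mspin_unlock(MLOCK(m), &node);	/* pass the MCS lock to node's successor */
}
#endif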
/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */

static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
	 * lock->owner still matches owner, if that fails, owner might
	 * point to free()d memory, if it still matches, the rcu_read_lock()
	 * ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}
/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * if lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet or the mutex has been released.
	 */
	return retval;
}
#endif
static __used noinline void __sched
__mutex_unlock_slowpath(atomic_t *lock_count);
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
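/*
 * Illustrative sketch (not part of the original file): the acquire-context
 * lifecycle the comment above refers to. example_class, example_a and
 * example_b are hypothetical and assumed to have been set up with
 * ww_mutex_init(..., &example_class); -EDEADLK backoff is omitted here and
 * sketched further down, next to the deadlock-injection helpers.
 */
#if 0
static DEFINE_WW_CLASS(example_class);
static struct ww_mutex example_a, example_b;

static void example_transaction(void)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &example_class);
	ww_mutex_lock(&example_a, &ctx);
	ww_mutex_lock(&example_b, &ctx);	/* same ctx for every lock in the set */
	ww_acquire_done(&ctx);			/* no further locks will be taken */

	/* ... work on both objects ... */

	ww_mutex_unlock(&example_b);		/* all locks dropped before ... */
	ww_mutex_unlock(&example_a);
	ww_acquire_fini(&ctx);			/* ... the context is released */
}
#endif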
static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
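/*
 * Illustrative note (not part of the original file): the stamp test above is
 * a wrap-safe "is ctx younger than hold_ctx" check on unsigned sequence
 * numbers, e.g.
 *
 *	ctx->stamp = 1002, hold_ctx->stamp = 1000
 *		-> 1002 - 1000 = 2 <= LONG_MAX, ctx is younger, back off (-EDEADLK)
 *	ctx->stamp = 1000, hold_ctx->stamp = 1002
 *		-> the subtraction wraps to a value > LONG_MAX, ctx is older, keep waiting
 *
 * The "ctx > hold_ctx" pointer comparison only breaks ties if both contexts
 * somehow carry the same stamp.
 */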
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}
/*
 * after acquiring lock with fastpath or when we lost out in contested
 * slowpath, set ctx and wake up any waiters so they can recheck.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;
	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				goto slowpath;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			goto slowpath;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (!__builtin_constant_p(ww_ctx == NULL)) {
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			goto slowpath;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;
		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	if (!__builtin_constant_p(ww_ctx == NULL)) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
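/*
 * Illustrative sketch (not part of the original file): the _nested variant
 * tells lockdep that taking two mutexes of the same lock class in a fixed
 * parent -> child order is intentional. example_lock_both is hypothetical;
 * SINGLE_DEPTH_NESTING is the usual subclass for the inner lock.
 */
#if 0
static void example_lock_both(struct mutex *parent, struct mutex *child)
{
	mutex_lock(parent);				/* subclass 0 */
	mutex_lock_nested(child, SINGLE_DEPTH_NESTING);	/* subclass 1, no lockdep splat */

	/* ... both objects are stable here ... */

	mutex_unlock(child);
	mutex_unlock(parent);
}
#endif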
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
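/*
 * Illustrative note (not part of the original file): the interval update
 * above grows the injection period by roughly 3.5x each time, in integer
 * arithmetic, until it saturates at UINT_MAX. Starting from 1, for example:
 *
 *	tmp = tmp*2 + tmp + tmp/2
 *	1 -> 3 -> 10 -> 35 -> 122 -> 427 -> ...
 *
 * so the artificial -EDEADLK gets rarer over the lifetime of the acquire
 * context while still exercising the backoff path early on.
 */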
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);
	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);

	if (!ret && ctx->acquired > 0)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
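/*
 * Illustrative sketch (not part of the original file): how a caller is
 * expected to react to -EDEADLK, matching the expectations checked in
 * ww_mutex_lock_acquired() above: drop every lock held under the context,
 * sleep on the contended one with ww_mutex_lock_slow(), then retry.
 * example_lock_both is hypothetical.
 */
#if 0
static int example_lock_both(struct ww_mutex *a, struct ww_mutex *b,
			     struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = ww_mutex_lock(a, ctx);
	if (ret)
		return ret;

	ret = ww_mutex_lock(b, ctx);
	while (ret == -EDEADLK) {
		/* back off: release the lock we already hold */
		ww_mutex_unlock(a);
		/* sleep until the contended lock is free, then take it */
		ww_mutex_lock_slow(b, ctx);
		/* retry the remaining lock; this may return -EDEADLK again */
		swap(a, b);
		ret = ww_mutex_lock(b, ctx);
	}
	return ret;
}
#endif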
/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
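/*
 * Illustrative sketch (not part of the original file): an interruptible
 * acquisition must check the return value, because a signal can abort the
 * sleep before the mutex is taken. example_lock and example_do_work are
 * hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static int example_locked_work(void)
{
	int ret;

	ret = mutex_lock_interruptible(&example_lock);
	if (ret)
		return ret;		/* -EINTR: interrupted, lock NOT held */

	example_do_work();		/* lock held here */
	mutex_unlock(&example_lock);
	return 0;
}
#endif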
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

#endif
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
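/*
 * Illustrative sketch (not part of the original file): mutex_trylock()
 * follows the spin_trylock() convention, so 1 means "lock taken" and 0 means
 * "contended", the opposite of down_trylock(). example_lock and
 * example_flush are hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);

static void example_opportunistic_flush(void)
{
	if (!mutex_trylock(&example_lock))
		return;			/* someone else holds it; don't wait */

	example_flush();		/* lock held here */
	mutex_unlock(&example_lock);
}
#endif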
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
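/*
 * Illustrative sketch (not part of the original file): the classic use is a
 * reference count whose final put must run teardown under a mutex.
 * example_obj, example_lock and example_destroy are hypothetical.
 */
#if 0
static DEFINE_MUTEX(example_lock);

struct example_obj {
	atomic_t refcount;
};

static void example_put(struct example_obj *obj)
{
	/* returns 1, with example_lock held, only for the final reference */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &example_lock))
		return;

	example_destroy(obj);		/* last reference is gone */
	mutex_unlock(&example_lock);
}
#endif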