/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner	bit1	bit0
 * NULL		0	0	lock is free (fast acquire possible)
 * NULL		0	1	invalid state
 * NULL		1	0	Transitional state*
 * NULL		1	1	invalid state
 * taskpointer	0	0	lock is held (fast release possible)
 * taskpointer	0	1	task is pending owner
 * taskpointer	1	0	lock is held and has waiters
 * taskpointer	1	1	task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority)
 * waiter of the lock, when the lock is released. The thread is woken
 * up and can now take the lock. Until the lock is taken (bit 0
 * cleared) a competing higher priority thread can steal the lock
 * which puts the woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 and 1 of lock->owner are 0.
 *
 * (*) There's a small time where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set
 * this bit before looking at the lock, hence the reason this is a
 * transitional state.
 */

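/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): decoding lock->owner under the encoding above, assuming the
 * usual bit macros from rtmutex_common.h (RT_MUTEX_OWNER_PENDING for
 * bit 0, RT_MUTEX_HAS_WAITERS for bit 1):
 *
 *	unsigned long val = (unsigned long)lock->owner;
 *	struct task_struct *owner = (struct task_struct *)
 *		(val & ~(RT_MUTEX_OWNER_PENDING | RT_MUTEX_HAS_WAITERS));
 *	int has_waiters = !!(val & RT_MUTEX_HAS_WAITERS);
 *	int owner_pending = !!(val & RT_MUTEX_OWNER_PENDING);
 *
 * The real accessors live in rtmutex_common.h (rt_mutex_owner(),
 * rt_mutex_owner_pending(), rt_mutex_has_waiters()).
 */
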
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
		   unsigned long mask)
{
	unsigned long val = (unsigned long)owner | mask;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

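/*
 * Editor's note, a worked example with assumed values (not from the
 * original source): a task whose normal_prio is 120 (SCHED_NORMAL)
 * and whose highest priority PI waiter has prio 54 (a SCHED_FIFO
 * task) gets min(54, 120) == 54, i.e. it runs at the waiter's
 * priority while the waiter list is non-empty and falls back to 120
 * afterwards. Lower numeric prio values mean higher priority.
 */
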
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(task_t *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter
				      __IP_DECL__)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);
	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       current->comm, current->pid);
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before!
	 */
	spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter || !waiter->task)
		goto out_unlock_pi;

	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check whether further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!spin_trylock(&lock->wait_lock)) {
		spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == current) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	get_task_struct(task);
	spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	spin_unlock(&lock->wait_lock);

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}

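/*
 * Editor's illustration of one chain walk (scenario assumed, not from
 * the original source): task A (prio 10) blocks on L1, whose owner B
 * (prio 50) is itself blocked on L2, owned by C (prio 50).
 * task_blocks_on_rt_mutex() boosts B to 10 and calls
 * rt_mutex_adjust_prio_chain() on B; the walk requeues B's waiter on
 * L2 with the new priority, boosts C to 10 via
 * __rt_mutex_adjust_prio(), and then follows C->pi_blocked_on in the
 * next iteration until the chain ends or max_lock_depth is hit.
 * Lower numeric prio values mean higher priority.
 */
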
/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
	struct task_struct *pendowner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *next;
	unsigned long flags;

	if (!rt_mutex_owner_pending(lock))
		return 0;

	if (pendowner == current)
		return 1;

	spin_lock_irqsave(&pendowner->pi_lock, flags);
	if (current->prio >= pendowner->prio) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 0;
	}

	/*
	 * Check if a waiter is enqueued on the pending owners
	 * pi_waiters list. Remove it and readjust pending owners
	 * priority.
	 */
	if (likely(!rt_mutex_has_waiters(lock))) {
		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
		return 1;
	}

	/* No chain handling, pending owner is not blocked on anything: */
	next = rt_mutex_top_waiter(lock);
	plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
	__rt_mutex_adjust_prio(pendowner);
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	/*
	 * We are going to steal the lock and a waiter was
	 * enqueued on the pending owners pi_waiters queue. So
	 * we have to enqueue this waiter into
	 * current->pi_waiters list. This covers the case,
	 * where current is boosted because it holds another
	 * lock and gets unboosted because the booster is
	 * interrupted, so we would delay a waiter with higher
	 * priority than current->normal_prio.
	 *
	 * Note: in the rare case of a SCHED_OTHER task changing
	 * its priority and thus stealing the lock, next->task
	 * might be current:
	 */
	if (likely(next->task != current)) {
		spin_lock_irqsave(&current->pi_lock, flags);
		plist_add(&next->pi_list_entry, &current->pi_waiters);
		__rt_mutex_adjust_prio(current);
		spin_unlock_irqrestore(&current->pi_lock, flags);
	}
	return 1;
}

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
{
	/*
	 * We have to be careful here if the atomic speedups are
	 * enabled, such that, when
	 *  - no other waiter is on the lock
	 *  - the lock has been released since we did the cmpxchg
	 * the lock can be released or taken while we are doing the
	 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
	 *
	 * The atomic acquire/release aware variant of
	 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
	 * the WAITERS bit, the atomic release / acquire can not
	 * happen anymore and lock->wait_lock protects us from the
	 * non-atomic case.
	 *
	 * Note, that this might set lock->owner =
	 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
	 * any more. This is fixed up when we take the ownership.
	 * This is the transitional state explained at the top of this file.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
		return 0;

	/* We got the lock. */
	debug_rt_mutex_lock(lock __IP__);

	rt_mutex_set_owner(lock, current, 0);

	rt_mutex_deadlock_account_lock(lock, current);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   int detect_deadlock
				   __IP_DECL__)
{
	struct rt_mutex_waiter *top_waiter = waiter;
	task_t *owner = rt_mutex_owner(lock);
	int boost = 0, res;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	__rt_mutex_adjust_prio(current);
	waiter->task = current;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, current->prio);
	plist_node_init(&waiter->pi_list_entry, current->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	current->pi_blocked_on = waiter;

	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
		spin_lock_irqsave(&owner->pi_lock, flags);
		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	if (!boost)
		return 0;

	spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter
					 __IP__);

	spin_lock(&lock->wait_lock);

	return res;
}

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks waiter list and from
 * the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	struct task_struct *pendowner;
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);
	plist_del(&waiter->list_entry, &lock->wait_list);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
	pendowner = waiter->task;
	waiter->task = NULL;

	rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

	spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Clear the pi_blocked_on variable and enqueue a possible
	 * waiter into the pi_waiters list of the pending owner. This
	 * prevents that in case the pending owner gets unboosted a
	 * waiter with higher priority than pending-owner->normal_prio
	 * is blocked on the unboosted (pending) owner.
	 */
	spin_lock_irqsave(&pendowner->pi_lock, flags);

	WARN_ON(!pendowner->pi_blocked_on);
	WARN_ON(pendowner->pi_blocked_on != waiter);
	WARN_ON(pendowner->pi_blocked_on->lock != lock);

	pendowner->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock)) {
		struct rt_mutex_waiter *next;

		next = rt_mutex_top_waiter(lock);
		plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
	}
	spin_unlock_irqrestore(&pendowner->pi_lock, flags);

	wake_up_process(pendowner);
}

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter __IP_DECL__)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	int boost = 0;
	task_t *owner = rt_mutex_owner(lock);
	unsigned long flags;

	spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->task = NULL;
	current->pi_blocked_on = NULL;
	spin_unlock_irqrestore(&current->pi_lock, flags);

	if (first && owner != current) {

		spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on) {
			boost = 1;
			get_task_struct(owner);
		}
		spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!boost)
		return;

	spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);

	spin_lock(&lock->wait_lock);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock __IP_DECL__)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	waiter.task = NULL;

	spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock __IP__)) {
		spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start(&timeout->timer, timeout->timer.expires,
			      HRTIMER_ABS);

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock __IP__))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		/*
		 * waiter.task is NULL the first time we come here and
		 * when we have been woken up by the previous owner
		 * but the lock got stolen by a higher prio task.
		 */
		if (!waiter.task) {
			ret = task_blocks_on_rt_mutex(lock, &waiter,
						      detect_deadlock __IP__);
			/*
			 * If we got woken up by the owner then start loop
			 * all over without going into schedule to try
			 * to get the lock now:
			 */
			if (unlikely(!waiter.task))
				continue;
			if (unlikely(ret))
				break;
		}

		spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(&waiter);

		if (waiter.task)
			schedule_rt_mutex(lock);

		spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	set_current_state(TASK_RUNNING);

	if (unlikely(waiter.task))
		remove_waiter(lock, &waiter __IP__);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	/*
	 * Readjust priority, when we did not get the lock. We might
	 * have been the pending owner and boosted. Since we did not
	 * take the lock, the PI boost has to go.
	 */
	if (unlikely(ret))
		rt_mutex_adjust_prio(current);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
{
	int ret = 0;

	spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock __IP__);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock __IP_DECL__))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock __IP_DECL__))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock __RET_IP__);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible;
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	spin_lock_init(&lock->wait_lock);
	plist_head_init(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

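/*
 * Editor's sketch of typical usage of the public API above
 * (illustrative only, not from the original source; in-kernel users
 * would normally use rt_mutex_init() / DEFINE_RT_MUTEX() from
 * <linux/rtmutex.h> rather than calling __rt_mutex_init() directly):
 *
 *	static struct rt_mutex my_lock;
 *
 *	__rt_mutex_init(&my_lock, "my_lock");
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, priority-boosted by any waiters ...
 *	rt_mutex_unlock(&my_lock);
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		... got the lock without blocking ...
 *		rt_mutex_unlock(&my_lock);
 *	}
 */
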
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
	rt_mutex_set_owner(lock, proxy_owner, 0);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL, 0);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * in the fast path.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}