/*
 * Source provenance: kernel/rtmutex_common.h
 * (extracted from linux-2.6 / ibm-acpi-2.6.git, blob 242ec7ee740b0aae5b624c89df610084667c28b1)
 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H
#include <linux/rtmutex.h>
/*
 * The rtmutex in kernel tester is independent of rtmutex debugging. We
 * call schedule_rt_mutex_test() instead of schedule() for the tasks which
 * belong to the tester. That way we can delay the wakeup path of those
 * threads to provoke lock stealing and testing of complex boosting scenarios.
 */
#ifdef CONFIG_RT_MUTEX_TESTER

extern void schedule_rt_mutex_test(struct rt_mutex *lock);

/*
 * Tester tasks (PF_MUTEX_TESTER set in current->flags) are routed through
 * the test hook; everybody else schedules normally.
 */
#define schedule_rt_mutex(_lock)				\
  do {								\
	if (!(current->flags & PF_MUTEX_TESTER))		\
		schedule();					\
	else							\
		schedule_rt_mutex_test(_lock);			\
  } while (0)

#else
# define schedule_rt_mutex(_lock)			schedule()
#endif
40 * This is the control structure for tasks blocked on a rt_mutex,
41 * which is allocated on the kernel stack on of the blocked task.
43 * @list_entry: pi node to enqueue into the mutex waiters list
44 * @pi_list_entry: pi node to enqueue into the mutex owner waiters list
45 * @task: task reference to the blocked task
47 struct rt_mutex_waiter {
48 struct plist_node list_entry;
49 struct plist_node pi_list_entry;
50 struct task_struct *task;
51 struct rt_mutex *lock;
52 #ifdef CONFIG_DEBUG_RT_MUTEXES
53 unsigned long ip;
54 pid_t deadlock_task_pid;
55 struct rt_mutex *deadlock_lock;
56 #endif
60 * Various helpers to access the waiters-plist:
62 static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
64 return !plist_head_empty(&lock->wait_list);
67 static inline struct rt_mutex_waiter *
68 rt_mutex_top_waiter(struct rt_mutex *lock)
70 struct rt_mutex_waiter *w;
72 w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
73 list_entry);
74 BUG_ON(w->lock != lock);
76 return w;
79 static inline int task_has_pi_waiters(struct task_struct *p)
81 return !plist_head_empty(&p->pi_waiters);
84 static inline struct rt_mutex_waiter *
85 task_top_pi_waiter(struct task_struct *p)
87 return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
88 pi_list_entry);
/*
 * lock->owner state tracking:
 *
 * The two low bits of lock->owner carry state; the remaining bits are
 * the owner task_struct pointer (see rt_mutex_owner() below).
 */
#define RT_MUTEX_OWNER_PENDING	1UL
#define RT_MUTEX_HAS_WAITERS	2UL
#define RT_MUTEX_OWNER_MASKALL	3UL
98 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
100 return (struct task_struct *)
101 ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
104 static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
106 return (struct task_struct *)
107 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
110 static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
112 return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
116 * We can speed up the acquire/release, if the architecture
117 * supports cmpxchg and if there's no debugging state to be set up
119 #if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
120 # define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
121 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
123 unsigned long owner, *p = (unsigned long *) &lock->owner;
125 do {
126 owner = *p;
127 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
129 #else
130 # define rt_mutex_cmpxchg(l,c,n) (0)
131 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
133 lock->owner = (struct task_struct *)
134 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
136 #endif
139 * PI-futex support (proxy locking functions, etc.):
141 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
142 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
143 struct task_struct *proxy_owner);
144 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
145 struct task_struct *proxy_owner);
147 extern void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
148 unsigned long mask);
149 extern void __rt_mutex_adjust_prio(struct task_struct *task);
150 extern int rt_mutex_adjust_prio_chain(struct task_struct *task,
151 int deadlock_detect,
152 struct rt_mutex *orig_lock,
153 struct rt_mutex_waiter *orig_waiter,
154 struct task_struct *top_task);
155 extern void remove_waiter(struct rt_mutex *lock,
156 struct rt_mutex_waiter *waiter);
157 #endif