/*
 * Source: nao-ulib.git / kernel/2.6.29.6-aldebaran-rt/include/linux/mutex.h
 * (blob b51c55806c5199ddabf4d57d500ff347f653b0a2)
 */
/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
10 #ifndef __LINUX_MUTEX_H
11 #define __LINUX_MUTEX_H
13 #include <linux/list.h>
14 #include <linux/spinlock_types.h>
15 #include <linux/rt_lock.h>
16 #include <linux/linkage.h>
17 #include <linux/lockdep.h>
19 #include <asm/atomic.h>
21 #ifdef CONFIG_DEBUG_LOCK_ALLOC
22 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
23 , .dep_map = { .name = #lockname }
24 #else
25 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
26 #endif
28 #ifdef CONFIG_PREEMPT_RT
30 #include <linux/rtmutex.h>
32 struct mutex {
33 struct rt_mutex lock;
34 #ifdef CONFIG_DEBUG_LOCK_ALLOC
35 struct lockdep_map dep_map;
36 #endif
40 #define __MUTEX_INITIALIZER(mutexname) \
41 { \
42 .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
43 __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
46 #define DEFINE_MUTEX(mutexname) \
47 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
49 extern void
50 __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key);
52 extern void __lockfunc _mutex_lock(struct mutex *lock);
53 extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
54 extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
55 extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
56 extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
57 extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
58 extern int __lockfunc _mutex_trylock(struct mutex *lock);
59 extern void __lockfunc _mutex_unlock(struct mutex *lock);
/*
 * Map the generic mutex API onto the rt_mutex based implementation.
 */
#define mutex_is_locked(l)		rt_mutex_is_locked(&(l)->lock)
#define mutex_lock(l)			_mutex_lock(l)
#define mutex_lock_interruptible(l)	_mutex_lock_interruptible(l)
#define mutex_lock_killable(l)		_mutex_lock_killable(l)
#define mutex_trylock(l)		_mutex_trylock(l)
#define mutex_unlock(l)			_mutex_unlock(l)
#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)

/*
 * The _nested variants only matter to lockdep; without it the subclass
 * argument is dropped and the plain operations are used.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
# define mutex_lock_interruptible_nested(l, s) \
					_mutex_lock_interruptible_nested(l, s)
# define mutex_lock_killable_nested(l, s) \
					_mutex_lock_killable_nested(l, s)
#else
# define mutex_lock_nested(l, s)	_mutex_lock(l)
# define mutex_lock_interruptible_nested(l, s) \
					_mutex_lock_interruptible(l)
# define mutex_lock_killable_nested(l, s) \
					_mutex_lock_killable(l)
#endif
/*
 * mutex_init - initialize the mutex at run time.
 *
 * The static lock_class_key gives every mutex_init() call site its
 * own lockdep class.
 */
# define mutex_init(mutex)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)
90 #else /* PREEMPT_RT */
/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
120 struct mutex {
121 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
122 atomic_t count;
123 spinlock_t wait_lock;
124 struct list_head wait_list;
125 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
126 struct thread_info *owner;
127 #endif
128 #ifdef CONFIG_DEBUG_MUTEXES
129 const char *name;
130 void *magic;
131 #endif
132 #ifdef CONFIG_DEBUG_LOCK_ALLOC
133 struct lockdep_map dep_map;
134 #endif
138 * This is the control structure for tasks blocked on mutex,
139 * which resides on the blocked task's kernel stack:
141 struct mutex_waiter {
142 struct list_head list;
143 struct task_struct *task;
144 #ifdef CONFIG_DEBUG_MUTEXES
145 void *magic;
146 #endif
#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
/*
 * mutex_init - initialize the mutex at run time.
 *
 * The static lock_class_key gives every mutex_init() call site its
 * own lockdep class.
 */
# define mutex_init(mutex)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)
/* Non-debug build: destruction is a no-op. */
# define mutex_destroy(mutex)				do { } while (0)
#endif
/* Static initializer: count of 1 (unlocked), empty wait list. */
#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);
176 * mutex_is_locked - is the mutex locked
177 * @lock: the mutex to be queried
179 * Returns 1 if the mutex is locked, 0 if unlocked.
181 static inline int mutex_is_locked(struct mutex *lock)
183 return atomic_read(&lock->count) != 1;
187 * See kernel/mutex.c for detailed documentation of these APIs.
188 * Also see Documentation/mutex-design.txt.
190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
191 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
192 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
193 unsigned int subclass);
194 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
195 unsigned int subclass);
197 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
198 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
199 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
200 #else
201 extern void mutex_lock(struct mutex *lock);
202 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
203 extern int __must_check mutex_lock_killable(struct mutex *lock);
205 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
206 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
207 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
208 #endif
211 * NOTE: mutex_trylock() follows the spin_trylock() convention,
212 * not the down_trylock() convention!
214 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
216 extern int mutex_trylock(struct mutex *lock);
217 extern void mutex_unlock(struct mutex *lock);
219 #endif /* !PREEMPT_RT */
222 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
223 * @cnt: the atomic which we are to dec
224 * @lock: the mutex to return holding if we dec to 0
226 * return true and hold lock if we dec to 0, return false otherwise
228 static inline int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
230 /* dec if we can't possibly hit 0 */
231 if (atomic_add_unless(cnt, -1, 1))
232 return 0;
233 /* we might hit 0, so take the lock */
234 mutex_lock(lock);
235 if (!atomic_dec_and_test(cnt)) {
236 /* when we actually did the dec, we didn't hit 0 */
237 mutex_unlock(lock);
238 return 0;
240 /* we hit 0, and we hold the lock */
241 return 1;
244 #endif