added 2.6.29.6 aldebaran kernel
[nao-ulib.git] / kernel / 2.6.29.6-aldebaran-rt / include / linux / rt_lock.h
blob 18b575b686564808ba98a7553cd228a186f0632c
1 #ifndef __LINUX_RT_LOCK_H
2 #define __LINUX_RT_LOCK_H
4 /*
5 * Real-Time Preemption Support
7 * started by Ingo Molnar:
9 * Copyright (C) 2004, 2005 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
11 * This file contains the main data structure definitions.
13 #include <linux/rtmutex.h>
14 #include <asm/atomic.h>
15 #include <linux/spinlock_types.h>
17 #ifdef CONFIG_PREEMPT_RT
18 # define preempt_rt 1
20 * spinlocks - an RT mutex plus lock-break field:
22 typedef struct {
23 struct rt_mutex lock;
24 unsigned int break_lock;
25 #ifdef CONFIG_DEBUG_LOCK_ALLOC
26 struct lockdep_map dep_map;
27 #endif
28 } spinlock_t;
#ifdef CONFIG_DEBUG_RT_MUTEXES
# define __RT_SPIN_INITIALIZER(name) \
	{ .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
	  .save_state = 1, \
	  .file = __FILE__, \
	  .line = __LINE__, }
#else
# define __RT_SPIN_INITIALIZER(name) \
	{ .wait_lock = _RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) }
#endif

#define __SPIN_LOCK_UNLOCKED(name) (spinlock_t) \
	{ .lock = __RT_SPIN_INITIALIZER(name), \
	  SPIN_DEP_MAP_INIT(name) }
45 #else /* !PREEMPT_RT */
47 typedef raw_spinlock_t spinlock_t;
49 #define __SPIN_LOCK_UNLOCKED _RAW_SPIN_LOCK_UNLOCKED
51 #endif
53 #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(spin_old_style)
56 #define __DEFINE_SPINLOCK(name) \
57 spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
59 #define DEFINE_SPINLOCK(name) \
60 spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
62 #ifdef CONFIG_PREEMPT_RT
65 * RW-semaphores are a spinlock plus a reader-depth count.
67 * Note that the semantics are different from the usual
68 * Linux rw-sems, in PREEMPT_RT mode we do not allow
69 * multiple readers to hold the lock at once, we only allow
70 * a read-lock owner to read-lock recursively. This is
71 * better for latency, makes the implementation inherently
72 * fair and makes it simpler as well:
74 struct rw_semaphore {
75 struct rt_mutex lock;
76 int read_depth;
77 #ifdef CONFIG_DEBUG_LOCK_ALLOC
78 struct lockdep_map dep_map;
79 #endif
83 * rwlocks - an RW semaphore plus lock-break field:
85 typedef struct {
86 struct rt_mutex lock;
87 int read_depth;
88 unsigned int break_lock;
89 #ifdef CONFIG_DEBUG_LOCK_ALLOC
90 struct lockdep_map dep_map;
91 #endif
92 } rwlock_t;
94 #define __RW_LOCK_UNLOCKED(name) (rwlock_t) \
95 { .lock = __RT_SPIN_INITIALIZER(name), \
96 RW_DEP_MAP_INIT(name) }
97 #else /* !PREEMPT_RT */
99 typedef raw_rwlock_t rwlock_t;
101 #define __RW_LOCK_UNLOCKED _RAW_RW_LOCK_UNLOCKED
103 #endif
105 #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(rw_old_style)
108 #define DEFINE_RWLOCK(name) \
109 rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
111 #ifdef CONFIG_PREEMPT_RT
114 * Semaphores - a spinlock plus the semaphore count:
116 struct semaphore {
117 atomic_t count;
118 struct rt_mutex lock;
121 #define DECLARE_MUTEX(name) \
122 struct semaphore name = \
123 { .count = { 1 }, .lock = __RT_MUTEX_INITIALIZER(name.lock) }
extern void
__sema_init(struct semaphore *sem, int val, char *name, char *file, int line);

/* Initializer that records the call site for RT-mutex debugging: */
#define rt_sema_init(sem, val) \
		__sema_init(sem, val, #sem, __FILE__, __LINE__)

extern void
__init_MUTEX(struct semaphore *sem, char *name, char *file, int line);
#define rt_init_MUTEX(sem) \
		__init_MUTEX(sem, #sem, __FILE__, __LINE__)

extern void there_is_no_init_MUTEX_LOCKED_for_RT_semaphores(void);

/*
 * No locked initialization for RT semaphores
 * (deliberate link-time error if anyone tries):
 */
#define rt_init_MUTEX_LOCKED(sem) \
		there_is_no_init_MUTEX_LOCKED_for_RT_semaphores()
extern void rt_down(struct semaphore *sem);
extern int rt_down_interruptible(struct semaphore *sem);
extern int rt_down_timeout(struct semaphore *sem, long jiffies);
extern int rt_down_trylock(struct semaphore *sem);
extern void rt_up(struct semaphore *sem);

#define rt_sem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)
#define rt_sema_count(s)	atomic_read(&(s)->count)
152 extern int __bad_func_type(void);
154 #include <linux/pickop.h>
157 * PICK_SEM_OP() is a small redirector to allow less typing of the lock
158 * types struct compat_semaphore, struct semaphore, at the front of the
159 * PICK_FUNCTION macro.
161 #define PICK_SEM_OP(...) PICK_FUNCTION(struct compat_semaphore *, \
162 struct semaphore *, ##__VA_ARGS__)
163 #define PICK_SEM_OP_RET(...) PICK_FUNCTION_RET(struct compat_semaphore *,\
164 struct semaphore *, ##__VA_ARGS__)
166 #define sema_init(sem, val) \
167 PICK_SEM_OP(compat_sema_init, rt_sema_init, sem, val)
169 #define init_MUTEX(sem) PICK_SEM_OP(compat_init_MUTEX, rt_init_MUTEX, sem)
171 #define init_MUTEX_LOCKED(sem) \
172 PICK_SEM_OP(compat_init_MUTEX_LOCKED, rt_init_MUTEX_LOCKED, sem)
174 #define down(sem) PICK_SEM_OP(compat_down, rt_down, sem)
176 #define down_timeout(sem, jiff) \
177 PICK_SEM_OP_RET(compat_down_timeout, rt_down_timeout, sem, jiff)
179 #define down_interruptible(sem) \
180 PICK_SEM_OP_RET(compat_down_interruptible, rt_down_interruptible, sem)
182 #define down_trylock(sem) \
183 PICK_SEM_OP_RET(compat_down_trylock, rt_down_trylock, sem)
185 #define up(sem) PICK_SEM_OP(compat_up, rt_up, sem)
/*
 * rwsems:
 */

#define __RWSEM_INITIALIZER(name) \
	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
	  RW_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
			    struct lock_class_key *key);

/* One lockdep class key per init site: */
# define rt_init_rwsem(sem)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__rt_rwsem_init((sem), #sem, &__key);		\
} while (0)

extern void __dont_do_this_in_rt(struct rw_semaphore *rwsem);

/* Non-owner read-locking is unsupported under PREEMPT_RT: */
#define rt_down_read_non_owner(rwsem)	__dont_do_this_in_rt(rwsem)
#define rt_up_read_non_owner(rwsem)	__dont_do_this_in_rt(rwsem)
extern void rt_down_write(struct rw_semaphore *rwsem);
extern void
rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void
rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);

# define rt_rwsem_is_locked(rws)	(rt_mutex_is_locked(&(rws)->lock))
/*
 * Same redirector scheme as PICK_SEM_OP(), for the two rwsem types:
 */
#define PICK_RWSEM_OP(...) PICK_FUNCTION(struct compat_rw_semaphore *,	\
	struct rw_semaphore *, ##__VA_ARGS__)
#define PICK_RWSEM_OP_RET(...) PICK_FUNCTION_RET(struct compat_rw_semaphore *,\
	struct rw_semaphore *, ##__VA_ARGS__)

#define init_rwsem(rwsem)	PICK_RWSEM_OP(compat_init_rwsem, rt_init_rwsem, rwsem)

#define down_read(rwsem)	PICK_RWSEM_OP(compat_down_read, rt_down_read, rwsem)

#define down_read_non_owner(rwsem) \
	PICK_RWSEM_OP(compat_down_read_non_owner, rt_down_read_non_owner, rwsem)

#define down_read_trylock(rwsem) \
	PICK_RWSEM_OP_RET(compat_down_read_trylock, rt_down_read_trylock, rwsem)

#define down_write(rwsem)	PICK_RWSEM_OP(compat_down_write, rt_down_write, rwsem)

#define down_read_nested(rwsem, subclass) \
	PICK_RWSEM_OP(compat_down_read_nested, rt_down_read_nested, \
		rwsem, subclass)

#define down_write_nested(rwsem, subclass) \
	PICK_RWSEM_OP(compat_down_write_nested, rt_down_write_nested, \
		rwsem, subclass)

#define down_write_trylock(rwsem) \
	PICK_RWSEM_OP_RET(compat_down_write_trylock, rt_down_write_trylock,\
		rwsem)

#define up_read(rwsem)		PICK_RWSEM_OP(compat_up_read, rt_up_read, rwsem)

#define up_read_non_owner(rwsem) \
	PICK_RWSEM_OP(compat_up_read_non_owner, rt_up_read_non_owner, rwsem)

#define up_write(rwsem)		PICK_RWSEM_OP(compat_up_write, rt_up_write, rwsem)

#define downgrade_write(rwsem) \
	PICK_RWSEM_OP(compat_downgrade_write, rt_downgrade_write, rwsem)

#define rwsem_is_locked(rwsem) \
	PICK_RWSEM_OP_RET(compat_rwsem_is_locked, rt_rwsem_is_locked, rwsem)
269 #else
270 # define preempt_rt 0
271 #endif /* CONFIG_PREEMPT_RT */
273 #endif