[tomato.git] release/src-rt-6.x/linux/linux-2.6/include/asm-sh/rwsem.h
/*
 * include/asm-sh/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

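/*
 * How the count is encoded (illustrative values, derived from the
 * bias constants above):
 *
 *	0x00000000	free
 *	0x00000001	one reader holds the lock
 *	0x00000002	two readers hold the lock
 *	0xffff0001	one writer holds the lock (waiting + active bias)
 *	negative	a writer and/or queued waiters are present
 *
 * The low 16 bits (RWSEM_ACTIVE_MASK) count active holders; tasks that
 * queue in the slow path leave RWSEM_WAITING_BIAS in the count while
 * they sleep, which is what keeps it negative and diverts new lockers
 * into lib/rwsem.c.
 */
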
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)	\
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

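/*
 * Usage sketch (illustrative only; "config_sem" is a made-up name).
 * Callers use the generic wrappers from <linux/rwsem.h>, which route
 * through the arch fast paths defined below:
 *
 *	static DECLARE_RWSEM(config_sem);
 *
 *	down_read(&config_sem);		// many readers may hold this at once
 *	...read shared data...
 *	up_read(&config_sem);
 *
 *	down_write(&config_sem);	// exclusive access
 *	...modify shared data...
 *	up_write(&config_sem);
 */
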
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)

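/*
 * The static __key gives every init_rwsem() call site its own lockdep
 * class, so CONFIG_DEBUG_LOCK_ALLOC can tell the locking patterns of
 * different semaphores apart.
 */
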
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

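/*
 * __down_read() fast path: the increment adds RWSEM_ACTIVE_READ_BIAS.
 * A positive result means only readers hold the lock and we are in;
 * zero or negative means a writer is active or queued, so fall back
 * to the slow path in lib/rwsem.c.
 */
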
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

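/*
 * __down_read_trylock() retries the cmpxchg as long as the count stays
 * non-negative (no writer involved); it reports failure as soon as it
 * observes a writer's negative bias, without ever sleeping.
 */
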
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

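/*
 * __down_write() adds RWSEM_ACTIVE_WRITE_BIAS in a single atomic step.
 * The result equals the bias exactly only if the count was zero, i.e.
 * the semaphore was completely idle; any other value means readers, a
 * writer, or waiters were present, so take the slow path.
 */
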
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

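/*
 * __down_write_trylock() attempts one compare-and-exchange from
 * RWSEM_UNLOCKED_VALUE; it can only succeed while there are no active
 * holders and no queued waiters at all.
 */
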
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

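/*
 * __up_read(): after dropping our reader bias, a negative count with a
 * clear active mask means we were the last active holder while tasks
 * are still queued, so hand the lock on via rwsem_wake().
 */
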
/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

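/*
 * __up_write(): subtracting RWSEM_ACTIVE_WRITE_BIAS returns the count
 * to zero unless waiters added their bias while we held the lock; a
 * negative result therefore means someone is queued and must be woken.
 */
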
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

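/*
 * __downgrade_write(): cancelling one RWSEM_WAITING_BIAS turns the
 * writer's 0xffff0001 bias into a plain reader bias of 0x00000001.
 * If the count is still negative, other tasks remain queued and any
 * waiting readers can now be admitted by rwsem_downgrade_wake().
 */
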
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

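/*
 * rwsem_is_locked() treats any non-zero count as locked: active
 * holders keep the low bits set, queued waiters keep it negative.
 */
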
#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */