/* include/asm-ppc/semaphore.h (from the linux-mips.git tree) */

#ifndef _PPC_SEMAPHORE_H
#define _PPC_SEMAPHORE_H

/*
 * Swiped from asm-sparc/semaphore.h and modified
 * -- Cort (cort@cs.nmt.edu)
 *
 * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
 * -- Ani Joshi (ajoshi@unixbox.com)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>

struct semaphore {
	atomic_t count;
	atomic_t waking;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count) \
	{ ATOMIC_INIT(count), ATOMIC_INIT(0), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)

extern inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, 0);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

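/*
 * Usage sketch (illustrative only, not part of the original header; the
 * identifiers below are hypothetical): a semaphore can be set up either
 * statically with DECLARE_MUTEX()/DECLARE_MUTEX_LOCKED() or at run time
 * with init_MUTEX()/init_MUTEX_LOCKED()/sema_init().
 */
#if 0
static DECLARE_MUTEX(example_static_sem);	/* starts out unlocked, count == 1 */

static struct semaphore example_dynamic_sem;

static void example_setup(void)
{
	init_MUTEX(&example_dynamic_sem);	/* equivalent run-time initialization */
}
#endif
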
extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern int  __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);

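/*
 * The four declarations above are the out-of-line slow paths, implemented
 * in the architecture's semaphore code.  The inline fast paths below only
 * touch sem->count; they drop into the slow paths when the count shows
 * contention, and the slow paths handle sleeping on sem->wait and the
 * sem->waking bookkeeping.
 */
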
extern inline void down(struct semaphore * sem)
{
	if (atomic_dec_return(&sem->count) >= 0)
		wmb();
	else
		__down(sem);
}

extern inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) >= 0)
		wmb();
	else
		ret = __down_interruptible(sem);
	return ret;
}

extern inline int down_trylock(struct semaphore * sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) >= 0)
		wmb();
	else
		ret = __down_trylock(sem);
	return ret;
}

extern inline void up(struct semaphore * sem)
{
	mb();
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

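/*
 * Usage sketch (illustrative only; example_static_sem and the functions
 * below are hypothetical): the usual pattern brackets a critical section
 * with down()/up(), using down_interruptible() when the sleep should be
 * abortable by signals and down_trylock() when sleeping is not allowed.
 */
#if 0
static void example_critical_section(void)
{
	down(&example_static_sem);	/* may sleep, not interruptible */
	/* ... touch data protected by the semaphore ... */
	up(&example_static_sem);
}

static int example_interruptible(void)
{
	if (down_interruptible(&example_static_sem))
		return -EINTR;		/* a signal interrupted the sleep */
	/* ... */
	up(&example_static_sem);
	return 0;
}
#endif
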
/* RW spinlock-based semaphores */

struct rw_semaphore
{
	spinlock_t lock;
	int rd, wr;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#define __RWSEM_INITIALIZER(name, rd, wr)	\
{						\
	SPIN_LOCK_UNLOCKED,			\
	(rd), (wr),				\
	__WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name)			\
}

#define __DECLARE_RWSEM_GENERIC(name, rd, wr) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name, rd, wr)

#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, 0, 0)
#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 1, 0)
#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0, 1)

extern inline void init_rwsem(struct rw_semaphore *sem)
{
	spin_lock_init(&sem->lock);
	sem->rd = sem->wr = 0;
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

#ifndef CHECK_MAGIC
#define CHECK_MAGIC(x)
#endif

extern void down_read_failed(struct rw_semaphore *);
extern void down_write_failed(struct rw_semaphore *);

extern inline void down_read(struct rw_semaphore *sem)
{
	CHECK_MAGIC(sem->__magic);

	spin_lock_irq(&sem->lock);
	if (sem->wr)
		down_read_failed(sem);
	sem->rd++;
	spin_unlock_irq(&sem->lock);
}

extern inline void down_write(struct rw_semaphore *sem)
{
	CHECK_MAGIC(sem->__magic);

	spin_lock(&sem->lock);
	if (sem->rd || sem->wr)
		down_write_failed(sem);
	sem->wr = 1;
	spin_unlock(&sem->lock);
}

#define up_read(sem)						\
	do {							\
		unsigned long flags;				\
		CHECK_MAGIC((sem)->__magic);			\
		spin_lock_irqsave(&(sem)->lock, flags);		\
		if (!--(sem)->rd && waitqueue_active(&(sem)->wait)) \
			wake_up(&(sem)->wait);			\
		spin_unlock_irqrestore(&(sem)->lock, flags);	\
	} while (0)

#define up_write(sem)						\
	do {							\
		unsigned long flags;				\
		CHECK_MAGIC((sem)->__magic);			\
		spin_lock_irqsave(&(sem)->lock, flags);		\
		(sem)->wr = 0;					\
		if (waitqueue_active(&(sem)->wait))		\
			wake_up(&(sem)->wait);			\
		spin_unlock_irqrestore(&(sem)->lock, flags);	\
	} while (0)

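/*
 * Usage sketch (illustrative only; example_rwsem and the functions below
 * are hypothetical): multiple readers may hold the lock at once, while a
 * writer excludes both readers and other writers.
 */
#if 0
static DECLARE_RWSEM(example_rwsem);

static void example_reader(void)
{
	down_read(&example_rwsem);	/* blocks while a writer holds the lock */
	/* ... read the shared data ... */
	up_read(&example_rwsem);
}

static void example_writer(void)
{
	down_write(&example_rwsem);	/* blocks out readers and other writers */
	/* ... modify the shared data ... */
	up_write(&example_rwsem);
}
#endif
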
#endif /* __KERNEL__ */

#endif /* !(_PPC_SEMAPHORE_H) */