include/asm-h8300/semaphore.h
#ifndef _H8300_SEMAPHORE_H
#define _H8300_SEMAPHORE_H

#define RW_LOCK_BIAS		 0x01000000

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>

#include <asm/system.h>
#include <asm/atomic.h>
/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * H8/300 version by Yoshinori Sato
 */
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}
#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
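
/*
 * Illustrative use of the declaration macros above (example only, not
 * part of the original header; "foo_sem" is a made-up name):
 *
 *	static DECLARE_MUTEX(foo_sem);		count starts at 1
 *
 *	down(&foo_sem);				enter the critical section
 *	...
 *	up(&foo_sem);				leave it again
 */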
static inline void sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
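
/*
 * Illustrative run-time initialization (example only, not part of the
 * original header; "dev->sem" is a made-up field):
 *
 *	sema_init(&dev->sem, 1);		equivalent to init_MUTEX()
 *	init_MUTEX_LOCKED(&dev->sem);		starts out already held
 */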
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void  /* params in registers */);
asmlinkage int  __down_failed_trylock(void  /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/m68k/lib/semaphore.S
 */
static inline void down(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	might_sleep();

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"	/* save CCR, then mask interrupts */
		"orc #0x80,ccr\n\t"
		"mov.l %2, er1\n\t"	/* decrement the count */
		"dec.l #1,er1\n\t"
		"mov.l er1,%0\n\t"
		"bpl 1f\n\t"		/* still non-negative: we own it */
		"ldc r3l,ccr\n\t"	/* contended: restore CCR ... */
		"mov.l %1,er0\n\t"	/* ... and let __down() sleep */
		"jsr @___down\n\t"
		"bra 2f\n"
		"1:\n\t"
		"ldc r3l,ccr\n"		/* fast path: just restore CCR */
		"2:"
		: "=m"(*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
}
static inline int down_interruptible(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	might_sleep();

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r1l\n\t"	/* save CCR, then mask interrupts */
		"orc #0x80,ccr\n\t"
		"mov.l %3, er2\n\t"	/* decrement the count */
		"dec.l #1,er2\n\t"
		"mov.l er2,%1\n\t"
		"bpl 1f\n\t"		/* still non-negative: got it */
		"ldc r1l,ccr\n\t"
		"mov.l %2,er0\n\t"
		"jsr @___down_interruptible\n\t"	/* sleep; result returned in er0 */
		"bra 2f\n"
		"1:\n\t"
		"ldc r1l,ccr\n\t"
		"sub.l %0,%0\n\t"	/* fast path: return 0 */
		"2:\n\t"
		: "=r" (count),"=m" (*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
	return (int)count;
}
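
/*
 * Illustrative caller pattern for the interruptible variant (example
 * only, not part of the original header; "foo_sem" is a made-up name):
 *
 *	if (down_interruptible(&foo_sem))
 *		return -ERESTARTSYS;	interrupted by a signal
 */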
static inline int down_trylock(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"	/* save CCR, then mask interrupts */
		"orc #0x80,ccr\n\t"
		"mov.l %3,er2\n\t"	/* decrement the count */
		"dec.l #1,er2\n\t"
		"mov.l er2,%0\n\t"
		"bpl 1f\n\t"		/* non-negative: acquired */
		"ldc r3l,ccr\n\t"
		"jmp @3f\n\t"		/* contended: take the out-of-line path */
		LOCK_SECTION_START(".align 2\n\t")
		"3:\n\t"
		"mov.l %2,er0\n\t"
		"jsr @___down_trylock\n\t"	/* non-zero result if not acquired */
		"jmp @2f\n\t"
		LOCK_SECTION_END
		"1:\n\t"
		"ldc r3l,ccr\n\t"
		"sub.l %1,%1\n"		/* fast path: return 0 */
		"2:"
		: "=m" (*count),"=r"(count)
		: "g"(sem),"m"(*count)
		: "cc", "er1","er2", "er3");
	return (int)count;
}
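
/*
 * Illustrative caller pattern for the trylock variant (example only,
 * not part of the original header; "foo_sem" is a made-up name):
 *
 *	if (down_trylock(&foo_sem))
 *		return -EBUSY;		somebody else already holds it
 *	...
 *	up(&foo_sem);
 */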
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore * sem)
{
	register atomic_t *count asm("er0");

	count = &(sem->count);
	__asm__ __volatile__(
		"stc ccr,r3l\n\t"	/* save CCR, then mask interrupts */
		"orc #0x80,ccr\n\t"
		"mov.l %2,er1\n\t"	/* increment the count */
		"inc.l #1,er1\n\t"
		"mov.l er1,%0\n\t"
		"ldc r3l,ccr\n\t"
		"sub.l er2,er2\n\t"
		"cmp.l er2,er1\n\t"	/* new count still <= 0? */
		"bgt 1f\n\t"
		"mov.l %1,er0\n\t"	/* then someone is asleep: wake them */
		"jsr @___up\n"
		"1:"
		: "=m"(*count)
		: "g"(sem),"m"(*count)
		: "cc", "er1", "er2", "er3");
}
#endif /* __ASSEMBLY__ */

#endif