1 #ifndef _ALPHA_SEMAPHORE_H
2 #define _ALPHA_SEMAPHORE_H
/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996 Richard Henderson
 */
11 #include <asm/current.h>
12 #include <asm/system.h>
13 #include <asm/atomic.h>
16 /* Careful, inline assembly knows about the position of these two. */
18 atomic_t waking
; /* biased by -1 */
19 wait_queue_head_t wait
;
/*
 * Trailing initializer element for the optional __magic debug member:
 * expands to an extra struct-initializer item only when wait-queue
 * debugging is compiled in, so __SEMAPHORE_INITIALIZER works in both
 * configurations.
 */
#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name)		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif
/* Static initializers: count as given, waking biased to -1, an empty
   wait queue, plus the debug magic element when enabled. */
#define __SEMAPHORE_INITIALIZER(name,count)		\
	{ ATOMIC_INIT(count), ATOMIC_INIT(-1),		\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name)		\
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* A mutex is just a semaphore with count 1 (unlocked) or 0 (locked). */
#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
45 extern inline void sema_init (struct semaphore
*sem
, int val
)
49 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
50 * except that gcc produces better initializing by parts yet.
53 atomic_set(&sem
->count
, val
);
54 atomic_set(&sem
->waking
, -1);
55 init_waitqueue_head(&sem
->wait
);
57 sem
->__magic
= (long)&sem
->__magic
;
/* Initialize @sem as an unlocked mutex (count 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/* Initialize @sem as a locked mutex (count 0); the first down() blocks. */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/* Out-of-line contention paths, implemented in kernel/sched.c. */
extern void __down(struct semaphore *sem);
extern int __down_interruptible(struct semaphore *sem);
extern int __down_trylock(struct semaphore *sem);
extern void __up(struct semaphore *sem);

/* All have custom assembly linkages. */
extern void __down_failed(struct semaphore *sem);
extern void __down_failed_interruptible(struct semaphore *sem);
extern void __down_failed_trylock(struct semaphore *sem);
extern void __up_wakeup(struct semaphore *sem);
/*
 * Whee.  Hidden out of line code is fun.  The contention cases are
 * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
 * takes care of making sure we can call it without clobbering regs.
 */
/*
 * down() - acquire the semaphore; sleeps on contention via the
 * out-of-line __down_failed path, reached with the semaphore address
 * in $24, the return address in $28, and pv ($27) loaded as usual.
 *
 * NOTE(review): this listing is garbled.  The function braces, the
 * debug-magic preprocessor guard, the pv assignment, and most of the
 * ll/sc assembly sequence (original source lines ~90, 94, 98, 102,
 * 104-109, 112-118, 120-121, 123-126, 129-130) were dropped by
 * extraction.  The surviving lines are kept verbatim below; restore
 * from the pristine include/asm-alpha/semaphore.h before building.
 */
89 extern inline void down(struct semaphore
* sem
)
91 /* Given that we have to use particular hard registers to
92 communicate with __down_failed anyway, reuse them in
93 the atomic operation as well.
95 __down_failed takes the semaphore address in $24, and
96 it's return address in $28. The pv is loaded as usual.
97 The gp is clobbered (in the module case) as usual. */
99 /* This little bit of silliness is to get the GP loaded for
100 a function that ordinarily wouldn't. Otherwise we could
101 have it done by the macro directly, which can be optimized
103 register void *pv
__asm__("$27");
106 CHECK_MAGIC(sem
->__magic
);
110 __asm__
__volatile__ (
111 "/* semaphore down operation */\n"
119 ".section .text2,\"ax\"\n"
122 " jsr $28,($27),__down_failed\n"
127 : "m"(sem
->count
), "r"(pv
)
128 : "$24", "$28", "memory");
/*
 * down_interruptible() - like down(), but a signal can abort the wait;
 * the result appears to be returned in $24 via `ret` ("=r"(ret) below)
 * -- TODO(review): confirm exact return convention against the pristine
 * source, since the comment describing it is cut off at "The return".
 *
 * NOTE(review): garbled listing -- braces, the debug guard, and most of
 * the ll/sc assembly (original lines ~132, 136-137, 140-141, 143-144,
 * 148-155, 157-158, 160-162, 165-169) are missing; surviving lines are
 * kept verbatim.
 */
131 extern inline int down_interruptible(struct semaphore
* sem
)
133 /* __down_failed_interruptible takes the semaphore address in $24,
134 and it's return address in $28. The pv is loaded as usual.
135 The gp is clobbered (in the module case) as usual. The return
138 register int ret
__asm__("$24");
139 register void *pv
__asm__("$27");
142 CHECK_MAGIC(sem
->__magic
);
145 pv
= __down_failed_interruptible
;
146 __asm__
__volatile__ (
147 "/* semaphore down interruptible operation */\n"
156 ".section .text2,\"ax\"\n"
159 " jsr $28,($27),__down_failed_interruptible\n"
163 : "=r"(ret
), "=r"(pv
)
164 : "m"(sem
->count
), "r"(pv
)
/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Do this by using ll/sc on the pair of 32-bit words.
 */
/*
 * down_trylock() - non-blocking acquire; per the comment preceding this
 * function it returns 0 on success and 1 on failure, using ll/sc on the
 * <count,waking> pair of 32-bit words without taken branches (the
 * "Equivalent C" sketch below documents the intended logic).
 *
 * NOTE(review): garbled listing -- braces, the tmp load, the entire
 * asm instruction sequence (original lines ~178, 180, 183-185, 192-200,
 * 202-203, 205-218, 220-221, 223-226), and the return are missing;
 * surviving lines kept verbatim.
 */
177 extern inline int down_trylock(struct semaphore
* sem
)
179 long ret
, tmp
, tmp2
, sub
;
181 /* "Equivalent" C. Note that we have to do this all without
182 (taken) branches in order to be a valid ll/sc sequence.
186 sub = 0x0000000100000000;
187 ret = ((int)tmp <= 0); // count =< 0 ?
188 if ((int)tmp >= 0) sub = 0; // count >= 0 ?
189 // note that if count=0 subq overflows to the high
190 // longword (i.e waking)
191 ret &= ((long)tmp < 0); // waking < 0 ?
201 CHECK_MAGIC(sem
->__magic
);
204 __asm__
__volatile__(
219 ".section .text2,\"ax\"\n"
222 : "=&r"(ret
), "=&r"(tmp
), "=&r"(tmp2
), "=&r"(sub
)
/*
 * up() - release the semaphore; the contended case falls through to
 * __up_wakeup, reached with the semaphore address in $24 and the return
 * address in $28 (see the register-usage comment below).
 *
 * NOTE(review): garbled listing -- braces, the debug guard, the pv
 * assignment, and most of the ll/sc assembly (original lines ~230, 234,
 * 238, 240-241, 243-245, 248-256, 258-259, 261-264, 267-268) are
 * missing; surviving lines kept verbatim.
 */
229 extern inline void up(struct semaphore
* sem
)
231 /* Given that we have to use particular hard registers to
232 communicate with __up_wakeup anyway, reuse them in
233 the atomic operation as well.
235 __up_wakeup takes the semaphore address in $24, and
236 it's return address in $28. The pv is loaded as usual.
237 The gp is clobbered (in the module case) as usual. */
239 register void *pv
__asm__("$27");
242 CHECK_MAGIC(sem
->__magic
);
246 __asm__
__volatile__ (
247 "/* semaphore up operation */\n"
257 ".section .text2,\"ax\"\n"
260 " jsr $28,($27),__up_wakeup\n"
265 : "m"(sem
->count
), "r"(pv
)
266 : "$24", "$28", "memory");