1 #ifndef _ALPHA_SEMAPHORE_H
2 #define _ALPHA_SEMAPHORE_H
5 * SMP- and interrupt-safe semaphores..
7 * (C) Copyright 1996 Linus Torvalds
10 #include <asm/current.h>
11 #include <asm/system.h>
12 #include <asm/atomic.h>
15 * Semaphores are recursive: we allow the holder process to recursively do
16 * down() operations on a semaphore that the process already owns. In order
17 * to do that, we need to keep a semaphore-local copy of the owner and the
18 * "depth of ownership".
20 * NOTE! Nasty memory ordering rules:
21 * - "owner" and "owner_count" may only be modified once you hold the lock.
22 * - "owner_count" must be written _after_ modifying owner, and must be
23 * read _before_ reading owner. There must be appropriate write and read
24 * barriers to enforce this.
30 struct task_struct
*owner
;
32 struct wait_queue
* wait
;
/*
 * Static initializers.  Field order: { count, waking, owner,
 * owner_depth, wait }.  MUTEX starts free (count = 1, depth = 0);
 * MUTEX_LOCKED starts held (count = 0, depth = 1) with no owner
 * recorded yet.
 */
#define MUTEX ((struct semaphore) \
	{ ATOMIC_INIT(1), ATOMIC_INIT(0), NULL, 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) \
	{ ATOMIC_INIT(0), ATOMIC_INIT(0), NULL, 1, NULL })

/* Accessor for the current holder (see memory-ordering rules above). */
#define semaphore_owner(sem)	((sem)->owner)
/* (Re)initialize only the count; owner/waking are left untouched. */
#define sema_init(sem, val)	atomic_set(&((sem)->count), val)
/*
 * Slow-path (contended) handlers, implemented out of line in
 * kernel/sched.c.  __down_interruptible returns nonzero when the
 * sleep was aborted by a signal.
 */
extern void __down(struct semaphore *sem);
extern int __down_interruptible(struct semaphore *sem);
extern void __up(struct semaphore *sem);
/* All three have custom assembly linkages. */
/*
 * Stubs in arch/alpha/lib/semaphore.S: they save/restore the scratch
 * registers and then call the C slow paths above.  The inline fast
 * paths below jump to these with the semaphore in $24 and the return
 * address in $28 -- they are not normal C calls.
 */
extern void __down_failed(struct semaphore *sem);
extern void __down_failed_interruptible(struct semaphore *sem);
extern void __up_wakeup(struct semaphore *sem);
54 * These two _must_ execute atomically wrt each other.
56 * This is trivially done with load_locked/store_cond,
57 * which we have. Let the rest of the losers suck eggs.
61 * (1) One task does two downs, no other contention
63 * count = 1, waking = 0, depth = undef;
65 * count = 0, waking = 0, depth = 1;
67 * atomic dec and test sends us to waking_non_zero via __down
68 * count = -1, waking = 0;
69 * conditional atomic dec on waking discovers no free slots
70 * count = -1, waking = 0;
71 * test for owner succeeds and we return ok.
72 * count = -1, waking = 0, depth = 2;
75 * count = -1, waking = 0, depth = 1;
76 * atomic inc and test sends us to slow path
77 * count = 0, waking = 0, depth = 1;
78 * notice !(depth < 0) and don't call __up.
81 * count = 0, waking = 0, depth = 0;
82 * atomic inc and test succeeds.
83 * count = 1, waking = 0, depth = 0;
86 static inline void wake_one_more(struct semaphore
* sem
)
88 atomic_inc(&sem
->waking
);
/*
 * waking_non_zero -- try to consume a wakeup slot on SEM for task TSK.
 *
 * NOTE(review): this chunk is a garbled extraction -- statements are
 * split mid-token across lines and most of the asm body is missing.
 * The code bytes below are untouched; only comments were added.
 *
 * From what is visible: owner_depth is sampled BEFORE the atomic
 * operation (per the "read owner_count before owner" ordering rule at
 * the top of the file), then a load-locked/store-conditional sequence
 * decrements sem->waking only if it is positive, and finally the
 * result is OR-ed with the recursive-ownership test.
 */
91 static inline int waking_non_zero(struct semaphore
*sem
,
92 struct task_struct
*tsk
)
97 owner_depth
= sem
->owner_depth
;
99 /* Atomic decrement, iff the value is > 0. */
100 __asm__
__volatile__(
107 ".section .text2,\"ax\"\n"
110 : "=r"(ret
), "=r"(tmp
), "=m"(__atomic_fool_gcc(&sem
->waking
))
/* Recursive down(): also succeed if TSK already owns the semaphore. */
113 ret
|= ((owner_depth
!= 0) & (sem
->owner
== tsk
));
117 /* Don't use the old value, which is stale in the
126 * Whee. Hidden out of line code is fun. The contention cases are
127 * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
128 * takes care of making sure we can call it without clobbering regs.
/*
 * down -- uninterruptible acquire, fast path inlined.
 * Decrements sem->count atomically; on contention it branches out of
 * line to __down_failed (custom linkage: semaphore in $24, return
 * address in $28), otherwise it records owner/owner_depth inline.
 *
 * NOTE(review): garbled extraction -- most of the asm body is missing.
 * Code bytes untouched below; comments only.
 */
131 extern inline void down(struct semaphore
* sem
)
133 /* Given that we have to use particular hard registers to
134 communicate with __down_failed anyway, reuse them in
135 the atomic operation as well.
137 __down_failed takes the semaphore address in $24, and
138 its return address in $28. The pv is loaded as usual.
139 The gp is clobbered (in the module case) as usual. */
141 __asm__
__volatile__ (
142 "/* semaphore down operation */\n"
149 /* Got the semaphore no contention. Set owner and depth. */
155 ".section .text2,\"ax\"\n"
/* Contended: tail out of line to the assembly stub. */
158 " jsr $28,__down_failed\n"
162 : "=m"(sem
->count
), "=m"(sem
->owner
), "=m"(sem
->owner_depth
)
164 : "$24", "$27", "$28", "memory");
/*
 * down_interruptible -- like down(), but the sleep can be aborted by
 * a signal.  The result comes back in hard register $24 (bound to
 * `ret` below); presumably 0 on success and nonzero on signal, as
 * with __down_interruptible -- confirm against the slow path.
 *
 * NOTE(review): garbled extraction -- most of the asm body is missing.
 * Code bytes untouched below; comments only.
 */
167 extern inline int down_interruptible(struct semaphore
* sem
)
169 /* __down_failed_interruptible takes the semaphore address in $24,
170 and its return address in $28. The pv is loaded as usual.
171 The gp is clobbered (in the module case) as usual. The return
/* Bind the return value to $24, shared with the stub's linkage. */
174 register int ret
__asm__("$24");
176 __asm__
__volatile__ (
177 "/* semaphore down interruptible operation */\n"
184 /* Got the semaphore no contention. Set owner and depth. */
191 ".section .text2,\"ax\"\n"
194 " jsr $28,__down_failed_interruptible\n"
198 : "=r"(ret
), "=m"(sem
->count
), "=m"(sem
->owner
),
199 "=m"(sem
->owner_depth
)
201 : "$27", "$28", "memory");
/*
 * up -- release, fast path inlined.
 * Note the pre-decrement of sem->owner_depth passed as an input
 * operand: per the worked example above, a recursive release
 * (depth still > 0 afterwards) must NOT wake anyone; only the final
 * release branches out of line to __up_wakeup.
 *
 * NOTE(review): garbled extraction -- most of the asm body is missing.
 * Code bytes untouched below; comments only.
 */
206 extern inline void up(struct semaphore
* sem
)
208 /* Given that we have to use particular hard registers to
209 communicate with __up_wakeup anyway, reuse them in
210 the atomic operation as well.
212 __up_wakeup takes the semaphore address in $24, and
213 its return address in $28. The pv is loaded as usual.
214 The gp is clobbered (in the module case) as usual. */
216 __asm__
__volatile__ (
217 "/* semaphore up operation */\n"
227 ".section .text2,\"ax\"\n"
231 " jsr $28,__up_wakeup\n"
236 : "m"(sem
->count
), "r"(--sem
->owner_depth
)
237 : "$24", "$27", "$28", "memory");