/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */
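
/* For example (editorial note, not part of the original comment):
 *
 *	CONFIG_DEBUG_SPINLOCK=y
 *
 * in the kernel .config before rebuilding.
 */
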
/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */
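
/* Editorial sketch, not part of the original comment: the shape of the
 * pattern referred to above, with the uncontended path falling straight
 * through and the spin loop kept out of line:
 *
 *	1:	ldstub	[lock], tmp		! fast path: try to take it
 *		brnz,pn	tmp, 2f			! contended? enter spinner
 *		 nop
 *		.subsection 2			! out-of-line spinner
 *	2:	ldub	[lock], tmp		! wait with plain loads
 *		brnz,pt	tmp, 2b
 *		 nop
 *		ba,a,pt	%xcc, 1b		! go retry the ldstub
 *		.previous
 *
 * As background (from the SPARC V9 manual, not this file): the V9
 * branch-on-register instructions reach only +/-128KB, while the older
 * Bicc format reaches +/-8MB, hence the care about which branch variant
 * crosses the section boundary.
 */
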
#define arch_spin_is_locked(lp)	((lp)->lock != 0)

#define arch_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

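/* Illustrative usage sketch (editorial, not part of the original
 * header): callers normally reach this through the generic
 * spin_trylock() wrappers.  The lock variable and function here are
 * hypothetical; __ARCH_SPIN_LOCK_UNLOCKED comes from
 * asm/spinlock_types.h.
 */
#if 0
static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_trylock_user(void)
{
	if (arch_spin_trylock(&example_lock)) {
		/* ldstub read back zero: the byte was clear, we own it. */
		arch_spin_unlock(&example_lock);
	}
}
#endif
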
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

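/* Editorial note, not in the original: while spinning, the slow path
 * above drops %pil back to the caller's pre-disable level ("flags") so
 * pending interrupts can be serviced, then restores the raised level in
 * the branch delay slot before retrying the ldstub.
 */
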
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

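/* Editorial note, inferred from the code rather than stated in the
 * original: the rwlock word holds a reader count in its low 31 bits,
 * with bit 31 (0x80000000) serving as the writer bit.  Readers cas the
 * count up and down, waiting while the word is negative; a writer may
 * only cas the mask in when the whole word is zero.
 */
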
static void inline arch_read_lock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

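/* Editorial note, not in the original: the "4:" label sits in the
 * delay slot of the brlz above, so the out-of-line spinner can branch
 * back via "ba,a,pt %%xcc, 4b" and reuse the non-negative value it just
 * loaded instead of repeating the initial ldsw.
 */
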
static int inline arch_read_trylock(arch_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

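/* Editorial note, not in the original: the return value travels in
 * tmp1, which the annulled "brlz,a" sets to 0 when a writer holds the
 * word, and which the delay slot of the final branch sets to 1 once the
 * cas has succeeded.
 */
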
static void inline arch_read_unlock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static void inline arch_write_lock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

static void inline arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static int inline arch_write_trylock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

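/* Editorial sketch, not part of the original header: roughly the logic
 * the cas loop in arch_write_trylock() implements, restated in C using
 * GCC's __sync_val_compare_and_swap builtin purely for illustration.
 */
#if 0
static int write_trylock_sketch(unsigned int *word)
{
	unsigned int old = *word;

	while (old == 0) {	/* only an idle lock can be claimed */
		unsigned int seen =
			__sync_val_compare_and_swap(word, old, 0x80000000U);
		if (seen == old)
			return 1;	/* cas hit: writer bit now set */
		old = seen;	/* lost a race: re-examine the word */
	}
	return 0;		/* readers or a writer hold the lock */
}
#endif
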
#define arch_read_lock(p)	arch_read_lock(p)
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_read_trylock(p)	arch_read_trylock(p)
#define arch_read_unlock(p)	arch_read_unlock(p)
#define arch_write_lock(p)	arch_write_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
#define arch_write_unlock(p)	arch_write_unlock(p)
#define arch_write_trylock(p)	arch_write_trylock(p)

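/* Editorial note, not in the original: defining each name to itself
 * makes it visible to #ifdef, which is how the generic locking headers
 * detect that this architecture supplies its own implementations (e.g.
 * the _flags variants above) rather than installing fallbacks.
 */
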
#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)	(!(rw)->lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */