/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_LNKGET_H
#define __ASM_SPINLOCK_LNKGET_H

/*
 * None of these asm statements clobber memory as LNKSET writes around
 * the cache so the memory it modifies cannot safely be read by any means
 * other than these accessors.
 */
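
/*
 * All of the accessors below use the same LNKGET/LNKSET
 * (load-linked/store-conditional) retry pattern. An informal sketch of
 * the loop they implement (pseudo-code, not literal Meta assembly):
 *
 *	do {
 *		val = LNKGETD(&lock);		linked load of the lock word
 *		test/modify val;		e.g. TST/ADD for the spinlock
 *		LNKSETD(&lock, val);		predicated (conditional) store
 *		status = DEFR(TXSTAT);		read back the transaction status
 *	} while ((status & 0x3f000000) != 0x02000000);	retry until the store took
 */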

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	int ret;

	asm volatile ("LNKGETD %0, [%1]\n"
		      "TST %0, #1\n"
		      "MOV %0, #1\n"
		      "XORZ %0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&lock->lock)
		      : "cc");
	return ret;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int tmp;

	asm volatile ("1: LNKGETD %0,[%1]\n"
		      "   TST %0, #1\n"
		      "   ADD %0, %0, #1\n"
		      "   LNKSETDZ [%1], %0\n"
		      "   BNZ 1b\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   BNZ 1b\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();
}

/* Returns 0 if failed to acquire lock */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp;

	asm volatile ("   LNKGETD %0,[%1]\n"
		      "   TST %0, #1\n"
		      "   ADD %0, %0, #1\n"
		      "   LNKSETDZ [%1], %0\n"
		      "   BNZ 1f\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   MOV %0, #1\n"
		      "1: XORNZ %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&lock->lock)
		      : "cc");

	smp_mb();

	return tmp;
}
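
/*
 * Minimal usage sketch (illustrative only - callers normally go through
 * the generic spin_lock()/spin_trylock() wrappers rather than using the
 * arch_* helpers directly; "x" is a hypothetical structure holding an
 * arch_spinlock_t named "lock"):
 *
 *	if (!arch_spin_trylock(&x->lock))	0 means the lock was held
 *		arch_spin_lock(&x->lock);	fall back to spinning for it
 *	... critical section ...
 *	arch_spin_unlock(&x->lock);
 */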

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	asm volatile ("   SETD [%0], %1\n"
		      :
		      : "da" (&lock->lock), "da" (0)
		      : "memory");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
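
/*
 * The resulting lock word encoding (read straight off the accessors
 * below):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set)
 *	N > 0		held by N readers
 *
 * arch_write_lock() therefore only stores when the old value was 0, and
 * arch_read_lock() only stores while the incremented value stays positive.
 */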

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1: LNKGETD %0,[%1]\n"
		      "   CMP %0, #0\n"
		      "   ADD %0, %0, %2\n"
		      "   LNKSETDZ [%1], %0\n"
		      "   BNZ 1b\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   BNZ 1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("   LNKGETD %0,[%1]\n"
		      "   CMP %0, #0\n"
		      "   ADD %0, %0, %2\n"
		      "   LNKSETDZ [%1], %0\n"
		      "   BNZ 1f\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   MOV %0, #1\n"
		      "1: XORNZ %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");

	smp_mb();

	return tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	asm volatile ("   SETD [%0], %1\n"
		      :
		      : "da" (&rw->lock), "da" (0)
		      : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	int ret;

	asm volatile ("LNKGETD %0, [%1]\n"
		      "CMP %0, #0\n"
		      "MOV %0, #1\n"
		      "XORNZ %0, %0, %0\n"
		      : "=&d" (ret)
		      : "da" (&rw->lock)
		      : "cc");
	return ret;
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
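
/*
 * A short worked example of the read-side counter (each value is the lock
 * word, following the algorithm above):
 *
 *	0          + arch_read_lock()   -> 1	first reader in
 *	1          + arch_read_lock()   -> 2	second reader, no contention
 *	2          + arch_read_unlock() -> 1	one reader drops out
 *	0x80000000 + arch_read_lock()   -> spins: the incremented value is
 *						still negative, so LNKSETDPL
 *						skips the store until the
 *						writer has unlocked
 */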

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("1: LNKGETD %0,[%1]\n"
		      "   ADDS %0, %0, #1\n"
		      "   LNKSETDPL [%1], %0\n"
		      "   BMI 1b\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   BNZ 1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int tmp;

	smp_mb();

	asm volatile ("1: LNKGETD %0,[%1]\n"
		      "   SUB %0, %0, #1\n"
		      "   LNKSETD [%1], %0\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   BNZ 1b\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("   LNKGETD %0,[%1]\n"
		      "   ADDS %0, %0, #1\n"
		      "   LNKSETDPL [%1], %0\n"
		      "   BMI 1f\n"
		      "   DEFR %0, TXSTAT\n"
		      "   ANDT %0, %0, #HI(0x3f000000)\n"
		      "   CMPT %0, #HI(0x02000000)\n"
		      "   MOV %0, #1\n"
		      "   BZ 2f\n"
		      "1: MOV %0, #0\n"
		      "2:\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock)
		      : "cc");

	smp_mb();

	return tmp;
}
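
/*
 * The trylock return convention, shown as a small illustrative caller
 * (it simply mirrors the asm return paths above: non-zero means the lock
 * was taken, 0 means it was not):
 *
 *	if (arch_read_trylock(rw)) {
 *		... read-side critical section ...
 *		arch_read_unlock(rw);
 *	}
 */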

/* read_can_lock - would read_trylock() succeed? */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	int tmp;

	asm volatile ("LNKGETD %0, [%1]\n"
		      "CMP %0, %2\n"
		      "MOV %0, #1\n"
		      "XORZ %0, %0, %0\n"
		      : "=&d" (tmp)
		      : "da" (&rw->lock), "bd" (0x80000000)
		      : "cc");
	return tmp;
}
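
/*
 * Note that the *_can_lock() helpers above only sample the lock word once
 * without taking it, so their answer can already be stale by the time the
 * caller acts on it; they are hints, not guarantees.
 */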

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_LNKGET_H */