/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2022 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Errno values returned by the kernel LWS helpers.  */
#define EFAULT  14
#define EBUSY   16
#define ENOSYS  251

#define _ASM_EFAULT "-14"

typedef unsigned char u8;
typedef short unsigned int u16;
#ifdef __LP64__
typedef long unsigned int u64;
#else
typedef long long unsigned int u64;
#endif
/* PA-RISC 2.0 supports out-of-order execution for loads and stores.
   Thus, we need to synchronize memory accesses.  For more info, see:
   "Advanced Performance Features of the 64-bit PA-8000" by Doug Hunt.

   We implement byte, short, int and 64-bit versions of each atomic
   operation using the kernel helpers defined below.  */
/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
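
/* Note on the mechanism (descriptive comment, not from the original
   sources): the helpers below enter the kernel's light-weight syscall
   (LWS) gateway with "ble 0xb0(%sr2, %r0)", passing the LWS function
   index in r20 (loaded in the branch delay slot) and the operands in
   r26-r23.  The kernel performs the compare-and-swap atomically on
   behalf of userspace and returns the result in r28, with an error
   code in r21.  */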
/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ("ble	0xb0(%%sr2, %%r0)	\n\t"
		"ldi	%2, %%r20		\n\t"
		"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
		"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	: "=r" (lws_ret), "=r" (lws_errno)
	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    return -EBUSY;

  return lws_errno;
}
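
/* Illustrative sketch (disabled; not part of this file): callers treat
   any nonzero return from __kernel_cmpxchg as "retry".  The function
   name example_atomic_store_4 is hypothetical.  */
#if 0
static void
example_atomic_store_4 (volatile int *mem, int newval)
{
  long failure;
  int oldval;

  do {
    /* Sample the current value, then try to swap it for NEWVAL.  */
    oldval = __atomic_load_n (mem, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (mem, oldval, newval);
  } while (failure != 0);	/* -EBUSY means we lost a race; retry.  */
}
#endif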
/* Kernel helper for compare-and-exchange of 1-, 2- and 8-byte values;
   VAL_SIZE is the log2 of the operand size in bytes.  */
static inline long
__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ("ble	0xb0(%%sr2, %%r0)	\n\t"
		"ldi	%6, %%r20		\n\t"
		"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
		"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
	: "i" (2)
	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );

  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  /* If the kernel LWS call fails with no error, return -EBUSY.  */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}
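
/* Illustrative sketch (disabled; not part of this file): the size index
   passed to __kernel_cmpxchg2 matches the INDEX arguments used by the
   macros below (0 = u8, 1 = u16, 3 = u64).  example_cas_u16 is a
   hypothetical 2-byte compare-and-swap.  */
#if 0
static long
example_cas_u16 (volatile u16 *mem, u16 expected, u16 desired)
{
  /* Returns 0 on success, -EBUSY if *mem != expected, else an errno.  */
  return __kernel_cmpxchg2 (mem, &expected, &desired, 1);
}
#endif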
#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
    do { \
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
    return tmp; \
  }
FETCH_AND_OP_2 (add,   , +, u64, 8, 3)
FETCH_AND_OP_2 (sub,   , -, u64, 8, 3)
FETCH_AND_OP_2 (or,    , |, u64, 8, 3)
FETCH_AND_OP_2 (and,   , &, u64, 8, 3)
FETCH_AND_OP_2 (xor,   , ^, u64, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, u64, 8, 3)

FETCH_AND_OP_2 (add,   , +, u16, 2, 1)
FETCH_AND_OP_2 (sub,   , -, u16, 2, 1)
FETCH_AND_OP_2 (or,    , |, u16, 2, 1)
FETCH_AND_OP_2 (and,   , &, u16, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, u16, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, u16, 2, 1)

FETCH_AND_OP_2 (add,   , +, u8, 1, 0)
FETCH_AND_OP_2 (sub,   , -, u8, 1, 0)
FETCH_AND_OP_2 (or,    , |, u8, 1, 0)
FETCH_AND_OP_2 (and,   , &, u8, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, u8, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, u8, 1, 0)
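
/* Usage sketch (disabled; not part of this file): on this target GCC
   routes __sync_fetch_and_add on a 2-byte object to the
   __sync_fetch_and_add_2 definition above.  The names below are
   hypothetical.  */
#if 0
static u16 counter;

static u16
example_fetch_then_add (void)
{
  return __sync_fetch_and_add (&counter, 1);	/* value before the add */
}
#endif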
#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
    do { \
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
    return PFX_OP (tmp INF_OP val); \
  }
OP_AND_FETCH_2 (add,   , +, u64, 8, 3)
OP_AND_FETCH_2 (sub,   , -, u64, 8, 3)
OP_AND_FETCH_2 (or,    , |, u64, 8, 3)
OP_AND_FETCH_2 (and,   , &, u64, 8, 3)
OP_AND_FETCH_2 (xor,   , ^, u64, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, u64, 8, 3)

OP_AND_FETCH_2 (add,   , +, u16, 2, 1)
OP_AND_FETCH_2 (sub,   , -, u16, 2, 1)
OP_AND_FETCH_2 (or,    , |, u16, 2, 1)
OP_AND_FETCH_2 (and,   , &, u16, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, u16, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, u16, 2, 1)

OP_AND_FETCH_2 (add,   , +, u8, 1, 0)
OP_AND_FETCH_2 (sub,   , -, u8, 1, 0)
OP_AND_FETCH_2 (or,    , |, u8, 1, 0)
OP_AND_FETCH_2 (and,   , &, u8, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, u8, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, u8, 1, 0)
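
/* Usage sketch (disabled; not part of this file): the only difference
   from the FETCH_AND_OP_2 family is the return value, which is the
   value *after* the operation.  FLAGS is hypothetical.  */
#if 0
static u8 flags;

static u8
example_set_bits (u8 mask)
{
  return __sync_or_and_fetch (&flags, mask);	/* value after the OR */
}
#endif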
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
    do { \
      tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
			     __ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
    return tmp; \
  }
FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
    do { \
      tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
			     __ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
    return PFX_OP (tmp INF_OP val); \
  }
OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)
typedef unsigned char bool;
#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval, \
				       TYPE newval) \
  { \
    TYPE actual_oldval; \
    long fail; \
    while (1) \
      { \
	actual_oldval = __atomic_load_n ((volatile TYPE *)ptr, \
					 __ATOMIC_RELAXED); \
	if (__builtin_expect (oldval != actual_oldval, 0)) \
	  return actual_oldval; \
	fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
	if (__builtin_expect (!fail, 1)) \
	  return actual_oldval; \
      } \
  } \
  bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr, \
					TYPE oldval, TYPE newval) \
  { \
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
    return (failure == 0); \
  }
COMPARE_AND_SWAP_2 (u64, 8, 3)
COMPARE_AND_SWAP_2 (u16, 2, 1)
COMPARE_AND_SWAP_2 (u8, 1, 0)
unsigned int HIDDEN
__sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
			       unsigned int newval)
{
  long fail;
  unsigned int actual_oldval;

  while (1)
    {
      actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
				       __ATOMIC_RELAXED);

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}
bool HIDDEN
__sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
				unsigned int newval)
{
  long failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}
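
/* Usage sketch (disabled; not part of this file): the boolean CAS is
   the usual building block for a word-sized spinlock.  The names below
   are hypothetical.  */
#if 0
static volatile unsigned int lock_word;

static void
example_spin_lock (void)
{
  /* Succeeds only when the lock word transitions 0 -> 1.  */
  while (!__sync_bool_compare_and_swap (&lock_word, 0, 1))
    continue;
}
#endif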
#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE oldval; \
    long failure; \
    do { \
      oldval = __atomic_load_n ((volatile TYPE *)ptr, \
				__ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
    return oldval; \
  }
SYNC_LOCK_TEST_AND_SET_2 (u64, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (u16, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (u8, 1, 0)
unsigned int HIDDEN
__sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr,
			      __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}
#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX) \
  void HIDDEN \
  __sync_lock_release_##WIDTH (volatile void *ptr) \
  { \
    TYPE oldval, val = 0; \
    long failure; \
    do { \
      oldval = __atomic_load_n ((volatile TYPE *)ptr, \
				__ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
  }
SYNC_LOCK_RELEASE_1 (u64, 8, 3)
SYNC_LOCK_RELEASE_1 (u16, 2, 1)
SYNC_LOCK_RELEASE_1 (u8, 1, 0)
void HIDDEN
__sync_lock_release_4 (volatile void *ptr)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr,
			      __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
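
/* Usage sketch (disabled; not part of this file): test-and-set returns
   the previous value, and lock-release stores zero, so together they
   form a minimal busy-wait lock.  The names below are hypothetical.  */
#if 0
static volatile unsigned int busy;

static void
example_lock (void)
{
  /* A previous value of 1 means another thread holds the lock.  */
  while (__sync_lock_test_and_set (&busy, 1) != 0)
    continue;
}

static void
example_unlock (void)
{
  __sync_lock_release (&busy);	/* resets BUSY to zero */
}
#endif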