Fix atomic operations on PA-RISC 2.0 processors.
[official-gcc.git] / libgcc / config / pa / linux-atomic.c

/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2022 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#define EFAULT  14
#define EBUSY   16
#define ENOSYS 251

#define _ASM_EFAULT "-14"

typedef unsigned char u8;
typedef short unsigned int u16;
#ifdef __LP64__
typedef long unsigned int u64;
#else
typedef long long unsigned int u64;
#endif

/* PA-RISC 2.0 supports out-of-order execution for loads and stores.
   Thus, we need to synchronize memory accesses.  For more info, see:
   "Advanced Performance Features of the 64-bit PA-8000" by Doug Hunt.

   We implement byte, short, int and 64-bit versions of each atomic
   operation using the kernel helpers defined below.  */

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
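
/* Illustrative usage only (kept out of the build with #if 0): on PA Linux
   the compiler has no inline atomic sequences, so a __sync builtin such as
   the one below is emitted as a call to the matching out-of-line function
   defined later in this file (here, __sync_fetch_and_add_4), which in turn
   loops on the LWS kernel helper.  The counter variable and function name
   are hypothetical.  */
#if 0
static unsigned int example_counter;

static unsigned int
example_bump_counter (void)
{
  /* Resolved by the compiler to __sync_fetch_and_add_4 (&example_counter, 1).  */
  return __sync_fetch_and_add (&example_counter, 1);
}
#endif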

/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%2, %%r20		\n\t"
			"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
			"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	: "=r" (lws_ret), "=r" (lws_errno)
	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    return -EBUSY;

  return lws_errno;
}
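
/* Illustrative sketch only (kept out of the build with #if 0): every
   word-sized operation further down follows the same pattern -- reload the
   current value and retry the LWS compare-and-exchange until it returns 0.
   A nonzero return is either -EBUSY (the word changed underneath us) or an
   errno value such as -EFAULT from the kernel.  The function name is
   hypothetical.  */
#if 0
static unsigned int
example_atomic_exchange (volatile unsigned int *ptr, unsigned int newval)
{
  unsigned int oldval;
  long failure;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, newval);
  } while (failure != 0);

  return oldval;	/* the value that was replaced */
}
#endif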

/* Kernel helper for a compare-and-exchange on a 1, 2 or 8-byte value.
   OLDVAL and NEWVAL are passed by reference, and VAL_SIZE selects the
   operand size (the callers below pass 0, 1 and 3 respectively).  */
static inline long
__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%6, %%r20		\n\t"
			"cmpiclr,<> " _ASM_EFAULT ", %%r21, %%r0\n\t"
			"iitlbp %%r0,(%%sr0, %%r0)	\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
	: "i" (2)
	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );

  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  /* If the kernel LWS call fails with no error, return -EBUSY.  */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
 \
    do { \
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
 \
    return tmp; \
  }
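
/* For illustration only (kept out of the build with #if 0):
   FETCH_AND_OP_2 (add, , +, u8, 1, 0) below expands to roughly the
   following out-of-line function, which the compiler calls for a 1-byte
   __sync_fetch_and_add on this target.  */
#if 0
u8 HIDDEN
__sync_fetch_and_add_1 (volatile void *ptr, u8 val)
{
  u8 tmp, newval;
  long failure;

  do {
    tmp = __atomic_load_n ((volatile u8 *)ptr, __ATOMIC_RELAXED);
    newval = (tmp + val);
    failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, 0);
  } while (failure != 0);

  return tmp;
}
#endif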

FETCH_AND_OP_2 (add,   , +, u64, 8, 3)
FETCH_AND_OP_2 (sub,   , -, u64, 8, 3)
FETCH_AND_OP_2 (or,    , |, u64, 8, 3)
FETCH_AND_OP_2 (and,   , &, u64, 8, 3)
FETCH_AND_OP_2 (xor,   , ^, u64, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, u64, 8, 3)

FETCH_AND_OP_2 (add,   , +, u16, 2, 1)
FETCH_AND_OP_2 (sub,   , -, u16, 2, 1)
FETCH_AND_OP_2 (or,    , |, u16, 2, 1)
FETCH_AND_OP_2 (and,   , &, u16, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, u16, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, u16, 2, 1)

FETCH_AND_OP_2 (add,   , +, u8, 1, 0)
FETCH_AND_OP_2 (sub,   , -, u8, 1, 0)
FETCH_AND_OP_2 (or,    , |, u8, 1, 0)
FETCH_AND_OP_2 (and,   , &, u8, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, u8, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, u8, 1, 0)

#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    long failure; \
 \
    do { \
      tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
 \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_2 (add,   , +, u64, 8, 3)
OP_AND_FETCH_2 (sub,   , -, u64, 8, 3)
OP_AND_FETCH_2 (or,    , |, u64, 8, 3)
OP_AND_FETCH_2 (and,   , &, u64, 8, 3)
OP_AND_FETCH_2 (xor,   , ^, u64, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, u64, 8, 3)

OP_AND_FETCH_2 (add,   , +, u16, 2, 1)
OP_AND_FETCH_2 (sub,   , -, u16, 2, 1)
OP_AND_FETCH_2 (or,    , |, u16, 2, 1)
OP_AND_FETCH_2 (and,   , &, u16, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, u16, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, u16, 2, 1)

OP_AND_FETCH_2 (add,   , +, u8, 1, 0)
OP_AND_FETCH_2 (sub,   , -, u8, 1, 0)
OP_AND_FETCH_2 (or,    , |, u8, 1, 0)
OP_AND_FETCH_2 (and,   , &, u8, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, u8, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, u8, 1, 0)

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
 \
    do { \
      tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
			     __ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
 \
    return tmp; \
  }

FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
  unsigned int HIDDEN \
  __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val) \
  { \
    unsigned int tmp; \
    long failure; \
 \
    do { \
      tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
			     __ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
 \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)

typedef unsigned char bool;

#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval, \
				       TYPE newval) \
  { \
    TYPE actual_oldval; \
    long fail; \
 \
    while (1) \
      { \
	actual_oldval = __atomic_load_n ((volatile TYPE *)ptr, \
					 __ATOMIC_RELAXED); \
 \
	if (__builtin_expect (oldval != actual_oldval, 0)) \
	  return actual_oldval; \
 \
	fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
 \
	if (__builtin_expect (!fail, 1)) \
	  return actual_oldval; \
      } \
  } \
 \
  _Bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr, \
					TYPE oldval, TYPE newval) \
  { \
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
    return (failure == 0); \
  }

COMPARE_AND_SWAP_2 (u64, 8, 3)
COMPARE_AND_SWAP_2 (u16, 2, 1)
COMPARE_AND_SWAP_2 (u8, 1, 0)

unsigned int HIDDEN
__sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
			       unsigned int newval)
{
  long fail;
  unsigned int actual_oldval;

  while (1)
    {
      actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
				       __ATOMIC_RELAXED);

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}

_Bool HIDDEN
__sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
				unsigned int newval)
{
  long failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}
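
/* Illustrative use only (kept out of the build with #if 0):
   __sync_bool_compare_and_swap reports whether the swap happened, while
   __sync_val_compare_and_swap returns the value that was actually observed.
   A typical caller retries with a freshly loaded value, as in this
   hypothetical bounded increment.  */
#if 0
static unsigned int
example_bounded_increment (volatile unsigned int *ptr, unsigned int limit)
{
  unsigned int cur;

  do
    {
      cur = *ptr;
      if (cur >= limit)
	return cur;		/* already at the limit; nothing to do */
    }
  while (!__sync_bool_compare_and_swap (ptr, cur, cur + 1));

  return cur + 1;
}
#endif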

#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val) \
  { \
    TYPE oldval; \
    long failure; \
 \
    do { \
      oldval = __atomic_load_n ((volatile TYPE *)ptr, \
				__ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
 \
    return oldval; \
  }

SYNC_LOCK_TEST_AND_SET_2 (u64, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (u16, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (u8, 1, 0)

unsigned int HIDDEN
__sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}

#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX) \
  void HIDDEN \
  __sync_lock_release_##WIDTH (volatile void *ptr) \
  { \
    TYPE oldval, val = 0; \
    long failure; \
 \
    do { \
      oldval = __atomic_load_n ((volatile TYPE *)ptr, \
				__ATOMIC_RELAXED); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
  }

SYNC_LOCK_RELEASE_1 (u64, 8, 3)
SYNC_LOCK_RELEASE_1 (u16, 2, 1)
SYNC_LOCK_RELEASE_1 (u8, 1, 0)

void HIDDEN
__sync_lock_release_4 (volatile void *ptr)
{
  long failure;
  unsigned int oldval;

  do {
    oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
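
/* Illustrative use only (kept out of the build with #if 0):
   __sync_lock_test_and_set acts as the acquire primitive and
   __sync_lock_release as the matching release, so the two functions above
   are enough to build a minimal spinlock.  The lock variable and function
   names are hypothetical.  */
#if 0
static volatile unsigned int example_lock;

static void
example_spin_lock (void)
{
  /* Spin until the previous value was 0, i.e. the lock was free.  */
  while (__sync_lock_test_and_set (&example_lock, 1))
    continue;
}

static void
example_spin_unlock (void)
{
  /* Store 0 back with release semantics.  */
  __sync_lock_release (&example_lock);
}
#endif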