/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2018 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* PA-RISC (hppa) errno values used by the kernel helpers below.  */
#define EFAULT	14
#define EBUSY	16
#define ENOSYS	251

/* All PA-RISC implementations supported by Linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short, int and 8-byte (long long) versions of
   each atomic operation using the kernel helpers defined below.  */
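/* Editorial illustration (not part of the original file): on
   hppa-linux, GCC expands a __sync builtin that it cannot inline into
   a call to the matching hidden function defined below.  For example,

     int counter;
     int old = __sync_fetch_and_add (&counter, 1);

   resolves to __sync_fetch_and_add_4, which loops on the kernel
   compare-and-exchange helper until the update succeeds.  */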
/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)
47 /* Kernel helper for compare-and-exchange a 32-bit value. */
49 __kernel_cmpxchg (int *mem
, int oldval
, int newval
)
51 register unsigned long lws_mem
asm("r26") = (unsigned long) (mem
);
52 register int lws_old
asm("r25") = oldval
;
53 register int lws_new
asm("r24") = newval
;
54 register long lws_ret
asm("r28");
55 register long lws_errno
asm("r21");
  /* The "ble" branches to the kernel's light-weight syscall (LWS)
     gateway page via space register sr2; the "ldi" in the branch
     delay slot loads the LWS function number into r20.  */
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%2, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno)
	: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    __builtin_trap ();
  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    lws_errno = -EBUSY;

  return lws_errno;
}
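/* Illustrative sketch (not in the original file): every operation
   below follows the same retry pattern.  A word-sized atomic
   increment built directly on the helper would look like

     int old;
     do
       old = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
     while (__kernel_cmpxchg (ptr, old, old + 1) != 0);

   retrying on -EBUSY, i.e. whenever another thread changed *ptr
   between the load and the kernel compare-and-swap.  */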
/* Kernel helper for compare-and-exchange a value of arbitrary width.
   VAL_SIZE is log2 of the operand size in bytes (0 = 1 byte, 1 = 2
   bytes, 2 = 4 bytes, 3 = 8 bytes).  */
static inline long
__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
		   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret   asm("r28");
  register long lws_errno asm("r21");
  /* LWS function 2 is the variable-size compare and exchange; the
     "ldi" in the branch delay slot loads it into r20.  */
  asm volatile (	"ble	0xb0(%%sr2, %%r0)	\n\t"
			"ldi	%6, %%r20		\n\t"
	: "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
	  "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
	: "i" (2)
	: "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );
  /* If the kernel LWS call is successful, lws_ret contains 0.  */
  if (__builtin_expect (lws_ret == 0, 1))
    return 0;

  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    __builtin_trap ();

  /* If the kernel LWS call fails with no error, return -EBUSY.  */
  if (__builtin_expect (!lws_errno, 0))
    return -EBUSY;

  return lws_errno;
}
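/* Example (illustrative, not in the original file): exchange a 16-bit
   value.  The index argument is log2 of the operand width in bytes,
   so 1 selects 2 bytes:

     short flag = 0, expected = 0, desired = 1;
     long err = __kernel_cmpxchg2 (&flag, &expected, &desired, 1);

   A return of 0 means the swap was performed; -EBUSY means the memory
   word no longer matched EXPECTED and the caller should retry.  */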
#define HIDDEN __attribute__ ((visibility ("hidden")))
/* Big endian masks.  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu
#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_fetch_and_##OP##_##WIDTH (TYPE *ptr, TYPE val)			\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
FETCH_AND_OP_2 (add,   , +, long long, 8, 3)
FETCH_AND_OP_2 (sub,   , -, long long, 8, 3)
FETCH_AND_OP_2 (or,    , |, long long, 8, 3)
FETCH_AND_OP_2 (and,   , &, long long, 8, 3)
FETCH_AND_OP_2 (xor,   , ^, long long, 8, 3)
FETCH_AND_OP_2 (nand, ~, &, long long, 8, 3)

FETCH_AND_OP_2 (add,   , +, short, 2, 1)
FETCH_AND_OP_2 (sub,   , -, short, 2, 1)
FETCH_AND_OP_2 (or,    , |, short, 2, 1)
FETCH_AND_OP_2 (and,   , &, short, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, short, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, short, 2, 1)

FETCH_AND_OP_2 (add,   , +, signed char, 1, 0)
FETCH_AND_OP_2 (sub,   , -, signed char, 1, 0)
FETCH_AND_OP_2 (or,    , |, signed char, 1, 0)
FETCH_AND_OP_2 (and,   , &, signed char, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, signed char, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
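/* For reference (editorial, not in the original file):
   FETCH_AND_OP_2 (add,   , +, short, 2, 1) expands to

     short __sync_fetch_and_add_2 (short *ptr, short val)

   which loads *ptr, computes the sum, retries the 2-byte kernel
   compare-and-exchange until no other thread intervenes, and returns
   the value *ptr held before the update.  */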
#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX)		\
  TYPE HIDDEN								\
  __sync_##OP##_and_fetch_##WIDTH (TYPE *ptr, TYPE val)			\
  {									\
    TYPE tmp, newval;							\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      newval = PFX_OP (tmp INF_OP val);					\
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX);		\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
OP_AND_FETCH_2 (add,   , +, long long, 8, 3)
OP_AND_FETCH_2 (sub,   , -, long long, 8, 3)
OP_AND_FETCH_2 (or,    , |, long long, 8, 3)
OP_AND_FETCH_2 (and,   , &, long long, 8, 3)
OP_AND_FETCH_2 (xor,   , ^, long long, 8, 3)
OP_AND_FETCH_2 (nand, ~, &, long long, 8, 3)

OP_AND_FETCH_2 (add,   , +, short, 2, 1)
OP_AND_FETCH_2 (sub,   , -, short, 2, 1)
OP_AND_FETCH_2 (or,    , |, short, 2, 1)
OP_AND_FETCH_2 (and,   , &, short, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, short, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, short, 2, 1)

OP_AND_FETCH_2 (add,   , +, signed char, 1, 0)
OP_AND_FETCH_2 (sub,   , -, signed char, 1, 0)
OP_AND_FETCH_2 (or,    , |, signed char, 1, 0)
OP_AND_FETCH_2 (and,   , &, signed char, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, signed char, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)
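/* Note (editorial): OP_AND_FETCH_2 differs from FETCH_AND_OP_2 only
   in its return value.  With *p initially 1,

     __sync_fetch_and_add_2 (p, 1)  returns 1 (the old value), while
     __sync_add_and_fetch_2 (p, 1)  returns 2 (the new value).  */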
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_fetch_and_##OP##_4 (int *ptr, int val)				\
  {									\
    int tmp;								\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return tmp;								\
  }
FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP)				\
  int HIDDEN								\
  __sync_##OP##_and_fetch_4 (int *ptr, int val)				\
  {									\
    int tmp;								\
    long failure;							\
									\
    do {								\
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val));	\
    } while (failure != 0);						\
									\
    return PFX_OP (tmp INF_OP val);					\
  }
OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)
typedef unsigned char bool;
#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX)				\
  TYPE HIDDEN								\
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
				       TYPE newval)			\
  {									\
    TYPE actual_oldval;							\
    long fail;								\
									\
    while (1)								\
      {									\
	actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);	\
									\
	if (__builtin_expect (oldval != actual_oldval, 0))		\
	  return actual_oldval;						\
									\
	fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX);\
									\
	if (__builtin_expect (!fail, 1))				\
	  return actual_oldval;						\
      }									\
  }									\
									\
  bool HIDDEN								\
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval,		\
					TYPE newval)			\
  {									\
    long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX);	\
    return (failure == 0);						\
  }
COMPARE_AND_SWAP_2 (long long, 8, 3)
COMPARE_AND_SWAP_2 (short, 2, 1)
COMPARE_AND_SWAP_2 (char, 1, 0)
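/* Usage sketch (editorial, not in the original file): the two entry
   points report results differently.

     short v = 0;
     short prev = __sync_val_compare_and_swap_2 (&v, 0, 5);
     bool  ok   = __sync_bool_compare_and_swap_2 (&v, 5, 7);

   PREV is the value found in memory (0 here, so the swap happened);
   OK is nonzero only if the swap was performed.  */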
int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  long fail;
  int actual_oldval;

  while (1)
    {
      actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);

      if (__builtin_expect (oldval != actual_oldval, 0))
	return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
	return actual_oldval;
    }
}
bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  long failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}
#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX)			\
  TYPE HIDDEN								\
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val)		\
  {									\
    TYPE oldval;							\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX);		\
    } while (failure != 0);						\
									\
    return oldval;							\
  }
SYNC_LOCK_TEST_AND_SET_2 (long long, 8, 3)
SYNC_LOCK_TEST_AND_SET_2 (short, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (signed char, 1, 0)
int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  long failure;
  int oldval;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}
#define SYNC_LOCK_RELEASE_2(TYPE, WIDTH, INDEX)				\
  void HIDDEN								\
  __sync_lock_release_##WIDTH (TYPE *ptr)				\
  {									\
    TYPE oldval, zero = 0;						\
    long failure;							\
									\
    do {								\
      oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);			\
      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX);		\
    } while (failure != 0);						\
  }
SYNC_LOCK_RELEASE_2 (long long, 8, 3)
SYNC_LOCK_RELEASE_2 (short, 2, 1)
SYNC_LOCK_RELEASE_2 (signed char, 1, 0)
void HIDDEN
__sync_lock_release_4 (int *ptr)
{
  long failure;
  int oldval;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
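/* Note (editorial): __sync_lock_release is defined to store 0 with
   release semantics.  It is implemented above as a cmpxchg loop rather
   than a plain store, presumably to keep the write atomic with respect
   to concurrent LWS compare-and-swap operations on the same word.
   Typical use (illustrative): spin with

     while (__sync_lock_test_and_set_4 (&lock, 1) != 0)
       ;

   to acquire, then call __sync_lock_release_4 (&lock) to release.  */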