/* Linux-specific atomic operations for PA Linux.
   Copyright (C) 2008-2015 Free Software Foundation, Inc.
   Based on code contributed by CodeSourcery for ARM EABI Linux.
   Modifications for PA Linux by Helge Deller <deller@gmx.de>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* PA-RISC errno values used by the kernel helpers below.  */
#define EFAULT  14
#define EBUSY   16
#define ENOSYS 251

/* All PA-RISC implementations supported by linux have strongly
   ordered loads and stores.  Only cache flushes and purges can be
   delayed.  The data cache implementations are all globally
   coherent.  Thus, there is no need to synchronize memory accesses.

   GCC automatically issues an asm memory barrier when it encounters
   a __sync_synchronize builtin.  Thus, we do not need to define this
   builtin.

   We implement byte, short and int versions of each atomic operation
   using the kernel helper defined below.  There is no support for
   64-bit operations yet.  */
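
/* Illustrative sketch (guarded out of the build): user code never calls
   these entry points directly.  A __sync builtin applied to a 1-, 2- or
   4-byte object is expanded by the compiler into a call to the matching
   out-of-line function defined in this file.  The variable and function
   names below are hypothetical.  */
#if 0
static short counter16;
static int counter32;

static void
example_callers (void)
{
  /* Becomes a call to __sync_fetch_and_add_2 (&counter16, 1).  */
  __sync_fetch_and_add (&counter16, 1);

  /* Becomes a call to __sync_fetch_and_add_4 (&counter32, 1).  */
  __sync_fetch_and_add (&counter32, 1);
}
#endif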

/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
#define LWS_CAS (sizeof(long) == 4 ? 0 : 1)

/* Kernel helper for compare-and-exchange a 32-bit value.  */
static inline long
__kernel_cmpxchg (int *mem, int oldval, int newval)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register int lws_old asm("r25") = oldval;
  register int lws_new asm("r24") = newval;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
                 "ldi %2, %%r20 \n\t"
    : "=r" (lws_ret), "=r" (lws_errno)
    : "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
    : "r1", "r20", "r22", "r23", "r29", "r31", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    __builtin_trap ();

  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
     the old value from memory.  If this value is equal to OLDVAL, the
     new value was written to memory.  If not, return -EBUSY.  */
  if (!lws_errno && lws_ret != oldval)
    lws_errno = -EBUSY;

  return lws_errno;
}
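
/* Usage sketch (guarded out of the build): __kernel_cmpxchg returns 0 on
   success and -EBUSY when *MEM no longer held OLDVAL, so callers retry in
   a loop.  This mirrors the FETCH_AND_OP_WORD pattern further below; the
   function name atomic_add_int_example is hypothetical.  */
#if 0
static int
atomic_add_int_example (int *ptr, int val)
{
  int tmp;

  do
    tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
  while (__kernel_cmpxchg (ptr, tmp, tmp + val) != 0);

  return tmp;
}
#endif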

/* Kernel helper for compare-and-exchange a variable-sized value.  */
static inline long
__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
                   int val_size)
{
  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
  register unsigned long lws_old asm("r25") = (unsigned long) oldval;
  register unsigned long lws_new asm("r24") = (unsigned long) newval;
  register int lws_size asm("r23") = val_size;
  register long lws_ret asm("r28");
  register long lws_errno asm("r21");
  asm volatile ( "ble 0xb0(%%sr2, %%r0) \n\t"
                 "ldi %6, %%r20 \n\t"
    : "=r" (lws_ret), "=r" (lws_errno), "+r" (lws_mem),
      "+r" (lws_old), "+r" (lws_new), "+r" (lws_size)
    : "i" (2)
    : "r1", "r20", "r22", "r29", "r31", "fr4", "memory"
  );
  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
    __builtin_trap ();

  /* If the kernel LWS call succeeded but the compare failed (nonzero
     lws_ret), return -EBUSY.  */
  if (!lws_errno && lws_ret)
    lws_errno = -EBUSY;

  return lws_errno;
}

#define HIDDEN __attribute__ ((visibility ("hidden")))

/* Big endian masks  */
#define INVERT_MASK_1 24
#define INVERT_MASK_2 16

#define MASK_1 0xffu
#define MASK_2 0xffffu

#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_fetch_and_##OP##_##WIDTH (TYPE *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    int failure; \
 \
    do { \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
 \
    return tmp; \
  }

FETCH_AND_OP_2 (add,   , +, short, 2, 1)
FETCH_AND_OP_2 (sub,   , -, short, 2, 1)
FETCH_AND_OP_2 (or,    , |, short, 2, 1)
FETCH_AND_OP_2 (and,   , &, short, 2, 1)
FETCH_AND_OP_2 (xor,   , ^, short, 2, 1)
FETCH_AND_OP_2 (nand, ~, &, short, 2, 1)

FETCH_AND_OP_2 (add,   , +, signed char, 1, 0)
FETCH_AND_OP_2 (sub,   , -, signed char, 1, 0)
FETCH_AND_OP_2 (or,    , |, signed char, 1, 0)
FETCH_AND_OP_2 (and,   , &, signed char, 1, 0)
FETCH_AND_OP_2 (xor,   , ^, signed char, 1, 0)
FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
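
/* For illustration (guarded out of the build): the instantiation
   FETCH_AND_OP_2 (add,   , +, short, 2, 1) above expands to roughly the
   following function.  */
#if 0
short HIDDEN
__sync_fetch_and_add_2 (short *ptr, short val)
{
  short tmp, newval;
  int failure;

  do {
    tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    newval = tmp + val;
    failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, 1);
  } while (failure != 0);

  return tmp;
}
#endif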

#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_##OP##_and_fetch_##WIDTH (TYPE *ptr, TYPE val) \
  { \
    TYPE tmp, newval; \
    int failure; \
 \
    do { \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      newval = PFX_OP (tmp INF_OP val); \
      failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
    } while (failure != 0); \
 \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_2 (add,   , +, short, 2, 1)
OP_AND_FETCH_2 (sub,   , -, short, 2, 1)
OP_AND_FETCH_2 (or,    , |, short, 2, 1)
OP_AND_FETCH_2 (and,   , &, short, 2, 1)
OP_AND_FETCH_2 (xor,   , ^, short, 2, 1)
OP_AND_FETCH_2 (nand, ~, &, short, 2, 1)

OP_AND_FETCH_2 (add,   , +, signed char, 1, 0)
OP_AND_FETCH_2 (sub,   , -, signed char, 1, 0)
OP_AND_FETCH_2 (or,    , |, signed char, 1, 0)
OP_AND_FETCH_2 (and,   , &, signed char, 1, 0)
OP_AND_FETCH_2 (xor,   , ^, signed char, 1, 0)
OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)

#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
  int HIDDEN \
  __sync_fetch_and_##OP##_4 (int *ptr, int val) \
  { \
    int tmp, failure; \
 \
    do { \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
 \
    return tmp; \
  }

FETCH_AND_OP_WORD (add,   , +)
FETCH_AND_OP_WORD (sub,   , -)
FETCH_AND_OP_WORD (or,    , |)
FETCH_AND_OP_WORD (and,   , &)
FETCH_AND_OP_WORD (xor,   , ^)
FETCH_AND_OP_WORD (nand, ~, &)

#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
  int HIDDEN \
  __sync_##OP##_and_fetch_4 (int *ptr, int val) \
  { \
    int tmp, failure; \
 \
    do { \
      tmp = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
    } while (failure != 0); \
 \
    return PFX_OP (tmp INF_OP val); \
  }

OP_AND_FETCH_WORD (add,   , +)
OP_AND_FETCH_WORD (sub,   , -)
OP_AND_FETCH_WORD (or,    , |)
OP_AND_FETCH_WORD (and,   , &)
OP_AND_FETCH_WORD (xor,   , ^)
OP_AND_FETCH_WORD (nand, ~, &)

typedef unsigned char bool;

#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                       TYPE newval) \
  { \
    TYPE actual_oldval; \
    int fail; \
 \
    while (1) \
      { \
        actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
 \
        if (__builtin_expect (oldval != actual_oldval, 0)) \
          return actual_oldval; \
 \
        fail = __kernel_cmpxchg2 (ptr, &actual_oldval, &newval, INDEX); \
 \
        if (__builtin_expect (!fail, 1)) \
          return actual_oldval; \
      } \
  } \
 \
  bool HIDDEN \
  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
                                        TYPE newval) \
  { \
    int failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
    return (failure == 0); \
  }

COMPARE_AND_SWAP_2 (short, 2, 1)
COMPARE_AND_SWAP_2 (char, 1, 0)
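
/* Usage sketch (guarded out of the build): application code calls the
   generic builtin, which on a 2-byte object resolves to the
   __sync_*_compare_and_swap_2 pair defined above.  The flag variable and
   function name are hypothetical.  */
#if 0
static short flag;

static int
try_claim_flag (void)
{
  /* Nonzero only if flag was 0 and has just been set to 1.  */
  return __sync_bool_compare_and_swap (&flag, 0, 1);
}
#endif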

int HIDDEN
__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int actual_oldval, fail;

  while (1)
    {
      actual_oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);

      if (__builtin_expect (oldval != actual_oldval, 0))
        return actual_oldval;

      fail = __kernel_cmpxchg (ptr, actual_oldval, newval);

      if (__builtin_expect (!fail, 1))
        return actual_oldval;
    }
}

bool HIDDEN
__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
{
  int failure = __kernel_cmpxchg (ptr, oldval, newval);
  return (failure == 0);
}

#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
  TYPE HIDDEN \
  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
  { \
    TYPE oldval; \
    int failure; \
 \
    do { \
      oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
    } while (failure != 0); \
 \
    return oldval; \
  }

SYNC_LOCK_TEST_AND_SET_2 (short, 2, 1)
SYNC_LOCK_TEST_AND_SET_2 (signed char, 1, 0)

int HIDDEN
__sync_lock_test_and_set_4 (int *ptr, int val)
{
  int oldval;
  int failure;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (ptr, oldval, val);
  } while (failure != 0);

  return oldval;
}

#define SYNC_LOCK_RELEASE_2(TYPE, WIDTH, INDEX) \
  void HIDDEN \
  __sync_lock_release_##WIDTH (TYPE *ptr) \
  { \
    TYPE failure, oldval, zero = 0; \
 \
    do { \
      oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST); \
      failure = __kernel_cmpxchg2 (ptr, &oldval, &zero, INDEX); \
    } while (failure != 0); \
  }

SYNC_LOCK_RELEASE_2 (short, 2, 1)
SYNC_LOCK_RELEASE_2 (signed char, 1, 0)

void HIDDEN
__sync_lock_release_4 (int *ptr)
{
  int oldval;
  int failure;

  do {
    oldval = __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
    failure = __kernel_cmpxchg (ptr, oldval, 0);
  } while (failure != 0);
}
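
/* Usage sketch (guarded out of the build): __sync_lock_test_and_set and
   __sync_lock_release are typically paired to build a simple spinlock.
   On a 4-byte lock word the builtins resolve to the _4 entry points
   above.  The names lock_word, spin_lock_example and spin_unlock_example
   are hypothetical.  */
#if 0
static int lock_word;

static void
spin_lock_example (void)
{
  /* Spin until the previous value was 0, i.e. the lock was acquired.  */
  while (__sync_lock_test_and_set (&lock_word, 1) != 0)
    ;
}

static void
spin_unlock_example (void)
{
  __sync_lock_release (&lock_word);
}
#endif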