/* Simple atomic operations for multithreading.
   Copyright (C) 2020-2024 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2021.  */

#include <config.h>

/* Specification.  */
#include "simple-atomic.h"

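/* Example (illustrative only, 'counter' is a hypothetical variable): the
   primitives below are typically used in a compare-and-swap retry loop,
   e.g. to increment a counter atomically:

     unsigned int volatile counter;
     unsigned int old;
     do
       old = counter;
     while (atomic_compare_and_swap (&counter, old, old + 1) != old);
 */
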
/* Sun C, in strict ISO mode, spells the 'asm' keyword '__asm'.  */
#if 0x590 <= __SUNPRO_C && __STDC__
# define asm __asm
#endif

#if defined _WIN32 && ! defined __CYGWIN__
/* Native Windows.  */

# include <windows.h>

void
memory_barrier (void)
{
  /* MemoryBarrier
     <https://docs.microsoft.com/en-us/windows/win32/api/winnt/nf-winnt-memorybarrier>  */
  MemoryBarrier ();
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
  /* InterlockedCompareExchange
     <https://docs.microsoft.com/en-us/windows/win32/api/winnt/nf-winnt-interlockedcompareexchange>  */
  return InterlockedCompareExchange ((LONG volatile *) vp,
                                     (LONG) newval, (LONG) cmp);
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
  /* InterlockedCompareExchangePointer
     <https://docs.microsoft.com/en-us/windows/win32/api/winnt/nf-winnt-interlockedcompareexchangepointer>  */
  return InterlockedCompareExchangePointer ((void * volatile *) vp,
                                            (void *) newval, (void *) cmp);
}

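/* Note: both Interlocked* functions above return the initial value of *vp
   and act as full memory barriers, matching the semantics of the GCC
   built-ins used below.  */
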
#elif HAVE_PTHREAD_H
/* Some other platform that supports multi-threading.

   We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because it
   would require linking with -latomic.  */

# if (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
       || __clang_major__ >= 3) \
      && HAVE_ATOMIC_COMPARE_AND_SWAP_GCC41)
/* Use GCC built-ins (available on many platforms with GCC >= 4.1 or
   clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */

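/* __sync_val_compare_and_swap (p, cmp, newval) atomically performs
     { old = *p; if (old == cmp) *p = newval; return old; }
   and, like __sync_synchronize, acts as a full memory barrier.  */
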
void
memory_barrier (void)
{
  __sync_synchronize ();
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
  return __sync_val_compare_and_swap (vp, cmp, newval);
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
  return __sync_val_compare_and_swap (vp, cmp, newval);
}

# elif defined _AIX
/* AIX */
/* For older versions of GCC or xlc, use inline assembly.
   __compare_and_swap and __compare_and_swaplp are not sufficient here.  */

void
memory_barrier (void)
{
  asm volatile ("sync");
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
  asm volatile ("sync");

  unsigned int oldval;
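  /* Load-linked / store-conditional loop: 'lwarx' loads *vp and sets a
     reservation; 'stwcx.' stores newval only if the reservation still
     holds, otherwise the final 'bne' retries the loop.  The 'sync' before
     and 'isync' after give the whole operation full-barrier semantics.  */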
  asm volatile (
#  if defined __GNUC__ || defined __clang__
                "1: lwarx %0,0,%1\n"
                "   cmpw 0,%0,%2\n"
                "   bne 0,2f\n"
                "   stwcx. %3,0,%1\n"
                "   bne 0,1b\n"
                "2:"
#  else /* another label syntax */
                ".L01: lwarx %0,0,%1\n"
                "      cmpw 0,%0,%2\n"
                "      bne 0,.L02\n"
                "      stwcx. %3,0,%1\n"
                "      bne 0,.L01\n"
                ".L02:"
#  endif
                : "=&r" (oldval)
                : "r" (vp), "r" (cmp), "r" (newval)
                : "cr0");

  asm volatile ("isync");
  return oldval;
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
  asm volatile ("sync");

  uintptr_t oldval;
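  /* Same load-linked / store-conditional loop as above; on 64-bit ABIs the
     doubleword forms 'ldarx'/'stdcx.' and 'cmpd' are needed, since
     uintptr_t is 8 bytes wide there.  */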
  asm volatile (
#  if defined __GNUC__ || defined __clang__
#   if defined __powerpc64__ || defined __LP64__
                "1: ldarx %0,0,%1\n"
                "   cmpd 0,%0,%2\n"
                "   bne 0,2f\n"
                "   stdcx. %3,0,%1\n"
                "   bne 0,1b\n"
                "2:"
#   else
                "1: lwarx %0,0,%1\n"
                "   cmpw 0,%0,%2\n"
                "   bne 0,2f\n"
                "   stwcx. %3,0,%1\n"
                "   bne 0,1b\n"
                "2:"
#   endif
#  else /* another label syntax */
#   if defined __powerpc64__ || defined __LP64__
                ".L01: ldarx %0,0,%1\n"
                "      cmpd 0,%0,%2\n"
                "      bne 0,.L02\n"
                "      stdcx. %3,0,%1\n"
                "      bne 0,.L01\n"
                ".L02:"
#   else
                ".L01: lwarx %0,0,%1\n"
                "      cmpw 0,%0,%2\n"
                "      bne 0,.L02\n"
                "      stwcx. %3,0,%1\n"
                "      bne 0,.L01\n"
                ".L02:"
#   endif
#  endif
                : "=&r" (oldval)
                : "r" (vp), "r" (cmp), "r" (newval)
                : "cr0");

  asm volatile ("isync");
  return oldval;
}

# elif ((defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) \
        && (defined __sparc || defined __i386 || defined __x86_64__)) \
       || (defined __TINYC__ && (defined __i386 || defined __x86_64__))
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */

void
memory_barrier (void)
{
#  if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
#   if defined __i386 || defined __x86_64__
#    if defined __TINYC__ && defined __i386
  /* Cannot use the SSE instruction "mfence" with this compiler.  A locked
     no-op read-modify-write on the stack is an equivalent full barrier
     on x86.  */
  asm volatile ("lock orl $0,(%esp)");
#    else
  asm volatile ("mfence");
#    endif
#   endif
#   if defined __sparc
  asm volatile ("membar 2");  /* i.e. "membar #StoreLoad" */
#   endif
#  else
#   if defined __i386 || defined __x86_64__
  asm ("mfence");
#   endif
#   if defined __sparc
  asm ("membar 2");  /* i.e. "membar #StoreLoad" */
#   endif
#  endif
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
#  if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  unsigned int oldval;
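  /* 'lock cmpxchgl' compares %eax (here: cmp) with *vp and, if they are
     equal, stores newval into *vp; either way, %eax ends up holding the
     old value of *vp.  SPARC 'cas' compares *vp with %2 and swaps in %3 on
     a match; %3 always receives the old value of *vp.  */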
#   if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#   endif
#   if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#   endif
  return oldval;
#  else /* __SUNPRO_C */
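  /* Simple asm cannot refer to operands symbolically, so the arguments are
     taken from the registers or stack slots where the ABI's calling
     convention places them, and the result is left in the ABI's return
     register.  */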
#   if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#   elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#   endif
#   if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#   endif
#  endif
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
#  if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  uintptr_t oldval;
#   if defined __x86_64__
  asm volatile (" lock\n cmpxchgq %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#   elif defined __i386
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#   endif
#   if defined __sparc && (defined __sparcv9 || defined __arch64__)
  asm volatile (" casx [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#   elif defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#   endif
  return oldval;
#  else /* __SUNPRO_C */
#   if defined __x86_64__
  asm (" movq %rsi,%rax\n"
       " lock\n cmpxchgq %rdx,(%rdi)");
#   elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#   endif
#   if defined __sparc && (defined __sparcv9 || defined __arch64__)
  asm (" casx [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#   elif defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#   endif
#  endif
}

# else
/* Fallback code.  It is not actually atomic, so it has race conditions;
   the unit test will fail.  */

void
memory_barrier (void)
{
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
  unsigned int oldval = *vp;
  if (oldval == cmp)
    *vp = newval;
  return oldval;
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
  uintptr_t oldval = *vp;
  if (oldval == cmp)
    *vp = newval;
  return oldval;
}

# endif

#else
/* A platform that does not support multi-threading.  */

void
memory_barrier (void)
{
}

unsigned int
atomic_compare_and_swap (unsigned int volatile *vp,
                         unsigned int cmp,
                         unsigned int newval)
{
  unsigned int oldval = *vp;
  if (oldval == cmp)
    *vp = newval;
  return oldval;
}

uintptr_t
atomic_compare_and_swap_ptr (uintptr_t volatile *vp,
                             uintptr_t cmp,
                             uintptr_t newval)
{
  uintptr_t oldval = *vp;
  if (oldval == cmp)
    *vp = newval;
  return oldval;
}

#endif