[gnulib.git] / lib/asyncsafe-spin.c
/* Spin locks for communication between threads and signal handlers.
   Copyright (C) 2020 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>
#if defined _AIX
# include <sys/atomic_op.h>
#endif
#if defined _WIN32 && ! defined __CYGWIN__
/* Use Windows threads.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}
#else

# if HAVE_PTHREAD_H
/* Use POSIX threads.  */

/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because it would require linking with -lrt on HP-UX 11, OSF/1, Solaris 10,
   and also because on macOS only named semaphores work.

   We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because it
   would require linking with -latomic.  */
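/* Illustrative sketch, not part of the upstream file: roughly what the C11
   <stdatomic.h> alternative mentioned above would look like.  The names
   example_lock, example_acquire and example_release are hypothetical, and the
   block is disabled because gnulib avoids the potential -latomic dependency.  */
#  if 0
#  include <stdatomic.h>

static atomic_flag example_lock = ATOMIC_FLAG_INIT;

static void
example_acquire (void)
{
  /* Spin until the flag was previously clear, i.e. we now own the lock.  */
  while (atomic_flag_test_and_set_explicit (&example_lock, memory_order_seq_cst))
    ;
}

static void
example_release (void)
{
  atomic_flag_clear_explicit (&example_lock, memory_order_seq_cst);
}
#  endif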
#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1)) \
      && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.7 and clang >= 3.1) that operate on
   the first byte of the lock.
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */

#   if 1
/* An implementation that verifies the unlocks.  */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}
#   else
/* An implementation that is a little bit more optimized, but does not verify
   the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif
#  elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) \
         || __clang_major__ >= 3) \
        && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.1 and clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}
#  elif defined _AIX
/* AIX */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}
#  elif (defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */
/* An implementation that verifies the unlocks.  */

static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590
#    if defined __i386 || defined __x86_64__
  asm volatile ("mfence");
#    endif
#    if defined __sparc
  asm volatile ("membar 2");
#    endif
#   else
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");
#    endif
#   endif
}
/* Store NEWVAL in *VP if the old value *VP is == CMP.
   Return the old value.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else /* __SUNPRO_C */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
#   endif
}
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}
#  else
/* Fallback code.  It has some race conditions.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif
# else
/* Provide a dummy implementation for single-threaded applications.  */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}
#endif
void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
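
/* Usage sketch, not part of the upstream file: one plausible way to use this
   API, with hypothetical names (counter_lock, blocked_signals, handle_sigusr1,
   counter) and SIGUSR1 chosen arbitrarily.  Ordinary code passes the set of
   signals whose handlers also take the lock, so such a handler can never
   interrupt the critical section in the same thread and spin forever.  */
#if 0
#include <signal.h>

#include "asyncsafe-spin.h"

static asyncsafe_spinlock_t counter_lock;  /* protects 'counter' */
static sigset_t blocked_signals;           /* signals whose handlers take the lock */
static unsigned long counter;

/* Signal handler: updates 'counter' under the same spin lock.  */
static void
handle_sigusr1 (int sig)
{
  sigset_t saved;
  asyncsafe_spin_lock (&counter_lock, &blocked_signals, &saved);
  counter++;
  asyncsafe_spin_unlock (&counter_lock, &saved);
}

int
main (void)
{
  sigset_t saved;

  sigemptyset (&blocked_signals);
  sigaddset (&blocked_signals, SIGUSR1);
  asyncsafe_spin_init (&counter_lock);
  signal (SIGUSR1, handle_sigusr1);

  /* Ordinary code: SIGUSR1 is blocked while the lock is held, so the handler
     cannot interrupt this critical section.  */
  asyncsafe_spin_lock (&counter_lock, &blocked_signals, &saved);
  counter += 2;
  asyncsafe_spin_unlock (&counter_lock, &saved);

  asyncsafe_spin_destroy (&counter_lock);
  return 0;
}
#endif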