execute, spawn-pipe: Make multithread-safe on native Windows.
[gnulib.git] / lib / asyncsafe-spin.c
/* Spin locks for communication between threads and signal handlers.
   Copyright (C) 2020 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>
#if defined _AIX
# include <sys/atomic_op.h>
#endif
#if defined _WIN32 && ! defined __CYGWIN__
/* Use Windows threads.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}
#else

# if HAVE_PTHREAD_H
/* Use POSIX threads.  */

/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because that would require linking with -lrt on HP-UX 11, OSF/1, and
   Solaris 10, and also because on macOS only named semaphores work.

   We don't use C11 <stdatomic.h> (available in GCC >= 4.9) because it would
   require linking with -latomic.  */
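/* For reference, a minimal sketch of what the rejected <stdatomic.h> variant
   would look like (illustrative only, not compiled here):

     #include <stdatomic.h>

     static inline void
     do_lock (atomic_flag *lock)
     {
       while (atomic_flag_test_and_set_explicit (lock, memory_order_seq_cst))
         ;
     }

     static inline void
     do_unlock (atomic_flag *lock)
     {
       atomic_flag_clear_explicit (lock, memory_order_seq_cst);
     }

   On some platforms the C11 atomic operations are provided by libatomic,
   hence the -latomic link requirement mentioned above.  */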
#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1)) \
      && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.7 and clang >= 3.1) that operate on
   the first byte of the lock.
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */

#   if 1
/* An implementation that verifies the unlocks.  */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  On failure,
     __atomic_compare_exchange_n stores the value it observed into ZERO,
     therefore ZERO must be reset to 0 before each retry.  */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}
#   else
/* An implementation that is a little bit more optimized, but does not verify
   the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif
#  elif (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
          && !defined __sparc__) \
         || __clang_major__ >= 3) \
        && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.1, except on SPARC, and
   clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}
#  elif defined _AIX
/* AIX */
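/* The primitives come from <sys/atomic_op.h>: _check_lock (word, old, new)
   atomically compares *word with OLD and, if they are equal, stores NEW;
   it returns false when the store was performed and true otherwise.
   _clear_lock (word, value) is an atomic store of VALUE with barrier
   semantics.  */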
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}
#  elif (defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */
/* An implementation that verifies the unlocks.  */
static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590
#    if defined __i386 || defined __x86_64__
  asm volatile ("mfence");
#    endif
#    if defined __sparc
  asm volatile ("membar 2");  /* membar #StoreLoad */
#    endif
#   else
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");  /* membar #StoreLoad */
#    endif
#   endif
}
/* Store NEWVAL in *VP if the old value *VP is == CMP.
   Return the old value.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else /* __SUNPRO_C */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
#   endif
}
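/* Thus, when *vp is 0, atomic_compare_and_swap (vp, 0, 1) stores 1 and
   returns 0, so do_lock's spin loop below terminates; when *vp is 1, it
   changes nothing and returns 1, and the caller keeps spinning.  */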
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}
#  else
/* Fallback code.  It has a race condition: between the moment do_lock sees
   *vp == 0 and the moment it stores 1, another thread or a signal handler
   may have done the same, and both then enter the critical section.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif
# else
/* Provide a dummy implementation for single-threaded applications.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif
void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}

#endif
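/* Common protocol: block the signals in MASK first, then take the spin lock.
   The order matters: if the lock were taken first, a signal handler invoked
   on the same thread before the mask is installed could try to acquire the
   lock that the interrupted code already holds, and would spin forever.  */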
void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
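/* Example usage (an illustrative sketch; the names LOCK, SIGCHLD_SET and
   UPDATE_SHARED_STATE are hypothetical, not part of this API):

     static asyncsafe_spinlock_t lock;
     static sigset_t sigchld_set;   (initialized with sigemptyset + sigaddset)

     void
     update_shared_state (void)
     {
       sigset_t saved_mask;
       asyncsafe_spin_lock (&lock, &sigchld_set, &saved_mask);
       ... modify the data shared with the SIGCHLD handler ...
       asyncsafe_spin_unlock (&lock, &saved_mask);
     }

   Because SIGCHLD is blocked while the lock is held, a SIGCHLD handler that
   takes the same lock can never interrupt a critical section on its own
   thread.  */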