// Copyright (C) 2002-2023 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
//
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

// Written by Mark Mitchell, CodeSourcery LLC, <mark@codesourcery.com>
// Thread support written by Jason Merrill, Red Hat Inc. <jason@redhat.com>

#include <bits/c++config.h>
#include <cxxabi.h>
#include <exception>
#include <new>

#ifdef __USING_MCFGTHREAD__

#include <mcfgthread/cxa.h>

namespace __cxxabiv1
{
  extern "C" int
  __cxa_guard_acquire (__guard* g) _GLIBCXX_NOTHROW
  {
    return __MCF_cxa_guard_acquire(g);
  }

  extern "C" void
  __cxa_guard_release (__guard* g) _GLIBCXX_NOTHROW
  {
    __MCF_cxa_guard_release(g);
  }

  extern "C" void
  __cxa_guard_abort (__guard* g) _GLIBCXX_NOTHROW
  {
    __MCF_cxa_guard_abort(g);
  }
}  // namespace __cxxabiv1

#else // __USING_MCFGTHREAD__

#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/atomic_lockfree_defines.h>
#if defined(__GTHREADS) && defined(__GTHREAD_HAS_COND) \
    && (ATOMIC_INT_LOCK_FREE > 1) && defined(_GLIBCXX_HAVE_LINUX_FUTEX)
# include <climits>
# include <syscall.h>
# include <unistd.h>
# define _GLIBCXX_USE_FUTEX
# define _GLIBCXX_FUTEX_WAIT 0
# define _GLIBCXX_FUTEX_WAKE 1
#endif
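
// _GLIBCXX_FUTEX_WAIT and _GLIBCXX_FUTEX_WAKE mirror the Linux kernel's
// FUTEX_WAIT (0) and FUTEX_WAKE (1) operation codes; the futex paths below
// pass them directly to the raw SYS_futex syscall rather than including
// <linux/futex.h>.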

// The IA64/generic ABI uses the first byte of the guard variable.
// The ARM EABI uses the least significant bit.

// Thread-safe static local initialization support.
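
// For orientation (the authoritative definitions live in the ABI headers,
// not here): on the generic ABI __guard is a 64-bit integer whose first byte
// means "initialized", while on the ARM EABI it is a 32-bit integer whose
// bit 0 means "initialized".  The futex code below additionally views the
// guard as an int and uses further bytes of that word for its "pending" and
// "waiting" states via _GLIBCXX_GUARD_PENDING_BIT/_GLIBCXX_GUARD_WAITING_BIT.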

#ifdef __GTHREADS
# ifndef _GLIBCXX_USE_FUTEX
namespace
{
  // A single mutex controlling all static initializations.
  static __gnu_cxx::__recursive_mutex* static_mutex;

  typedef char fake_recursive_mutex[sizeof(__gnu_cxx::__recursive_mutex)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__recursive_mutex))));
  fake_recursive_mutex fake_mutex;

  static void init()
  { static_mutex = new (&fake_mutex) __gnu_cxx::__recursive_mutex(); }

  __gnu_cxx::__recursive_mutex&
  get_static_mutex()
  {
    static __gthread_once_t once = __GTHREAD_ONCE_INIT;
    __gthread_once(&once, init);
    return *static_mutex;
  }

  // Simple wrapper for exception safety.
  struct mutex_wrapper
  {
    bool unlock;

    mutex_wrapper() : unlock(true)
    { get_static_mutex().lock(); }

    ~mutex_wrapper()
    {
      if (unlock)
        static_mutex->unlock();
    }
  };
}
# endif

# if defined(__GTHREAD_HAS_COND) && !defined(_GLIBCXX_USE_FUTEX)
namespace
{
  // A single condition variable controlling all static initializations.
  static __gnu_cxx::__cond* static_cond;

  // using a fake type to avoid initializing a static class.
  typedef char fake_cond_t[sizeof(__gnu_cxx::__cond)]
  __attribute__ ((aligned(__alignof__(__gnu_cxx::__cond))));
  fake_cond_t fake_cond;

  static void init_static_cond()
  { static_cond = new (&fake_cond) __gnu_cxx::__cond(); }

  __gnu_cxx::__cond&
  get_static_cond()
  {
    static __gthread_once_t once = __GTHREAD_ONCE_INIT;
    __gthread_once(&once, init_static_cond);
    return *static_cond;
  }
}
# endif

# ifndef _GLIBCXX_GUARD_TEST_AND_ACQUIRE

// Test the guard variable with a memory load with
// acquire semantics.

inline bool
__test_and_acquire (__cxxabiv1::__guard *g)
{
  unsigned char __c;
  unsigned char *__p = reinterpret_cast<unsigned char *>(g);
  __atomic_load (__p, &__c, __ATOMIC_ACQUIRE);
  (void) __p;
  return _GLIBCXX_GUARD_TEST(&__c);
}
# define _GLIBCXX_GUARD_TEST_AND_ACQUIRE(G) __test_and_acquire (G)
# endif

# ifndef _GLIBCXX_GUARD_SET_AND_RELEASE

// Set the guard variable to 1 with memory order release semantics.

inline void
__set_and_release (__cxxabiv1::__guard *g)
{
  unsigned char *__p = reinterpret_cast<unsigned char *>(g);
  unsigned char val = 1;
  __atomic_store (__p, &val, __ATOMIC_RELEASE);
  (void) __p;
}
# define _GLIBCXX_GUARD_SET_AND_RELEASE(G) __set_and_release (G)
# endif

#else /* !__GTHREADS */

# undef _GLIBCXX_GUARD_TEST_AND_ACQUIRE
# undef _GLIBCXX_GUARD_SET_AND_RELEASE
# define _GLIBCXX_GUARD_SET_AND_RELEASE(G) _GLIBCXX_GUARD_SET (G)

#endif /* __GTHREADS */

// Here are C++ run-time routines for guarded initialization of static
// variables. There are 4 scenarios under which these routines are called:
//
// 1. Threads not supported (__GTHREADS not defined)
// 2. Threads are supported but not enabled at run-time.
// 3. Threads enabled at run-time but __gthreads_* are not fully POSIX.
// 4. Threads enabled at run-time and __gthreads_* support all POSIX threads
//    primitives we need here.
//
// The old code supported scenarios 1-3 but was broken since it used a global
// mutex for all threads and had the mutex locked during the whole duration of
// initialization of a guarded static variable. The following created a
// dead-lock with the old code.
//
//   Thread 1 acquires the global mutex.
//   Thread 1 starts initializing static variable.
//   Thread 1 creates thread 2 during initialization.
//   Thread 2 attempts to acquire mutex to initialize another variable.
//   Thread 2 blocks since thread 1 is locking the mutex.
//   Thread 1 waits for result from thread 2 and also blocks. A deadlock.
//
// The new code here can handle this situation and thus is more robust.
// However, we need to use the POSIX thread condition variable, which is not
// supported in all platforms, notably older versions of Microsoft Windows.
// The gthr*.h headers define a symbol __GTHREAD_HAS_COND for platforms that
// support POSIX like condition variables. For platforms that do not support
// condition variables, we need to fall back to the old code.
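
// As an illustration only (the names below are invented for this comment and
// are not part of the library), the deadlock described above corresponds to
// user code along these lines:
//
//   int compute();        // itself initializes a function-local static
//
//   int get_value()
//   {
//     static int value = [] {            // thread 1 holds the global mutex
//       std::thread t(compute);          // thread 2 needs the same mutex
//       t.join();                        // thread 1 blocks on thread 2
//       return 42;
//     }();
//     return value;
//   }
//
// With a single mutex held across the whole initialization, neither thread
// can make progress.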

// If _GLIBCXX_USE_FUTEX, no global mutex or condition variable is used,
// only atomic operations are used together with futex syscall.
// Valid values of the first integer in guard are:
// 0                               No thread encountered the guarded init
//                                 yet or it has been aborted.
// _GLIBCXX_GUARD_BIT              The guarded static var has been successfully
//                                 initialized.
// _GLIBCXX_GUARD_PENDING_BIT      The guarded static var is being initialized
//                                 and no other thread is waiting for its
//                                 initialization.
// (_GLIBCXX_GUARD_PENDING_BIT     The guarded static var is being initialized
//  | _GLIBCXX_GUARD_WAITING_BIT)  and some other threads are waiting until
//                                 it is initialized.
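
// As a rough sketch (exact code generation is ABI- and target-specific, and
// the guard symbol name below is invented), a function-local "static T obj;"
// is lowered by the compiler into something equivalent to:
//
//   if (!__guard_already_set(&__guard_for_obj))      // inline fast path
//     if (__cxa_guard_acquire(&__guard_for_obj))     // 0 -> PENDING
//       {
//         try { /* construct obj */ }
//         catch (...)
//           {
//             __cxa_guard_abort(&__guard_for_obj);   // back to 0, wake waiters
//             throw;
//           }
//         __cxa_guard_release(&__guard_for_obj);     // -> GUARD_BIT, wake waiters
//       }
//
// so on the futex path the integer described above moves 0 -> PENDING
// [-> PENDING|WAITING] -> GUARD_BIT on success, or back to 0 on an exception.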

namespace __cxxabiv1
{
#ifdef _GLIBCXX_USE_FUTEX
  namespace
  {
    static inline int __guard_test_bit (const int __byte, const int __val)
    {
      union { int __i; char __c[sizeof (int)]; } __u = { 0 };
      __u.__c[__byte] = __val;
      return __u.__i;
    }
  }
#endif

  static inline int
  init_in_progress_flag(__guard* g)
  { return ((char *)g)[1]; }

  static inline void
  set_init_in_progress_flag(__guard* g, int v)
  { ((char *)g)[1] = v; }

  static inline void
  throw_recursive_init_exception()
  {
#if __cpp_exceptions
    throw __gnu_cxx::recursive_init_error();
#else
    // Use __builtin_trap so we don't require abort().
    __builtin_trap();
#endif
  }

  // acquire() is a helper function used to acquire guard if thread support is
  // not compiled in or is compiled in but not enabled at run-time.
  static int
  acquire(__guard *g)
  {
    // Quit if the object is already initialized.
    if (_GLIBCXX_GUARD_TEST(g))
      return 0;

    if (init_in_progress_flag(g))
      throw_recursive_init_exception();

    set_init_in_progress_flag(g, 1);
    return 1;
  }
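
  // For illustration only (example code, not part of this file): the
  // "in progress" check above fires for self-recursive initialization such as
  //
  //   int f()
  //   {
  //     static int x = f();   // f() re-entered while x is being initialized
  //     return x;
  //   }
  //
  // which throws __gnu_cxx::recursive_init_error (or traps when exceptions
  // are disabled).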

  extern "C"
  int __cxa_guard_acquire (__guard *g)
  {
#ifdef __GTHREADS
    // If the target can reorder loads, we need to insert a read memory
    // barrier so that accesses to the guarded variable happen after the
    // guard test.
    if (_GLIBCXX_GUARD_TEST_AND_ACQUIRE (g))
      return 0;

# ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.

    // Use the same bits in the guard variable whether single-threaded or not,
    // so that __cxa_guard_release and __cxa_guard_abort match the logic here
    // even if __libc_single_threaded becomes false between now and then.

    if (__gnu_cxx::__is_single_threaded())
      {
        // No need to use atomics, and no need to wait for other threads.
        int *gi = (int *) (void *) g;
        if (*gi == 0)
          {
            *gi = _GLIBCXX_GUARD_PENDING_BIT;
            return 1;
          }
        else
          throw_recursive_init_exception();
      }
    else
      {
        int *gi = (int *) (void *) g;
        const int guard_bit = _GLIBCXX_GUARD_BIT;
        const int pending_bit = _GLIBCXX_GUARD_PENDING_BIT;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;

        while (1)
          {
            int expected(0);
            if (__atomic_compare_exchange_n(gi, &expected, pending_bit, false,
                                            __ATOMIC_ACQ_REL,
                                            __ATOMIC_ACQUIRE))
              {
                // This thread should do the initialization.
                return 1;
              }

            if (expected == guard_bit)
              {
                // Already initialized.
                return 0;
              }

            if (expected == pending_bit)
              {
                int newv = expected | waiting_bit;
                if (!__atomic_compare_exchange_n(gi, &expected, newv, false,
                                                 __ATOMIC_ACQ_REL,
                                                 __ATOMIC_ACQUIRE))
                  {
                    if (expected == guard_bit)
                      {
                        // Make a thread that failed to set the
                        // waiting bit exit the function earlier,
                        // if it detects that another thread has
                        // successfully finished initialising.
                        return 0;
                      }

                    if (expected == 0)
                      continue;
                  }

                expected = newv;
              }

            syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAIT, expected, 0);
          }
      }
# else // ! _GLIBCXX_USE_FUTEX
    if (__gthread_active_p ())
      {
        mutex_wrapper mw;

        while (1) // When this loop is executing, mutex is locked.
          {
#  ifdef __GTHREAD_HAS_COND
            // The static is already initialized.
            if (_GLIBCXX_GUARD_TEST(g))
              return 0; // The mutex will be unlocked via wrapper

            if (init_in_progress_flag(g))
              {
                // The guarded static is currently being initialized by
                // another thread, so we release mutex and wait for the
                // condition variable. We will lock the mutex again after
                // this thread is signaled.
                get_static_cond().wait_recursive(&get_static_mutex());
              }
            else
              {
                set_init_in_progress_flag(g, 1);
                return 1; // The mutex will be unlocked via wrapper.
              }
#  else
            // This provides compatibility with older systems not supporting
            // POSIX like condition variables.
            if (acquire(g))
              {
                mw.unlock = false;
                return 1; // The mutex still locked.
              }
            return 0; // The mutex will be unlocked via wrapper.
#  endif
          }
      }
# endif
#endif // ! __GTHREADS

    return acquire (g);
  }

  extern "C"
  void __cxa_guard_abort (__guard *g) noexcept
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.

    if (__gnu_cxx::__is_single_threaded())
      {
        // No need to use atomics, and no other threads to wake.
        int *gi = (int *) (void *) g;
        *gi = 0;
        return;
      }
    else
      {
        int *gi = (int *) (void *) g;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
        int old = __atomic_exchange_n (gi, 0, __ATOMIC_ACQ_REL);

        if ((old & waiting_bit) != 0)
          syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
      }
    return;
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
        mutex_wrapper mw;

        set_init_in_progress_flag(g, 0);

        // If we abort, we still need to wake up all other threads waiting for
        // the condition variable.
        get_static_cond().broadcast();
        return;
      }
#endif

    set_init_in_progress_flag(g, 0);
#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting POSIX like
    // condition variables.
    if (__gthread_active_p ())
      static_mutex->unlock();
#endif
  }

  extern "C"
  void __cxa_guard_release (__guard *g) noexcept
  {
#ifdef _GLIBCXX_USE_FUTEX
    // If __atomic_* and futex syscall are supported, don't use any global
    // mutex.

    if (__gnu_cxx::__is_single_threaded())
      {
        int *gi = (int *) (void *) g;
        *gi = _GLIBCXX_GUARD_BIT;
        return;
      }
    else
      {
        int *gi = (int *) (void *) g;
        const int guard_bit = _GLIBCXX_GUARD_BIT;
        const int waiting_bit = _GLIBCXX_GUARD_WAITING_BIT;
        int old = __atomic_exchange_n (gi, guard_bit, __ATOMIC_ACQ_REL);

        if ((old & waiting_bit) != 0)
          syscall (SYS_futex, gi, _GLIBCXX_FUTEX_WAKE, INT_MAX);
      }
    return;
#elif defined(__GTHREAD_HAS_COND)
    if (__gthread_active_p())
      {
        mutex_wrapper mw;

        set_init_in_progress_flag(g, 0);
        _GLIBCXX_GUARD_SET_AND_RELEASE(g);

        get_static_cond().broadcast();
        return;
      }
#endif

    set_init_in_progress_flag(g, 0);
    _GLIBCXX_GUARD_SET_AND_RELEASE (g);

#if defined(__GTHREADS) && !defined(__GTHREAD_HAS_COND)
    // This provides compatibility with older systems not supporting POSIX like
    // condition variables.
    if (__gthread_active_p())
      static_mutex->unlock();
#endif
  }
}  // namespace __cxxabiv1

#endif // __USING_MCFGTHREAD__