RISC-V: Bugfix vec_extract v mode iterator restriction mismatch
[official-gcc.git] / libstdc++-v3 / libsupc++ / eh_alloc.cc
blobec8c8fd13af42426f4831358d93a93ef1781273e
1 // -*- C++ -*- Allocate exception objects.
2 // Copyright (C) 2001-2024 Free Software Foundation, Inc.
3 //
4 // This file is part of GCC.
5 //
6 // GCC is free software; you can redistribute it and/or modify
7 // it under the terms of the GNU General Public License as published by
8 // the Free Software Foundation; either version 3, or (at your option)
9 // any later version.
11 // GCC is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 // This is derived from the C++ ABI for IA-64. Where we diverge
26 // for cross-architecture compatibility are noted with "@@@".
28 #ifndef _GNU_SOURCE
29 // Cygwin needs this for secure_getenv
30 # define _GNU_SOURCE 1
31 #endif
33 #include <exception> // std::exception
34 #include <new> // std::terminate
35 #include <cstdlib> // std::malloc, std::free, std::strtoul
36 #include <climits> // INT_MAX
37 #include <bits/stl_function.h> // std::less
38 #include "unwind-cxx.h"
39 #if _GLIBCXX_HOSTED
40 # include <string_view> // std::string_view
41 # include <cstring> // std::strchr, std::memset
42 # include <ext/concurrence.h> // __gnu_cxx::__mutex, __gnu_cxx::__scoped_lock
43 #endif
45 // We use an emergency buffer used for exceptions when malloc fails.
46 // If _GLIBCXX_EH_POOL_STATIC is defined (e.g. by configure) then we use
47 // a fixed-size static buffer. Otherwise, allocate on startup using malloc.
49 // The size of the buffer is N * (S * P + R + D), where:
50 // N == The number of objects to reserve space for.
51 // Defaults to EMERGENCY_OBJ_COUNT, defined below.
52 // S == Estimated size of exception objects to account for.
53 // This size is in units of sizeof(void*) not bytes.
54 // Defaults to EMERGENCY_OBJ_SIZE, defined below.
55 // P == sizeof(void*).
56 // R == sizeof(__cxa_refcounted_exception).
57 // D == sizeof(__cxa_dependent_exception).
59 // This provides space for N thrown exceptions of S words each, and an
60 // additional N dependent exceptions from std::rethrow_exception.
62 // The calculation allows values of N and S to be target-independent,
63 // as the size will be scaled by the size of basic types on the target,
64 // and space for the C++ exception header (__cxa_refcounted_exception)
65 // is added automatically.
67 // For a dynamically allocated buffer, N and S can be set from the environment.
68 // Setting N=0 will disable the emergency buffer.
69 // The GLIBCXX_TUNABLES environment variable will be checked for the following:
70 // - Tunable glibcxx.eh_pool.obj_count overrides EMERGENCY_OBJ_COUNT.
71 // - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
73 #if _GLIBCXX_HOSTED
74 using std::free;
75 using std::malloc;
76 using std::memset;
77 #else
78 // In a freestanding environment, these functions may not be available
79 // -- but for now, we assume that they are.
80 extern "C" void *malloc (std::size_t);
81 extern "C" void free(void *);
82 extern "C" void *memset (void *, int, std::size_t);
83 #endif
85 using namespace __cxxabiv1;
87 // Assume that 6 * sizeof(void*) is a reasonable exception object size.
88 // Throwing very many large objects will exhaust the pool more quickly.
89 // N.B. sizeof(std::bad_alloc) == sizeof(void*)
90 // and sizeof(std::runtime_error) == 2 * sizeof(void*)
91 // and sizeof(std::system_error) == 4 * sizeof(void*).
92 #define EMERGENCY_OBJ_SIZE 6
94 #ifdef __GTHREADS
95 // Assume that the number of concurrent exception objects scales with the
96 // processor word size, i.e., 16-bit systems are not likely to have hundreds
97 // of threads all simultaneously throwing on OOM conditions.
98 # define EMERGENCY_OBJ_COUNT (4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
99 # define MAX_OBJ_COUNT (16 << __SIZEOF_POINTER__)
100 #else
101 # define EMERGENCY_OBJ_COUNT 4
102 # define MAX_OBJ_COUNT 64
103 #endif
105 // This can be set by configure.
106 #ifdef _GLIBCXX_EH_POOL_NOBJS
107 # if _GLIBCXX_EH_POOL_NOBJS > MAX_OBJ_COUNT
108 # warning "_GLIBCXX_EH_POOL_NOBJS value is too large; ignoring it"
109 # elif _GLIBCXX_EH_POOL_NOBJS < 0
110 # warning "_GLIBCXX_EH_POOL_NOBJS value is negative; ignoring it"
111 # else
112 # undef EMERGENCY_OBJ_COUNT
113 # define EMERGENCY_OBJ_COUNT _GLIBCXX_EH_POOL_NOBJS
114 # endif
115 #endif
117 #if defined _GLIBCXX_EH_POOL_STATIC && EMERGENCY_OBJ_COUNT == 0
118 # define USE_POOL 0
119 #else
120 # define USE_POOL 1
121 #endif
123 #if USE_POOL
namespace __gnu_cxx
{
  // Forward declaration; defined below.  Releases the dynamically
  // allocated emergency arena (used by tools like valgrind to avoid
  // reporting the pool as leaked at exit).
  void __freeres() noexcept;
}
129 namespace
131 static constexpr std::size_t
132 buffer_size_in_bytes(std::size_t obj_count, std::size_t obj_size) noexcept
134 // N * (S * P + R + D)
135 constexpr std::size_t P = sizeof(void*);
136 constexpr std::size_t R = sizeof(__cxa_refcounted_exception);
137 constexpr std::size_t D = sizeof(__cxa_dependent_exception);
138 return obj_count * (obj_size * P + R + D);
// A fixed-size heap, variable size object allocator.  Hands out
// exception-object storage from a pre-reserved arena once malloc fails.
class pool
{
public:
  pool() noexcept;

  // Return storage for size bytes from the arena, or null if no
  // sufficiently large free block remains.
  _GLIBCXX_NODISCARD void *allocate (std::size_t) noexcept;
  // Return a block previously obtained from allocate() to the free-list.
  void free (void *) noexcept;

  // True if the pointer lies inside this pool's arena.
  bool in_pool (void *) const noexcept;

private:
  // Header of a block on the free-list.  The list is kept sorted by
  // address so free() can coalesce adjacent blocks.
  struct free_entry {
    std::size_t size;
    free_entry *next;
  };
  // Header of a handed-out block; data[] is what allocate() returns.
  struct allocated_entry {
    std::size_t size;
    char data[] __attribute__((aligned));
  };

#if _GLIBCXX_HOSTED
  // A single mutex controlling emergency allocations.
  __gnu_cxx::__mutex emergency_mutex;
  using __scoped_lock = __gnu_cxx::__scoped_lock;
#else
  // Freestanding build: locking is a no-op stand-in.
  int emergency_mutex = 0;
  struct __scoped_lock { explicit __scoped_lock(int) { } };
#endif

  // The free-list
  free_entry *first_free_entry = nullptr;
  // The arena itself - we need to keep track of these only
  // to implement in_pool.
#ifdef _GLIBCXX_EH_POOL_STATIC
  static constexpr std::size_t arena_size
    = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT, EMERGENCY_OBJ_SIZE);
  alignas(void*) char arena[arena_size];
#else
  char *arena = nullptr;
  std::size_t arena_size = 0;
#endif

  // Allows __freeres() to release the malloc'd arena.
  friend void __gnu_cxx::__freeres() noexcept;
};
// Construct the pool.  For the non-static configuration, reads the
// glibcxx.eh_pool.obj_count / obj_size tunables from GLIBCXX_TUNABLES
// (hosted builds only), then malloc's the arena and seeds the
// free-list with one entry covering the whole arena.
pool::pool() noexcept
{
#ifndef _GLIBCXX_EH_POOL_STATIC
  int obj_size = EMERGENCY_OBJ_SIZE;
  int obj_count = EMERGENCY_OBJ_COUNT;

#if _GLIBCXX_HOSTED
#if _GLIBCXX_HAVE_SECURE_GETENV
  // secure_getenv returns null for setuid/setgid processes.
  const char* str = ::secure_getenv("GLIBCXX_TUNABLES");
#else
  const char* str = std::getenv("GLIBCXX_TUNABLES");
#endif
  const std::string_view ns_name = "glibcxx.eh_pool";
  // Defaults: obj_size 0 means "not set" (checked below), obj_count
  // starts at the compile-time default so an absent tunable keeps it.
  std::pair<std::string_view, int> tunables[]{
    {"obj_size", 0}, {"obj_count", obj_count}
  };
  // Scan the colon-separated "name=value" list for our namespace.
  while (str)
    {
      if (*str == ':')
	++str;

      if (!ns_name.compare(0, ns_name.size(), str, ns_name.size())
	  && str[ns_name.size()] == '.')
	{
	  // Matched "glibcxx.eh_pool." — try each tunable name.
	  str += ns_name.size() + 1;
	  for (auto& t : tunables)
	    if (!t.first.compare(0, t.first.size(), str, t.first.size())
		&& str[t.first.size()] == '=')
	      {
		str += t.first.size() + 1;
		char* end;
		unsigned long val = strtoul(str, &end, 0);
		// Only accept a well-terminated value that fits in int.
		if ((*end == ':' || *end == '\0') && val <= INT_MAX)
		  t.second = val;
		str = end;
		break;
	      }
	}
      // Advance to the next "name=value" element, or stop at the end.
      str = strchr(str, ':');
    }
  obj_count = std::min(tunables[1].second, MAX_OBJ_COUNT); // Can be zero.
  if (tunables[0].second != 0)
    obj_size = tunables[0].second;
#endif // HOSTED

  arena_size = buffer_size_in_bytes(obj_count, obj_size);
  if (arena_size == 0)
    return;
  arena = (char *)malloc (arena_size);
  if (!arena)
    {
      // If the allocation failed go without an emergency pool.
      arena_size = 0;
      return;
    }
#endif // STATIC

  // Populate the free-list with a single entry covering the whole arena
  first_free_entry = reinterpret_cast <free_entry *> (arena);
  new (first_free_entry) free_entry;
  first_free_entry->size = arena_size;
  first_free_entry->next = NULL;
}
// Carve a block of at least `size` bytes out of the first free-list
// entry that fits (first-fit).  Returns null when no entry is large
// enough.  Thread-safe via emergency_mutex on hosted builds.
void *pool::allocate (std::size_t size) noexcept
{
  __scoped_lock sentry(emergency_mutex);
  // We need an additional size_t member plus the padding to
  // ensure proper alignment of data.
  size += offsetof (allocated_entry, data);
  // And we need to at least hand out objects of the size of
  // a freelist entry.
  if (size < sizeof (free_entry))
    size = sizeof (free_entry);
  // And we need to align objects we hand out to the maximum
  // alignment required on the target (this really aligns the
  // tail which will become a new freelist entry).
  size = ((size + __alignof__ (allocated_entry::data) - 1)
	  & ~(__alignof__ (allocated_entry::data) - 1));
  // Search for an entry of proper size on the freelist.
  free_entry **e;
  for (e = &first_free_entry;
       *e && (*e)->size < size;
       e = &(*e)->next)
    ;
  if (!*e)
    return NULL;
  allocated_entry *x;
  if ((*e)->size - size >= sizeof (free_entry))
    {
      // Split block if it is too large.  The tail becomes a new
      // free-list entry at the same list position.
      free_entry *f = reinterpret_cast <free_entry *>
	(reinterpret_cast <char *> (*e) + size);
      std::size_t sz = (*e)->size;
      free_entry *next = (*e)->next;
      new (f) free_entry;
      f->next = next;
      f->size = sz - size;
      x = reinterpret_cast <allocated_entry *> (*e);
      new (x) allocated_entry;
      x->size = size;
      *e = f;
    }
  else
    {
      // Exact size match or too small overhead for a free entry.
      // Hand out the whole block; record its true size for free().
      std::size_t sz = (*e)->size;
      free_entry *next = (*e)->next;
      x = reinterpret_cast <allocated_entry *> (*e);
      new (x) allocated_entry;
      x->size = sz;
      *e = next;
    }
  return &x->data;
}
// Return a block to the free-list, merging with the free blocks
// immediately before and/or after it so the list stays coalesced and
// sorted by address.
void pool::free (void *data) noexcept
{
  __scoped_lock sentry(emergency_mutex);
  // Recover the block header from the data pointer allocate() returned.
  allocated_entry *e = reinterpret_cast <allocated_entry *>
    (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
  std::size_t sz = e->size;
  if (!first_free_entry
      || (reinterpret_cast <char *> (e) + sz
	  < reinterpret_cast <char *> (first_free_entry)))
    {
      // If the free list is empty or the entry is before the
      // first element and cannot be merged with it add it as
      // the first free entry.
      free_entry *f = reinterpret_cast <free_entry *> (e);
      new (f) free_entry;
      f->size = sz;
      f->next = first_free_entry;
      first_free_entry = f;
    }
  else if (reinterpret_cast <char *> (e) + sz
	   == reinterpret_cast <char *> (first_free_entry))
    {
      // Check if we can merge with the first free entry being right
      // after us.
      free_entry *f = reinterpret_cast <free_entry *> (e);
      new (f) free_entry;
      f->size = sz + first_free_entry->size;
      f->next = first_free_entry->next;
      first_free_entry = f;
    }
  else
    {
      // Else search for a free item we can merge with at its end.
      // The list is address-sorted, so stop at the last entry whose
      // successor starts at or after our end.
      free_entry **fe;
      for (fe = &first_free_entry;
	   (*fe)->next
	   && (reinterpret_cast <char *> (e) + sz
	       > reinterpret_cast <char *> ((*fe)->next));
	   fe = &(*fe)->next)
	;
      // If we can merge the next block into us do so and continue
      // with the cases below.
      if (reinterpret_cast <char *> (e) + sz
	  == reinterpret_cast <char *> ((*fe)->next))
	{
	  sz += (*fe)->next->size;
	  (*fe)->next = (*fe)->next->next;
	}
      if (reinterpret_cast <char *> (*fe) + (*fe)->size
	  == reinterpret_cast <char *> (e))
	// Merge with the freelist entry.
	(*fe)->size += sz;
      else
	{
	  // Else put it after it which keeps the freelist sorted.
	  free_entry *f = reinterpret_cast <free_entry *> (e);
	  new (f) free_entry;
	  f->size = sz;
	  f->next = (*fe)->next;
	  (*fe)->next = f;
	}
    }
}
367 inline bool pool::in_pool (void *ptr) const noexcept
369 std::less<const void*> less;
370 return less(ptr, arena + arena_size) && less(arena, ptr);
// The single program-wide emergency pool, shared by the __cxa_*
// allocation functions below.
pool emergency_pool;
376 namespace __gnu_cxx
378 __attribute__((cold))
379 void
380 __freeres() noexcept
382 #ifndef _GLIBCXX_EH_POOL_STATIC
383 if (emergency_pool.arena)
385 ::free(emergency_pool.arena);
386 emergency_pool.arena = 0;
388 #endif
391 #endif // USE_POOL
393 extern "C" void *
394 __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size) noexcept
396 thrown_size += sizeof (__cxa_refcounted_exception);
398 void *ret = malloc (thrown_size);
400 #if USE_POOL
401 if (!ret)
402 ret = emergency_pool.allocate (thrown_size);
403 #endif
405 if (!ret)
406 std::terminate ();
408 memset (ret, 0, sizeof (__cxa_refcounted_exception));
410 return (void *)((char *)ret + sizeof (__cxa_refcounted_exception));
414 extern "C" void
415 __cxxabiv1::__cxa_free_exception(void *vptr) noexcept
417 char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
418 #if USE_POOL
419 if (emergency_pool.in_pool (ptr)) [[__unlikely__]]
420 emergency_pool.free (ptr);
421 else
422 #endif
423 free (ptr);
427 extern "C" __cxa_dependent_exception*
428 __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
430 void *ret = malloc (sizeof (__cxa_dependent_exception));
432 #if USE_POOL
433 if (!ret)
434 ret = emergency_pool.allocate (sizeof (__cxa_dependent_exception));
435 #endif
437 if (!ret)
438 std::terminate ();
440 memset (ret, 0, sizeof (__cxa_dependent_exception));
442 return static_cast<__cxa_dependent_exception*>(ret);
446 extern "C" void
447 __cxxabiv1::__cxa_free_dependent_exception
448 (__cxa_dependent_exception *vptr) noexcept
450 #if USE_POOL
451 if (emergency_pool.in_pool (vptr)) [[__unlikely__]]
452 emergency_pool.free (vptr);
453 else
454 #endif
455 free (vptr);