1 // -*- C++ -*- Allocate exception objects.
2 // Copyright (C) 2001-2022 Free Software Foundation, Inc.
4 // This file is part of GCC.
6 // GCC is free software; you can redistribute it and/or modify
7 // it under the terms of the GNU General Public License as published by
8 // the Free Software Foundation; either version 3, or (at your option)
11 // GCC is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
25 // This is derived from the C++ ABI for IA-64. Where we diverge
26 // for cross-architecture compatibility are noted with "@@@".
29 // Cygwin needs this for secure_getenv
30 # define _GNU_SOURCE 1
#include <exception>		// std::exception
#include <new>			// std::terminate
#include <cstdlib>		// std::malloc, std::free, std::strtoul
#include <climits>		// INT_MAX
#include <bits/stl_function.h>	// std::less
#include "unwind-cxx.h"

#if _GLIBCXX_HOSTED
# include <string_view>		// std::string_view
# include <cstring>		// std::strchr, std::memset
# include <ext/concurrence.h>	// __gnu_cxx::__mutex, __gnu_cxx::__scoped_lock
#endif
// We use an emergency buffer used for exceptions when malloc fails.
// If _GLIBCXX_EH_POOL_STATIC is defined (e.g. by configure) then we use
// a fixed-size static buffer.  Otherwise, allocate on startup using malloc.
//
// The size of the buffer is N * (S * P + R + D), where:
// N == The number of objects to reserve space for.
//      Defaults to EMERGENCY_OBJ_COUNT, defined below.
// S == Estimated size of exception objects to account for.
//      This size is in units of sizeof(void*) not bytes.
//      Defaults to EMERGENCY_OBJ_SIZE, defined below.
// P == sizeof(void*).
// R == sizeof(__cxa_refcounted_exception).
// D == sizeof(__cxa_dependent_exception).
//
// This provides space for N thrown exceptions of S words each, and an
// additional N dependent exceptions from std::rethrow_exception.
//
// The calculation allows values of N and S to be target-independent,
// as the size will be scaled by the size of basic types on the target,
// and space for the C++ exception header (__cxa_refcounted_exception)
// is added automatically.
//
// For a dynamically allocated buffer, N and S can be set from the environment.
// Setting N=0 will disable the emergency buffer.
// The GLIBCXX_TUNABLES environment variable will be checked for the following:
// - Tunable glibcxx.eh_pool.obj_count overrides EMERGENCY_OBJ_COUNT.
// - Tunable glibcxx.eh_pool.obj_size overrides EMERGENCY_OBJ_SIZE.
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free(void *);
extern "C" void *memset (void *, int, std::size_t);
// Bring the C++ ABI support types (__cxa_refcounted_exception etc.,
// declared in unwind-cxx.h) into scope for the definitions below.
using namespace __cxxabiv1;
// Assume that 6 * sizeof(void*) is a reasonable exception object size.
// Throwing very many large objects will exhaust the pool more quickly.
// N.B. sizeof(std::bad_alloc) == sizeof(void*)
// and sizeof(std::runtime_error) == 2 * sizeof(void*)
// and sizeof(std::system_error) == 4 * sizeof(void*).
#define EMERGENCY_OBJ_SIZE	6

#ifdef __GTHREADS
// Assume that the number of concurrent exception objects scales with the
// processor word size, i.e., 16-bit systems are not likely to have hundreds
// of threads all simultaneously throwing on OOM conditions.
# define EMERGENCY_OBJ_COUNT	(4 * __SIZEOF_POINTER__ * __SIZEOF_POINTER__)
# define MAX_OBJ_COUNT		(16 << __SIZEOF_POINTER__)
#else
// Single-threaded: a handful of in-flight exceptions is plenty.
# define EMERGENCY_OBJ_COUNT	4
# define MAX_OBJ_COUNT		64
#endif

// This can be set by configure.
#ifdef _GLIBCXX_EH_POOL_NOBJS
# if _GLIBCXX_EH_POOL_NOBJS > MAX_OBJ_COUNT
#  warning "_GLIBCXX_EH_POOL_NOBJS value is too large; ignoring it"
# elif _GLIBCXX_EH_POOL_NOBJS < 0
#  warning "_GLIBCXX_EH_POOL_NOBJS value is negative; ignoring it"
# else
#  undef EMERGENCY_OBJ_COUNT
#  define EMERGENCY_OBJ_COUNT	_GLIBCXX_EH_POOL_NOBJS
# endif
#endif
117 #if defined _GLIBCXX_EH_POOL_STATIC && EMERGENCY_OBJ_COUNT == 0
namespace __gnu_cxx
{
  // Releases the dynamically-allocated emergency pool arena (defined in
  // this file); called to free remaining resources at process shutdown.
  void __freeres() noexcept;
}
131 static constexpr std::size_t
132 buffer_size_in_bytes(std::size_t obj_count
, std::size_t obj_size
) noexcept
134 // N * (S * P + R + D)
135 constexpr std::size_t P
= sizeof(void*);
136 constexpr std::size_t R
= sizeof(__cxa_refcounted_exception
);
137 constexpr std::size_t D
= sizeof(__cxa_dependent_exception
);
138 return obj_count
* (obj_size
* P
+ R
+ D
);
141 // A fixed-size heap, variable size object allocator
147 _GLIBCXX_NODISCARD
void *allocate (std::size_t) noexcept
;
148 void free (void *) noexcept
;
150 bool in_pool (void *) const noexcept
;
157 struct allocated_entry
{
159 char data
[] __attribute__((aligned
));
163 // A single mutex controlling emergency allocations.
164 __gnu_cxx::__mutex emergency_mutex
;
165 using __scoped_lock
= __gnu_cxx::__scoped_lock
;
167 int emergency_mutex
= 0;
168 struct __scoped_lock
{ explicit __scoped_lock(int) { } };
172 free_entry
*first_free_entry
= nullptr;
173 // The arena itself - we need to keep track of these only
174 // to implement in_pool.
175 #ifdef _GLIBCXX_EH_POOL_STATIC
176 static constexpr std::size_t arena_size
177 = buffer_size_in_bytes(EMERGENCY_OBJ_COUNT
, EMERGENCY_OBJ_SIZE
);
178 alignas(void*) char arena
[arena_size
];
180 char *arena
= nullptr;
181 std::size_t arena_size
= 0;
184 friend void __gnu_cxx::__freeres() noexcept
;
187 pool::pool() noexcept
189 #ifndef _GLIBCXX_EH_POOL_STATIC
190 int obj_size
= EMERGENCY_OBJ_SIZE
;
191 int obj_count
= EMERGENCY_OBJ_COUNT
;
194 #if _GLIBCXX_HAVE_SECURE_GETENV
195 const char* str
= ::secure_getenv("GLIBCXX_TUNABLES");
197 const char* str
= std::getenv("GLIBCXX_TUNABLES");
199 const std::string_view ns_name
= "glibcxx.eh_pool";
200 std::pair
<std::string_view
, int> tunables
[]{
201 {"obj_size", 0}, {"obj_count", obj_count
}
208 if (!ns_name
.compare(0, ns_name
.size(), str
, ns_name
.size())
209 && str
[ns_name
.size()] == '.')
211 str
+= ns_name
.size() + 1;
212 for (auto& t
: tunables
)
213 if (!t
.first
.compare(0, t
.first
.size(), str
, t
.first
.size())
214 && str
[t
.first
.size()] == '=')
216 str
+= t
.first
.size() + 1;
218 unsigned long val
= strtoul(str
, &end
, 0);
219 if ((*end
== ':' || *end
== '\0') && val
<= INT_MAX
)
225 str
= strchr(str
, ':');
227 obj_count
= std::min(tunables
[1].second
, MAX_OBJ_COUNT
); // Can be zero.
228 if (tunables
[0].second
!= 0)
229 obj_size
= tunables
[0].second
;
232 arena_size
= buffer_size_in_bytes(obj_count
, obj_size
);
235 arena
= (char *)malloc (arena_size
);
238 // If the allocation failed go without an emergency pool.
244 // Populate the free-list with a single entry covering the whole arena
245 first_free_entry
= reinterpret_cast <free_entry
*> (arena
);
246 new (first_free_entry
) free_entry
;
247 first_free_entry
->size
= arena_size
;
248 first_free_entry
->next
= NULL
;
251 void *pool::allocate (std::size_t size
) noexcept
253 __scoped_lock
sentry(emergency_mutex
);
254 // We need an additional size_t member plus the padding to
255 // ensure proper alignment of data.
256 size
+= offsetof (allocated_entry
, data
);
257 // And we need to at least hand out objects of the size of
259 if (size
< sizeof (free_entry
))
260 size
= sizeof (free_entry
);
261 // And we need to align objects we hand out to the maximum
262 // alignment required on the target (this really aligns the
263 // tail which will become a new freelist entry).
264 size
= ((size
+ __alignof__ (allocated_entry::data
) - 1)
265 & ~(__alignof__ (allocated_entry::data
) - 1));
266 // Search for an entry of proper size on the freelist.
268 for (e
= &first_free_entry
;
269 *e
&& (*e
)->size
< size
;
275 if ((*e
)->size
- size
>= sizeof (free_entry
))
277 // Split block if it is too large.
278 free_entry
*f
= reinterpret_cast <free_entry
*>
279 (reinterpret_cast <char *> (*e
) + size
);
280 std::size_t sz
= (*e
)->size
;
281 free_entry
*next
= (*e
)->next
;
285 x
= reinterpret_cast <allocated_entry
*> (*e
);
286 new (x
) allocated_entry
;
292 // Exact size match or too small overhead for a free entry.
293 std::size_t sz
= (*e
)->size
;
294 free_entry
*next
= (*e
)->next
;
295 x
= reinterpret_cast <allocated_entry
*> (*e
);
296 new (x
) allocated_entry
;
303 void pool::free (void *data
) noexcept
305 __scoped_lock
sentry(emergency_mutex
);
306 allocated_entry
*e
= reinterpret_cast <allocated_entry
*>
307 (reinterpret_cast <char *> (data
) - offsetof (allocated_entry
, data
));
308 std::size_t sz
= e
->size
;
309 if (!first_free_entry
310 || (reinterpret_cast <char *> (e
) + sz
311 < reinterpret_cast <char *> (first_free_entry
)))
313 // If the free list is empty or the entry is before the
314 // first element and cannot be merged with it add it as
315 // the first free entry.
316 free_entry
*f
= reinterpret_cast <free_entry
*> (e
);
319 f
->next
= first_free_entry
;
320 first_free_entry
= f
;
322 else if (reinterpret_cast <char *> (e
) + sz
323 == reinterpret_cast <char *> (first_free_entry
))
325 // Check if we can merge with the first free entry being right
327 free_entry
*f
= reinterpret_cast <free_entry
*> (e
);
329 f
->size
= sz
+ first_free_entry
->size
;
330 f
->next
= first_free_entry
->next
;
331 first_free_entry
= f
;
335 // Else search for a free item we can merge with at its end.
337 for (fe
= &first_free_entry
;
339 && (reinterpret_cast <char *> (e
) + sz
340 > reinterpret_cast <char *> ((*fe
)->next
));
343 // If we can merge the next block into us do so and continue
344 // with the cases below.
345 if (reinterpret_cast <char *> (e
) + sz
346 == reinterpret_cast <char *> ((*fe
)->next
))
348 sz
+= (*fe
)->next
->size
;
349 (*fe
)->next
= (*fe
)->next
->next
;
351 if (reinterpret_cast <char *> (*fe
) + (*fe
)->size
352 == reinterpret_cast <char *> (e
))
353 // Merge with the freelist entry.
357 // Else put it after it which keeps the freelist sorted.
358 free_entry
*f
= reinterpret_cast <free_entry
*> (e
);
361 f
->next
= (*fe
)->next
;
367 inline bool pool::in_pool (void *ptr
) const noexcept
369 std::less
<const void*> less
;
370 return less(ptr
, arena
+ arena_size
) && less(arena
, ptr
);
378 __attribute__((cold
))
382 #ifndef _GLIBCXX_EH_POOL_STATIC
383 if (emergency_pool
.arena
)
385 ::free(emergency_pool
.arena
);
386 emergency_pool
.arena
= 0;
394 __cxxabiv1::__cxa_allocate_exception(std::size_t thrown_size
) noexcept
396 thrown_size
+= sizeof (__cxa_refcounted_exception
);
398 void *ret
= malloc (thrown_size
);
402 ret
= emergency_pool
.allocate (thrown_size
);
408 memset (ret
, 0, sizeof (__cxa_refcounted_exception
));
410 return (void *)((char *)ret
+ sizeof (__cxa_refcounted_exception
));
415 __cxxabiv1::__cxa_free_exception(void *vptr
) noexcept
417 char *ptr
= (char *) vptr
- sizeof (__cxa_refcounted_exception
);
419 if (emergency_pool
.in_pool (ptr
)) [[__unlikely__
]]
420 emergency_pool
.free (ptr
);
427 extern "C" __cxa_dependent_exception
*
428 __cxxabiv1::__cxa_allocate_dependent_exception() noexcept
430 void *ret
= malloc (sizeof (__cxa_dependent_exception
));
434 ret
= emergency_pool
.allocate (sizeof (__cxa_dependent_exception
));
440 memset (ret
, 0, sizeof (__cxa_dependent_exception
));
442 return static_cast<__cxa_dependent_exception
*>(ret
);
447 __cxxabiv1::__cxa_free_dependent_exception
448 (__cxa_dependent_exception
*vptr
) noexcept
451 if (emergency_pool
.in_pool (vptr
)) [[__unlikely__
]]
452 emergency_pool
.free (vptr
);