// -*- C++ -*- Allocate exception objects.
// Copyright (C) 2001-2021 Free Software Foundation, Inc.
//
// This file is part of GCC.
//
// GCC is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3, or (at your option)
// any later version.
//
// GCC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
// This is derived from the C++ ABI for IA-64.  Places where we diverge
// for cross-architecture compatibility are noted with "@@@".
#include <bits/c++config.h>
#include <cstdlib>
#if _GLIBCXX_HOSTED
#include <cstring>
#endif
#include <climits>
#include <exception>
#include "unwind-cxx.h"
#include <ext/concurrence.h>
#include <new>

#if _GLIBCXX_HOSTED
using std::free;
using std::malloc;
using std::memset;
#else
// In a freestanding environment, these functions may not be available
// -- but for now, we assume that they are.
extern "C" void *malloc (std::size_t);
extern "C" void free (void *);
extern "C" void *memset (void *, int, std::size_t);
#endif
using namespace __cxxabiv1;
// ??? How to control these parameters.
// Guess from the size of basic types how large a buffer is reasonable.
// Note that the basic c++ exception header has 13 pointers and 2 ints,
// so on a system with PSImode pointers we're talking about 56 bytes
// just for overhead.
#if INT_MAX == 32767
# define EMERGENCY_OBJ_SIZE	128
# define EMERGENCY_OBJ_COUNT	16
#elif !defined (_GLIBCXX_LLP64) && LONG_MAX == 2147483647
# define EMERGENCY_OBJ_SIZE	512
# define EMERGENCY_OBJ_COUNT	32
#else
# define EMERGENCY_OBJ_SIZE	1024
# define EMERGENCY_OBJ_COUNT	64
#endif

#ifndef __GTHREADS
// Without thread support only a handful of emergency objects can be
// live at once, so shrink the pool accordingly.
# undef EMERGENCY_OBJ_COUNT
# define EMERGENCY_OBJ_COUNT	4
#endif
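
// As a rough worked example: on a typical LP64 target the constructor
// below reserves EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
// = 1024 * 64 = 65536 bytes for thrown objects, plus
// EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception) bytes for
// dependent exceptions -- on the order of 72 KiB in total, though the
// exact figure depends on the target's __cxa_dependent_exception layout.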

namespace __gnu_cxx
{
  void __freeres ();
}

namespace
{
  // A fixed-size heap, variable size object allocator
  class pool
    {
    public:
      pool ();

      _GLIBCXX_NODISCARD void *allocate (std::size_t);
      void free (void *);

      bool in_pool (void *);

    private:
      struct free_entry
      {
        std::size_t size;
        free_entry *next;
      };
      struct allocated_entry
      {
        std::size_t size;
        char data[] __attribute__((aligned));
      };

      // A single mutex controlling emergency allocations.
      __gnu_cxx::__mutex emergency_mutex;

      // The free-list.
      free_entry *first_free_entry;
      // The arena itself - we need to keep track of these only
      // to implement in_pool.
      char *arena;
      std::size_t arena_size;

      friend void __gnu_cxx::__freeres ();
    };
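
  // Layout sketch (illustrative, not from the original source): the arena
  // is carved into blocks that are either handed out, prefixed by their
  // size, or threaded onto the address-sorted free list:
  //
  //   [size|data....] [size|next ->...] [size|data......] [size|next -> 0]
  //    allocated       free              allocated         free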

  pool::pool ()
    {
      // Allocate the arena - we could add a GLIBCXX_EH_ARENA_SIZE
      // environment variable to make this tunable.
      arena_size = (EMERGENCY_OBJ_SIZE * EMERGENCY_OBJ_COUNT
                    + EMERGENCY_OBJ_COUNT * sizeof (__cxa_dependent_exception));
      arena = (char *) malloc (arena_size);
      if (!arena)
        {
          // If the allocation failed go without an emergency pool.
          arena_size = 0;
          first_free_entry = NULL;
          return;
        }

      // Populate the free-list with a single entry covering the whole arena.
      first_free_entry = reinterpret_cast <free_entry *> (arena);
      new (first_free_entry) free_entry;
      first_free_entry->size = arena_size;
      first_free_entry->next = NULL;
    }

  void *pool::allocate (std::size_t size)
    {
      __gnu_cxx::__scoped_lock sentry (emergency_mutex);
      // We need an additional size_t member plus the padding to
      // ensure proper alignment of data.
      size += offsetof (allocated_entry, data);
      // And we need to hand out objects of at least the size of
      // a freelist entry.
      if (size < sizeof (free_entry))
        size = sizeof (free_entry);
      // And we need to align objects we hand out to the maximum
      // alignment required on the target (this really aligns the
      // tail which will become a new freelist entry).
      size = ((size + __alignof__ (allocated_entry::data) - 1)
              & ~(__alignof__ (allocated_entry::data) - 1));
      // Search for an entry of proper size on the freelist.
      free_entry **e;
      for (e = &first_free_entry;
           *e && (*e)->size < size;
           e = &(*e)->next)
        ;
      if (!*e)
        return NULL;
      allocated_entry *x;
      if ((*e)->size - size >= sizeof (free_entry))
        {
          // Split block if it is too large.
          free_entry *f = reinterpret_cast <free_entry *>
            (reinterpret_cast <char *> (*e) + size);
          std::size_t sz = (*e)->size;
          free_entry *next = (*e)->next;
          new (f) free_entry;
          f->next = next;
          f->size = sz - size;
          x = reinterpret_cast <allocated_entry *> (*e);
          new (x) allocated_entry;
          x->size = size;
          *e = f;
        }
      else
        {
          // Exact size match or too small overhead for a free entry.
          std::size_t sz = (*e)->size;
          free_entry *next = (*e)->next;
          x = reinterpret_cast <allocated_entry *> (*e);
          new (x) allocated_entry;
          x->size = sz;
          *e = next;
        }
      return &x->data;
    }
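
  // Example of the size computation above, assuming an LP64 target where
  // the maximum alignment -- and thus offsetof (allocated_entry, data) --
  // is 16: a request for 40 bytes grows to 40 + 16 = 56 for the header,
  // then rounds up to 64.  Targets with other alignments differ.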

  void pool::free (void *data)
    {
      __gnu_cxx::__scoped_lock sentry (emergency_mutex);
      allocated_entry *e = reinterpret_cast <allocated_entry *>
        (reinterpret_cast <char *> (data) - offsetof (allocated_entry, data));
      std::size_t sz = e->size;
      if (!first_free_entry
          || (reinterpret_cast <char *> (e) + sz
              < reinterpret_cast <char *> (first_free_entry)))
        {
          // If the free list is empty or the entry is before the
          // first element and cannot be merged with it add it as
          // the first free entry.
          free_entry *f = reinterpret_cast <free_entry *> (e);
          new (f) free_entry;
          f->size = sz;
          f->next = first_free_entry;
          first_free_entry = f;
        }
      else if (reinterpret_cast <char *> (e) + sz
               == reinterpret_cast <char *> (first_free_entry))
        {
          // Check if we can merge with the first free entry being right
          // after us.
          free_entry *f = reinterpret_cast <free_entry *> (e);
          new (f) free_entry;
          f->size = sz + first_free_entry->size;
          f->next = first_free_entry->next;
          first_free_entry = f;
        }
      else
        {
          // Else search for a free item we can merge with at its end.
          free_entry **fe;
          for (fe = &first_free_entry;
               (*fe)->next
               && (reinterpret_cast <char *> ((*fe)->next)
                   > reinterpret_cast <char *> (e) + sz);
               fe = &(*fe)->next)
            ;
          // If we can merge the next block into us do so and continue
          // with the cases below.
          if (reinterpret_cast <char *> (e) + sz
              == reinterpret_cast <char *> ((*fe)->next))
            {
              sz += (*fe)->next->size;
              (*fe)->next = (*fe)->next->next;
            }
          if (reinterpret_cast <char *> (*fe) + (*fe)->size
              == reinterpret_cast <char *> (e))
            // Merge with the freelist entry.
            (*fe)->size += sz;
          else
            {
              // Else put it after it which keeps the freelist sorted.
              free_entry *f = reinterpret_cast <free_entry *> (e);
              new (f) free_entry;
              f->size = sz;
              f->next = (*fe)->next;
              (*fe)->next = f;
            }
        }
    }
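
  // Keeping the freelist sorted by address is what makes the single
  // forward scan above sufficient for coalescing: a returned block can
  // only ever merge with the entry immediately before it and the entry
  // immediately after it.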

  bool pool::in_pool (void *ptr)
    {
      char *p = reinterpret_cast <char *> (ptr);
      return (p > arena
              && p < arena + arena_size);
    }

  pool emergency_pool;
}

namespace __gnu_cxx
{
  void
  __freeres ()
  {
    if (emergency_pool.arena)
      {
        ::free (emergency_pool.arena);
        emergency_pool.arena = 0;
      }
  }
}

extern "C" void *
__cxxabiv1::__cxa_allocate_exception (std::size_t thrown_size) _GLIBCXX_NOTHROW
{
  void *ret;

  thrown_size += sizeof (__cxa_refcounted_exception);
  ret = malloc (thrown_size);

  // Fall back to the emergency pool if the heap is exhausted.
  if (!ret)
    ret = emergency_pool.allocate (thrown_size);

  if (!ret)
    std::terminate ();

  // Zero out the exception header so all of its fields start cleared.
  memset (ret, 0, sizeof (__cxa_refcounted_exception));

  // Hand the caller the address just past the header, where the thrown
  // object itself is constructed.
  return (void *) ((char *) ret + sizeof (__cxa_refcounted_exception));
}
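
// For reference, a front end lowers `throw T(args)` to roughly the
// following sequence (a sketch of the Itanium C++ ABI protocol, not the
// exact code GCC emits):
//
//   void *p = __cxa_allocate_exception (sizeof (T));
//   new (p) T(args);                  // may itself throw
//   __cxa_throw (p, &typeid (T), <pointer to T's destructor>);
//
// If constructing the object into the buffer throws, the compiler calls
// __cxa_free_exception below to return the buffer.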

extern "C" void
__cxxabiv1::__cxa_free_exception (void *vptr) _GLIBCXX_NOTHROW
{
  char *ptr = (char *) vptr - sizeof (__cxa_refcounted_exception);
  if (emergency_pool.in_pool (ptr))
    emergency_pool.free (ptr);
  else
    free (ptr);
}

extern "C" __cxa_dependent_exception *
__cxxabiv1::__cxa_allocate_dependent_exception () _GLIBCXX_NOTHROW
{
  __cxa_dependent_exception *ret;

  ret = static_cast <__cxa_dependent_exception *>
    (malloc (sizeof (__cxa_dependent_exception)));

  if (!ret)
    ret = static_cast <__cxa_dependent_exception *>
      (emergency_pool.allocate (sizeof (__cxa_dependent_exception)));

  if (!ret)
    std::terminate ();

  memset (ret, 0, sizeof (__cxa_dependent_exception));

  return ret;
}
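
// Dependent exceptions are what back std::exception_ptr: rethrowing via
// std::rethrow_exception allocates one of these headers referring to the
// primary exception rather than copying the thrown object.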

extern "C" void
__cxxabiv1::__cxa_free_dependent_exception
  (__cxa_dependent_exception *vptr) _GLIBCXX_NOTHROW
{
  if (emergency_pool.in_pool (vptr))
    emergency_pool.free (vptr);
  else
    free (vptr);
}