// Allocators -*- C++ -*-

// Copyright (C) 2001-2022 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */
/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */
#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/requires_hosted.h> // GNU extensions are currently omitted

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>
#if __cplusplus >= 201103L
#include <type_traits>
#endif
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new.
   *  1. If a client requests an object of size > _S_max_bytes, the resulting
   *     object will be obtained directly from new.
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   */
  class __pool_alloc_base
  {
    typedef std::size_t size_t;
  protected:

    enum { _S_align = 8 };
    enum { _S_max_bytes = 128 };
    enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };
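
    // With the defaults above there are 16 free lists, serving blocks of
    // 8, 16, 24, ..., 128 bytes.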
    union _Obj
    {
      union _Obj* _M_free_list_link;
      char        _M_client_data[1];    // The client sees this.
    };

    static _Obj* volatile _S_free_list[_S_free_list_size];

    // Chunk allocation state.
    static char*  _S_start_free;
    static char*  _S_end_free;
    static size_t _S_heap_size;
    size_t
    _M_round_up(size_t __bytes)
    { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }
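
    // For example, with _S_align == 8: _M_round_up(1) == 8,
    // _M_round_up(8) == 8, and _M_round_up(13) == 16.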
    _GLIBCXX_CONST _Obj* volatile*
    _M_get_free_list(size_t __bytes) throw ();

    __mutex&
    _M_get_mutex() throw ();
    // Returns an object of size __n, and optionally adds further objects of
    // size __n to the corresponding free list.
    void*
    _M_refill(size_t __n);

    // Allocates a chunk of __nobjs objects of size __n.  __nobjs may be
    // reduced if it is inconvenient to allocate the requested number.
    char*
    _M_allocate_chunk(size_t __n, int& __nobjs);
  };
  /**
   *  @brief  Allocator using a memory pool with a single lock.
   *  @ingroup allocators
   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word _S_force_new;

    public:
      typedef std::size_t     size_type;
      typedef std::ptrdiff_t  difference_type;
      typedef _Tp*            pointer;
      typedef const _Tp*      const_pointer;
      typedef _Tp&            reference;
      typedef const _Tp&      const_reference;
      typedef _Tp             value_type;
      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif
      __pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __pool_alloc(const __pool_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return std::size_t(-1) / sizeof(_Tp); }
#if __cplusplus >= 201103L
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
        { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
      _GLIBCXX_NODISCARD pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };
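
  // A minimal usage sketch (illustrative only): __pool_alloc meets the
  // standard allocator requirements, so it can be supplied as a container's
  // allocator template argument.
  //
  //   #include <ext/pool_allocator.h>
  //   #include <list>
  //
  //   int main()
  //   {
  //     // Small list nodes are carved out of the shared pool; requests
  //     // larger than _S_max_bytes (128) go straight to operator new.
  //     std::list<int, __gnu_cxx::__pool_alloc<int> > pooled;
  //     for (int i = 0; i < 1000; ++i)
  //       pooled.push_back(i);
  //   }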
  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

#if __cpp_impl_three_way_comparison < 201907L
  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }
#endif

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;
  template<typename _Tp>
    _GLIBCXX_NODISCARD _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      using std::size_t;
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
        {
          if (__n > this->max_size())
            std::__throw_bad_alloc();

          const size_t __bytes = __n * sizeof(_Tp);

#if __cpp_aligned_new
          if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
            {
              std::align_val_t __al = std::align_val_t(alignof(_Tp));
              return static_cast<_Tp*>(::operator new(__bytes, __al));
            }
#endif
          // If there is a race through here, assume answer from getenv
          // will resolve in same direction.  Inspired by techniques
          // to efficiently support threading found in basic_string.h.
          if (_S_force_new == 0)
            {
              if (std::getenv("GLIBCXX_FORCE_NEW"))
                __atomic_add_dispatch(&_S_force_new, 1);
              else
                __atomic_add_dispatch(&_S_force_new, -1);
            }
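
          // After this point _S_force_new is either +1 (bypass the pool and
          // call operator new directly, e.g. when GLIBCXX_FORCE_NEW is set
          // in the environment) or -1 (use the pooled free lists).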
          if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
            __ret = static_cast<_Tp*>(::operator new(__bytes));
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);

              __scoped_lock sentry(_M_get_mutex());
              _Obj* __restrict__ __result = *__free_list;
              if (__builtin_expect(__result == 0, 0))
                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
              else
                {
                  *__free_list = __result->_M_free_list_link;
                  __ret = reinterpret_cast<_Tp*>(__result);
                }
              if (__ret == 0)
                std::__throw_bad_alloc();
            }
        }
      return __ret;
    }
  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      using std::size_t;
      if (__builtin_expect(__n != 0 && __p != 0, true))
        {
#if __cpp_aligned_new
          if (alignof(_Tp) > __STDCPP_DEFAULT_NEW_ALIGNMENT__)
            {
              ::operator delete(__p, std::align_val_t(alignof(_Tp)));
              return;
            }
#endif
          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
            ::operator delete(__p);
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
              _Obj* __q = reinterpret_cast<_Obj*>(__p);

              __scoped_lock sentry(_M_get_mutex());
              __q->_M_free_list_link = *__free_list;
              *__free_list = __q;
            }
        }
    }
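
  // Note that for pooled (small, non-over-aligned) blocks, deallocate does
  // not call operator delete; the block is simply pushed back onto its free
  // list for reuse by later allocations of the same size class.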
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif