// <memory_resource> implementation -*- C++ -*-

// Copyright (C) 2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
#include <memory_resource>
#include <algorithm>            // lower_bound, rotate
#include <atomic>
#include <bit>                  // __ceil2, __log2p1
#include <new>
#if ATOMIC_POINTER_LOCK_FREE != 2
# include <bits/std_mutex.h>    // std::mutex, std::lock_guard
# include <bits/move.h>         // std::exchange
#endif
namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
namespace pmr
{
  namespace
  {
    class newdel_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t __bytes, size_t __alignment) override
      { return ::operator new(__bytes, std::align_val_t(__alignment)); }

      void
      do_deallocate(void* __p, size_t __bytes, size_t __alignment) noexcept
      override
      { ::operator delete(__p, __bytes, std::align_val_t(__alignment)); }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };
    class null_res_t final : public memory_resource
    {
      void*
      do_allocate(size_t, size_t) override
      { std::__throw_bad_alloc(); }

      void
      do_deallocate(void*, size_t, size_t) noexcept override
      { }

      bool
      do_is_equal(const memory_resource& __other) const noexcept override
      { return &__other == this; }
    };
    template<typename T>
      struct constant_init
      {
        union {
          unsigned char unused;
          T obj;
        };
        constexpr constant_init() : obj() { }

        template<typename U>
          explicit constexpr constant_init(U arg) : obj(arg) { }

        ~constant_init() { /* do nothing, union member is not destroyed */ }
      };
    constant_init<newdel_res_t> newdel_res{};
    constant_init<null_res_t> null_res{};
#if ATOMIC_POINTER_LOCK_FREE == 2
    using atomic_mem_res = atomic<memory_resource*>;
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
#elif defined(_GLIBCXX_HAS_GTHREADS)
    // Can't use pointer-width atomics, define a type using a mutex instead:
    struct atomic_mem_res
    {
# ifdef __GTHREAD_MUTEX_INIT
#  define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
      // std::mutex has constexpr constructor
      constexpr
# endif
      atomic_mem_res(memory_resource* r) : val(r) { }

      mutex mx;
      memory_resource* val;

      memory_resource* load()
      {
        lock_guard<mutex> lock(mx);
        return val;
      }

      memory_resource* exchange(memory_resource* r)
      {
        lock_guard<mutex> lock(mx);
        return std::exchange(val, r);
      }
    };
#else
# define _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    // Single-threaded, no need for synchronization
    struct atomic_mem_res
    {
      constexpr
      atomic_mem_res(memory_resource* r) : val(r) { }

      memory_resource* val;

      memory_resource* load() const
      { return val; }

      memory_resource* exchange(memory_resource* r)
      { return std::exchange(val, r); }
    };
#endif // ATOMIC_POINTER_LOCK_FREE == 2
#ifdef _GLIBCXX_ATOMIC_MEM_RES_CAN_BE_CONSTANT_INITIALIZED
    constant_init<atomic_mem_res> default_res{&newdel_res.obj};
#else
# include "default_resource.h"
#endif
  } // namespace
  memory_resource*
  new_delete_resource() noexcept
  { return &newdel_res.obj; }

  memory_resource*
  null_memory_resource() noexcept
  { return &null_res.obj; }

  memory_resource*
  set_default_resource(memory_resource* r) noexcept
  {
    if (r == nullptr)
      r = new_delete_resource();
    return default_res.obj.exchange(r);
  }

  memory_resource*
  get_default_resource() noexcept
  { return default_res.obj.load(); }
  // Member functions for std::pmr::monotonic_buffer_resource

  // Memory allocated by the upstream resource is managed in a linked list
  // of _Chunk objects. A _Chunk object recording the size and alignment of
  // the allocated block and a pointer to the previous chunk is placed
  // at the end of the block.
  class monotonic_buffer_resource::_Chunk
  {
  public:
    // Return the address and size of a block of memory allocated from __r,
    // of at least __size bytes and aligned to __align.
    // Add a new _Chunk to the front of the linked list at __head.
    static pair<void*, size_t>
    allocate(memory_resource* __r, size_t __size, size_t __align,
             _Chunk*& __head)
    {
      __size = std::__ceil2(__size + sizeof(_Chunk));
      void* __p = __r->allocate(__size, __align);
      // Add a chunk defined by (__p, __size, __align) to linked list __head.
      void* const __back = (char*)__p + __size - sizeof(_Chunk);
      __head = ::new(__back) _Chunk(__size, __align, __head);
      return { __p, __size - sizeof(_Chunk) };
    }
    // Return every chunk in linked list __head to resource __r.
    static void
    release(_Chunk*& __head, memory_resource* __r) noexcept
    {
      _Chunk* __next = __head;
      __head = nullptr;
      while (__next)
        {
          _Chunk* __ch = __next;
          __builtin_memcpy(&__next, __ch->_M_next, sizeof(_Chunk*));

          __glibcxx_assert(__ch->_M_canary != 0);
          __glibcxx_assert(__ch->_M_canary == (__ch->_M_size|__ch->_M_align));

          if (__ch->_M_canary != (__ch->_M_size | __ch->_M_align))
            return; // buffer overflow detected!

          size_t __size = (1u << __ch->_M_size);
          size_t __align = (1u << __ch->_M_align);
          void* __start = (char*)(__ch + 1) - __size;
          __r->deallocate(__start, __size, __align);
        }
    }
  private:
    _Chunk(size_t __size, size_t __align, _Chunk* __next) noexcept
    : _M_size(std::__log2p1(__size) - 1),
      _M_align(std::__log2p1(__align) - 1)
    {
      __builtin_memcpy(_M_next, &__next, sizeof(__next));
      _M_canary = _M_size | _M_align;
    }

    unsigned char _M_canary;
    unsigned char _M_size;
    unsigned char _M_align;
    unsigned char _M_next[sizeof(_Chunk*)];
  };
  void
  monotonic_buffer_resource::_M_new_buffer(size_t bytes, size_t alignment)
  {
    // Need to check this somewhere, so put it here:
    static_assert(alignof(monotonic_buffer_resource::_Chunk) == 1);

    const size_t n = std::max(bytes, _M_next_bufsiz);
    const size_t m = std::max(alignment, alignof(std::max_align_t));
    auto [p, size] = _Chunk::allocate(_M_upstream, n, m, _M_head);
    _M_current_buf = p;
    _M_avail = size;
    _M_next_bufsiz *= _S_growth_factor;
  }

  void
  monotonic_buffer_resource::_M_release_buffers() noexcept
  {
    _Chunk::release(_M_head, _M_upstream);
  }
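  // Example (illustration only): the two functions above are reached via
  // the public interface roughly like this.
  //
  //   monotonic_buffer_resource mbr;   // upstream = get_default_resource()
  //   void* p = mbr.allocate(256);     // exhausts the buffer,
  //                                    // calls _M_new_buffer
  //   mbr.release();                   // calls _M_release_buffers
  //
  // deallocate() on this resource is a no-op; memory only goes back to the
  // upstream resource on release() or destruction.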
  // Helper types for synchronized_pool_resource & unsynchronized_pool_resource

  namespace {

  // Simple bitset with runtime size. Tracks used blocks in a pooled chunk.
  struct bitset
  {
    using word = uint64_t;
    using size_type = uint32_t;

    static constexpr unsigned bits_per_word = numeric_limits<word>::digits;

    // The bitset does not own p
    bitset(void* p, size_type num_blocks)
    : _M_words(static_cast<word*>(p)), _M_size(num_blocks),
      _M_next_word(0)
    {
      const size_type last_word = num_blocks / bits_per_word;
      __builtin_memset(_M_words, 0, last_word * sizeof(*_M_words));
      // Set bits beyond _M_size, so they are not treated as free blocks:
      if (const size_type extra_bits = num_blocks % bits_per_word)
        _M_words[last_word] = (word)-1 << extra_bits;
      __glibcxx_assert( empty() );
      __glibcxx_assert( free() == num_blocks );
    }

    bitset() = default;
    ~bitset() = default;
    // Number of blocks
    size_t size() const noexcept { return _M_size; }

    // Number of free blocks (unset bits)
    size_t free() const noexcept
    {
      size_t n = 0;
      for (size_type i = _M_next_word; i < nwords(); ++i)
        n += (bits_per_word - std::__popcount(_M_words[i]));
      return n;
    }

    // True if there are no free blocks (all bits are set)
    bool full() const noexcept
    {
      if (_M_next_word >= nwords())
        return true;
      // For a bitset with size() > (max_blocks_per_chunk() - 64) we will
      // have nwords() == (max_word_index() + 1) and so _M_next_word will
      // never be equal to nwords().
      // In that case, check if the last word is full:
      if (_M_next_word == max_word_index())
        return _M_words[_M_next_word] == word(-1);
      return false;
    }

    // True if size() != 0 and all blocks are free (no bits are set).
    bool empty() const noexcept
    {
      if (nwords() == 0)
        return false;
      if (_M_next_word != 0)
        return false;
      for (size_type i = 0; i < nwords() - 1; ++i)
        if (_M_words[i] != 0)
          return false;
      word last = _M_words[nwords() - 1];
      if (const size_type extra_bits = size() % bits_per_word)
        last <<= (bits_per_word - extra_bits);
      return last == 0;
    }
    void reset() noexcept
    {
      _M_words = nullptr;
      _M_size = _M_next_word = 0;
    }

    bool operator[](size_type n) const noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      return _M_words[wd] & bit;
    }

    size_type
    get_first_unset() noexcept
    {
      if (_M_next_word < nwords())
        {
          const size_type n = std::__countr_one(_M_words[_M_next_word]);
          if (n < bits_per_word)
            {
              const word bit = word(1) << n;
              _M_words[_M_next_word] |= bit;
              const size_t res = (_M_next_word * bits_per_word) + n;
              if (n == (bits_per_word - 1))
                update_next_word();
              return res;
            }
        }
      return size_type(-1);
    }
    void set(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] |= bit;
      if (wd == _M_next_word)
        update_next_word();
    }

    void clear(size_type n) noexcept
    {
      __glibcxx_assert( n < _M_size );
      const size_type wd = n / bits_per_word;
      const word bit = word(1) << (n % bits_per_word);
      _M_words[wd] &= ~bit;
      if (wd < _M_next_word)
        _M_next_word = wd;
    }

    // Update _M_next_word to refer to the next word with an unset bit.
    // The size of the _M_next_word bit-field means it cannot represent
    // the maximum possible nwords() value. To avoid wraparound to zero
    // this function saturates _M_next_word at max_word_index().
    void update_next_word() noexcept
    {
      size_t next = _M_next_word;
      while (_M_words[next] == word(-1) && ++next < nwords())
        { }
      _M_next_word = std::min(next, max_word_index());
    }
    void swap(bitset& b) noexcept
    {
      std::swap(_M_words, b._M_words);
      size_type tmp = _M_size;
      _M_size = b._M_size;
      b._M_size = tmp;
      tmp = _M_next_word;
      _M_next_word = b._M_next_word;
      b._M_next_word = tmp;
    }

    size_type
    nwords() const noexcept
    { return (_M_size + bits_per_word - 1) / bits_per_word; }

    // Maximum value that can be stored in bitset::_M_size member (approx 500k)
    static constexpr size_t max_blocks_per_chunk() noexcept
    { return (1ull << _S_size_digits) - 1; }

    // Maximum value that can be stored in bitset::_M_next_word member (8191).
    static constexpr size_t max_word_index() noexcept
    { return (max_blocks_per_chunk() + bits_per_word - 1) / bits_per_word; }

    word* data() const noexcept { return _M_words; }

  private:
    static constexpr unsigned _S_size_digits
      = (numeric_limits<size_type>::digits
          + std::__log2p1(bits_per_word) - 1) / 2;

    word* _M_words = nullptr;
    // Number of blocks represented by the bitset:
    size_type _M_size : _S_size_digits;
    // Index of the first word with unset bits:
    size_type _M_next_word : numeric_limits<size_type>::digits - _S_size_digits;
  };
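  // Worked example (illustration, not from the original source): with a
  // 32-bit size_type and 64-bit word, _S_size_digits
  // = (32 + __log2p1(64) - 1) / 2 = (32 + 7 - 1) / 2 = 19, so _M_size can
  // count up to 2^19 - 1 = 524287 blocks (the "approx 500k" above) and
  // _M_next_word gets the remaining 32 - 19 = 13 bits.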
422 // A "chunk" belonging to a pool.
423 // A chunk contains many blocks of the same size.
424 // Derived from bitset to reuse its tail-padding.
425 struct chunk
: bitset
429 // p points to the start of a chunk of size bytes in length.
430 // The chunk has space for n blocks, followed by a bitset of size n
431 // that begins at address words.
432 // This object does not own p or words, the caller will free it.
433 chunk(void* p
, uint32_t bytes
, void* words
, size_t n
)
436 _M_p(static_cast<std::byte
*>(p
))
437 { __glibcxx_assert(bytes
<= chunk::max_bytes_per_chunk()); }
    chunk(chunk&& c) noexcept
    : bitset(std::move(c)), _M_bytes(c._M_bytes), _M_p(c._M_p)
    {
      c._M_bytes = 0;
      c._M_p = nullptr;
      c.reset();
    }

    chunk& operator=(chunk&& c) noexcept
    {
      swap(c);
      return *this;
    }
    // Allocated size of chunk:
    uint32_t _M_bytes = 0;
    // Start of allocated chunk:
    std::byte* _M_p = nullptr;

    // True if there are no free blocks in this chunk:
    using bitset::full;
    // Number of blocks in this chunk:
    using bitset::size;

    static constexpr uint32_t max_bytes_per_chunk() noexcept
    { return numeric_limits<decltype(_M_bytes)>::max(); }
    // Determine if block with address p and size block_size
    // is contained within this chunk.
    bool owns(void* p, size_t block_size)
    {
      std::less_equal<uintptr_t> less_equal;
      return less_equal(reinterpret_cast<uintptr_t>(_M_p),
                        reinterpret_cast<uintptr_t>(p))
        && less_equal(reinterpret_cast<uintptr_t>(p) + block_size,
                      reinterpret_cast<uintptr_t>(bitset::data()));
    }

    // Allocate next available block of block_size bytes from this chunk.
    void* reserve(size_t block_size) noexcept
    {
      const size_type n = get_first_unset();
      if (n == size_type(-1))
        return nullptr;
      return _M_p + (n * block_size);
    }
    // Deallocate a single block of block_size bytes
    void release(void* vp, size_t block_size)
    {
      __glibcxx_assert( owns(vp, block_size) );
      const size_t offset = static_cast<std::byte*>(vp) - _M_p;
      // Pointer is correctly aligned for a block in this chunk:
      __glibcxx_assert( (offset % block_size) == 0 );
      // Block has been allocated:
      __glibcxx_assert( (*this)[offset / block_size] == true );
      bitset::clear(offset / block_size);
    }

    // Deallocate a single block if it belongs to this chunk.
    bool try_release(void* p, size_t block_size)
    {
      if (!owns(p, block_size))
        return false;
      release(p, block_size);
      return true;
    }
    void swap(chunk& c) noexcept
    {
      std::swap(_M_bytes, c._M_bytes);
      std::swap(_M_p, c._M_p);
      bitset::swap(c);
    }

    bool operator<(const chunk& c) const noexcept
    { return std::less<const void*>{}(_M_p, c._M_p); }

    friend void swap(chunk& l, chunk& r) { l.swap(r); }

    friend bool operator<(const void* p, const chunk& c) noexcept
    { return std::less<const void*>{}(p, c._M_p); }
  };
  // For 64-bit this is 3*sizeof(void*) and for 32-bit it's 4*sizeof(void*).
  // TODO pad 64-bit to 4*sizeof(void*) to avoid splitting across cache lines?
  static_assert(sizeof(chunk) == 2 * sizeof(uint32_t) + 2 * sizeof(void*));
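  // Worked example (illustration, not from the original source): a chunk
  // serving 64 blocks of 16 bytes uses 64*16 = 1024 bytes of block storage
  // starting at _M_p, immediately followed by one 64-bit bitset word, so
  // bitset::data() == _M_p + 1024 and owns() accepts exactly those pointers
  // p that satisfy _M_p <= p && p + 16 <= _M_p + 1024.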
  // An oversized allocation that doesn't fit in a pool.
  struct big_block
  {
    // Alignment must be a power-of-two so we only need to use enough bits
    // to store the power, not the actual value:
    static constexpr unsigned _S_alignbits
      = std::__log2p1((unsigned)numeric_limits<size_t>::digits - 1);
    // Use the remaining bits to store the size:
    static constexpr unsigned _S_sizebits
      = numeric_limits<size_t>::digits - _S_alignbits;
    // The maximum value that can be stored in _M_size:
    static constexpr size_t all_ones = size_t(-1) >> _S_alignbits;
    // The minimum size of a big block (smaller sizes will be rounded up).
    static constexpr size_t min = 1u << _S_alignbits;
    big_block(size_t bytes, size_t alignment)
    : _M_size(alloc_size(bytes) >> _S_alignbits),
      _M_align_exp(std::__log2p1(alignment) - 1u)
    { }

    void* pointer = nullptr;
    size_t _M_size : numeric_limits<size_t>::digits - _S_alignbits;
    size_t _M_align_exp : _S_alignbits;
    size_t size() const noexcept
    {
      // If all bits are set in _M_size it means the maximum possible size:
      if (__builtin_expect(_M_size == (size_t(-1) >> _S_alignbits), false))
        return (size_t)-1;
      else
        return _M_size << _S_alignbits;
    }

    size_t align() const noexcept
    { return size_t(1) << _M_align_exp; }

    // Calculate size to be allocated instead of requested number of bytes.
    // The requested value will be rounded up to a multiple of big_block::min,
    // so the low _S_alignbits bits are all zero and don't need to be stored.
    static constexpr size_t alloc_size(size_t bytes) noexcept
    {
      const size_t s = bytes + min - 1u;
      if (__builtin_expect(s < bytes, false))
        return size_t(-1); // addition wrapped past zero, return max value
      return s & ~(min - 1u);
    }
    friend bool operator<(void* p, const big_block& b) noexcept
    { return less<void*>{}(p, b.pointer); }

    friend bool operator<(const big_block& b, void* p) noexcept
    { return less<void*>{}(b.pointer, p); }
  };

  static_assert(sizeof(big_block) == (2 * sizeof(void*)));

  } // namespace
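  // Worked example (illustration, not from the original source): with
  // 64-bit size_t, _S_alignbits = __log2p1(63) = 6 and min = 64, so
  // alloc_size(1000) = (1000 + 63) & ~63 = 1024.  big_block(1000, 32)
  // stores _M_size = 1024 >> 6 = 16 and _M_align_exp = __log2p1(32) - 1 = 5,
  // which decode back to size() == 1024 and align() == 32.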
  // A pool that serves blocks of a particular size.
  // Each pool manages a number of chunks.
  // When a pool is full it is replenished by allocating another chunk.
  struct __pool_resource::_Pool
  {
    // Smallest supported block size
    static constexpr unsigned _S_min_block
      = std::max(sizeof(void*), alignof(bitset::word));

    _Pool(size_t __block_size, size_t __blocks_per_chunk)
    : _M_chunks(),
      _M_block_sz(__block_size),
      _M_blocks_per_chunk(__blocks_per_chunk)
    { }

    // Must call release(r) before destruction!
    ~_Pool() { __glibcxx_assert(_M_chunks.empty()); }

    _Pool(_Pool&&) noexcept = default;
    _Pool& operator=(_Pool&&) noexcept = default;
    // Size of blocks in this pool
    size_t block_size() const noexcept
    { return _M_block_sz; }

    // Allocate a block if the pool is not full, otherwise return null.
    void* try_allocate() noexcept
    {
      const size_t blocksz = block_size();
      if (!_M_chunks.empty())
        {
          auto& last = _M_chunks.back();
          if (void* p = last.reserve(blocksz))
            return p;
          // TODO last is full, so move another chunk to the back instead?
          for (auto it = _M_chunks.begin(); it != &last; ++it)
            if (void* p = it->reserve(blocksz))
              return p;
        }
      return nullptr;
    }
    // Allocate a block from the pool, replenishing from upstream if needed.
    void* allocate(memory_resource* r, const pool_options& opts)
    {
      if (void* p = try_allocate())
        return p;
      replenish(r, opts);
      return _M_chunks.back().reserve(block_size());
    }
    // Return a block to the pool.
    bool deallocate(memory_resource*, void* p)
    {
      const size_t blocksz = block_size();
      if (__builtin_expect(!_M_chunks.empty(), true))
        {
          auto& last = _M_chunks.back();
          if (last.try_release(p, blocksz))
            return true;
          auto it = std::upper_bound(_M_chunks.begin(), &last, p);
          if (it != _M_chunks.begin())
            {
              --it;
              if (it->try_release(p, blocksz))
                // If chunk is empty could return to upstream, but we don't
                // currently do that. Pools only increase in size.
                return true;
            }
        }
      return false;
    }
    void replenish(memory_resource* __r, const pool_options& __opts)
    {
      using word = chunk::word;
      const size_t __blocks = _M_blocks_per_chunk;
      const auto __bits = chunk::bits_per_word;
      const size_t __words = (__blocks + __bits - 1) / __bits;
      const size_t __block_size = block_size();
      size_t __bytes = __blocks * __block_size + __words * sizeof(word);
      size_t __alignment = std::__ceil2(__block_size);
      void* __p = __r->allocate(__bytes, __alignment);
      __try
        {
          size_t __n = __blocks * __block_size;
          void* __pwords = static_cast<char*>(__p) + __n;
          _M_chunks.insert(chunk(__p, __bytes, __pwords, __blocks), __r);
        }
      __catch (...)
        {
          __r->deallocate(__p, __bytes, __alignment);
          __throw_exception_again;
        }
      if (_M_blocks_per_chunk < __opts.max_blocks_per_chunk)
        {
          const size_t max_blocks
            = (chunk::max_bytes_per_chunk() - sizeof(word))
            / (__block_size + 0.125);
          _M_blocks_per_chunk = std::min({
              max_blocks,
              __opts.max_blocks_per_chunk,
              (size_t)_M_blocks_per_chunk * 2
          });
        }
    }
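    // Worked example (illustration, not from the original source): for a
    // pool with block_size() == 64 and _M_blocks_per_chunk == 16, a single
    // 64-bit bitset word suffices (__words == 1), so replenish() requests
    // __bytes = 16*64 + 1*8 = 1032 bytes from upstream, aligned to
    // __ceil2(64) == 64.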
    void release(memory_resource* __r)
    {
      const size_t __alignment = std::__ceil2(block_size());
      for (auto& __c : _M_chunks)
        if (__c._M_p)
          __r->deallocate(__c._M_p, __c._M_bytes, __alignment);
      _M_chunks.clear(__r);
    }
700 // A "resourceless vector" instead of pmr::vector, to save space.
701 // All resize operations need to be passed a memory resource, which
702 // obviously needs to be the same one every time.
703 // Chunks are kept sorted by address of their first block, except for
704 // the most recently-allocated Chunk which is at the end of the vector.
707 using value_type
= chunk
;
708 using size_type
= unsigned;
709 using iterator
= value_type
*;
711 // A vector owns its data pointer but not memory held by its elements.
712 chunk
* data
= nullptr;
714 size_type capacity
= 0;
718 vector(size_type __n
, memory_resource
* __r
)
719 : data(polymorphic_allocator
<value_type
>(__r
).allocate(__n
)),
723 // Must call clear(r) before destruction!
724 ~vector() { __glibcxx_assert(data
== nullptr); }
      vector(vector&& __rval) noexcept
      : data(__rval.data), size(__rval.size), capacity(__rval.capacity)
      {
        __rval.data = nullptr;
        __rval.capacity = __rval.size = 0;
      }

      vector& operator=(vector&& __rval) noexcept
      {
        __glibcxx_assert(data == nullptr);
        data = __rval.data;
        size = __rval.size;
        capacity = __rval.capacity;
        __rval.data = nullptr;
        __rval.capacity = __rval.size = 0;
        return *this;
      }
      // void resize(size_type __n, memory_resource* __r);
      // void reserve(size_type __n, memory_resource* __r);

      void clear(memory_resource* __r)
      {
        if (!data)
          return;
        // Chunks must be individually freed before clearing the vector.
        std::destroy(begin(), end());
        polymorphic_allocator<value_type>(__r).deallocate(data, capacity);
        data = nullptr;
        size = capacity = 0;
      }
      // Sort existing elements then insert new one at the end.
      iterator insert(chunk&& c, memory_resource* r)
      {
        if (size < capacity)
          {
            if (size > 1)
              {
                auto mid = end() - 1;
                std::rotate(std::lower_bound(begin(), mid, *mid), mid, end());
              }
          }
        else if (data)
          {
            polymorphic_allocator<value_type> __alloc(r);
            auto __mid = std::lower_bound(begin(), end() - 1, back());
            auto __p = __alloc.allocate(capacity * 1.5);
            // move [begin,__mid) to new storage
            auto __p2 = std::move(begin(), __mid, __p);
            // move end-1 to new storage
            *__p2 = std::move(back());
            // move [__mid,end-1) to new storage
            std::move(__mid, end() - 1, ++__p2);
            std::destroy(begin(), end());
            __alloc.deallocate(data, capacity);
            data = __p;
            capacity *= 1.5;
          }
        else
          {
            polymorphic_allocator<value_type> __alloc(r);
            data = __alloc.allocate(capacity = 8);
          }
        auto back = ::new (data + size) chunk(std::move(c));
        __glibcxx_assert(std::is_sorted(begin(), back));
        size++;
        return back;
      }
      iterator begin() const { return data; }
      iterator end() const { return data + size; }

      bool empty() const noexcept { return size == 0; }

      value_type& back() { return data[size - 1]; }
    };

    vector _M_chunks;
    unsigned _M_block_sz;          // size of blocks allocated from this pool
    unsigned _M_blocks_per_chunk;  // number of blocks to allocate next
  };
  // An oversized allocation that doesn't fit in a pool.
  struct __pool_resource::_BigBlock : big_block
  {
    using big_block::big_block;
  };
  namespace {

  constexpr size_t pool_sizes[] = {
      8, 16, 24,
      32, 48,
      64, 80, 96, 112,
      128, 192,
      256, 320, 384, 448,
      512, 768,
      1024, 1536,
      2048, 3072,
      1<<12, 1<<13, 1<<14, 1<<15, 1<<16, 1<<17,
      1<<20, 1<<21, 1<<22 // 4MB should be enough for anybody
  };
  pool_options
  munge_options(pool_options opts)
  {
    // The values in the returned struct may differ from those supplied
    // to the pool resource constructor in that values of zero will be
    // replaced with implementation-defined defaults, and sizes may be
    // rounded to unspecified granularity.

    // Absolute maximum. Each pool might have a smaller maximum.
    if (opts.max_blocks_per_chunk == 0)
      {
        opts.max_blocks_per_chunk = 1024 * 10; // TODO a good default?
      }
    else
      {
        // TODO round to preferred granularity ?
      }
    if (opts.max_blocks_per_chunk > chunk::max_blocks_per_chunk())
      {
        opts.max_blocks_per_chunk = chunk::max_blocks_per_chunk();
      }

    // Absolute minimum. Likely to be much larger in practice.
    if (opts.largest_required_pool_block == 0)
      {
        opts.largest_required_pool_block = 4096; // TODO a good default?
      }
    else
      {
        // Round to preferred granularity
        static_assert(std::__ispow2(pool_sizes[0]));
        constexpr size_t mask = pool_sizes[0] - 1;
        opts.largest_required_pool_block += mask;
        opts.largest_required_pool_block &= ~mask;
      }

    if (opts.largest_required_pool_block < big_block::min)
      {
        opts.largest_required_pool_block = big_block::min;
      }
    else if (opts.largest_required_pool_block > std::end(pool_sizes)[-1])
      {
        // Setting _M_opts to the largest pool allows users to query it:
        opts.largest_required_pool_block = std::end(pool_sizes)[-1];
      }

    return opts;
  }
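  // Worked example (illustration, not from the original source): passing
  // pool_options{0, 1001} through munge_options() replaces the zero
  // max_blocks_per_chunk with the 10240 default above, and rounds
  // largest_required_pool_block up to the next multiple of pool_sizes[0],
  // giving 1008.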
  int
  pool_index(size_t block_size, int npools)
  {
    auto p = std::lower_bound(pool_sizes, pool_sizes + npools, block_size);
    int n = p - pool_sizes;
    if (n != npools)
      return n;
    return -1;
  }
  int
  select_num_pools(const pool_options& opts)
  {
    auto p = std::lower_bound(std::begin(pool_sizes), std::end(pool_sizes),
                              opts.largest_required_pool_block);
    const int n = p - std::begin(pool_sizes);
    if (p == std::end(pool_sizes) || *p == opts.largest_required_pool_block)
      return n + 1;
    return n;
  }

  } // namespace
  __pool_resource::
  __pool_resource(const pool_options& opts, memory_resource* upstream)
  : _M_opts(munge_options(opts)), _M_unpooled(upstream),
    _M_npools(select_num_pools(_M_opts))
  { }

  __pool_resource::~__pool_resource() { release(); }
  void
  __pool_resource::release() noexcept
  {
    memory_resource* res = resource();
    // deallocate oversize allocations
    for (auto& b : _M_unpooled)
      res->deallocate(b.pointer, b.size(), b.align());
    pmr::vector<_BigBlock>{res}.swap(_M_unpooled);
  }
  void*
  __pool_resource::allocate(size_t bytes, size_t alignment)
  {
    auto& b = _M_unpooled.emplace_back(bytes, alignment);
    __try
      {
        // N.B. need to allocate b.size(), which might be larger than bytes.
        void* p = resource()->allocate(b.size(), alignment);
        b.pointer = p;
        if (_M_unpooled.size() > 1)
          {
            const auto mid = _M_unpooled.end() - 1;
            // move to right position in vector
            std::rotate(std::lower_bound(_M_unpooled.begin(), mid, p),
                        mid, _M_unpooled.end());
          }
        return p;
      }
    __catch (...)
      {
        _M_unpooled.pop_back();
        __throw_exception_again;
      }
  }
  void
  __pool_resource::deallocate(void* p, size_t bytes, size_t alignment)
  {
    const auto it
      = std::lower_bound(_M_unpooled.begin(), _M_unpooled.end(), p);
    __glibcxx_assert(it != _M_unpooled.end() && it->pointer == p);
    if (it != _M_unpooled.end() && it->pointer == p) // [[likely]]
      {
        const auto b = *it;
        __glibcxx_assert(b.size() == b.alloc_size(bytes));
        __glibcxx_assert(b.align() == alignment);
        _M_unpooled.erase(it);
        // N.B. need to deallocate b.size(), which might be larger than bytes.
        resource()->deallocate(p, b.size(), b.align());
      }
  }
  // Create array of pools, allocated from upstream resource.
  auto
  __pool_resource::_M_alloc_pools()
  -> _Pool*
  {
    polymorphic_allocator<_Pool> alloc{resource()};
    _Pool* p = alloc.allocate(_M_npools);
    for (int i = 0; i < _M_npools; ++i)
      {
        // For last pool use largest_required_pool_block
        const size_t block_size = (i+1 == _M_npools)
          ? _M_opts.largest_required_pool_block
          : pool_sizes[i];

        // Decide on initial number of blocks per chunk.
        // Always have at least 16 blocks per chunk:
        const size_t min_blocks_per_chunk = 16;
        // But for smaller blocks, use a larger initial size:
        size_t blocks_per_chunk
          = std::max(1024 / block_size, min_blocks_per_chunk);
        // But don't exceed the requested max_blocks_per_chunk:
        blocks_per_chunk
          = std::min(blocks_per_chunk, _M_opts.max_blocks_per_chunk);
        // Allow space for bitset to track which blocks are used/unused:
        blocks_per_chunk *= 1 - 1.0 / (__CHAR_BIT__ * block_size);
        // Construct a _Pool for the given block size and initial chunk size:
        alloc.construct(p + i, block_size, blocks_per_chunk);
      }
    return p;
  }
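  // Worked example (illustration, not from the original source): for a
  // 16-byte pool, blocks_per_chunk starts at max(1024/16, 16) == 64, then
  // shrinks by the bitset overhead factor 1 - 1/(8*16), i.e.
  // 64 * 0.9921875 = 63.5, truncated to 63 blocks in the first chunk.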
  // unsynchronized_pool_resource member functions

  // Constructor
  unsynchronized_pool_resource::
  unsynchronized_pool_resource(const pool_options& opts,
                               memory_resource* upstream)
  : _M_impl(opts, upstream), _M_pools(_M_impl._M_alloc_pools())
  { }

  // Destructor
  unsynchronized_pool_resource::~unsynchronized_pool_resource()
  { release(); }
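  // Example (illustration only): typical lifetime of this resource.
  //
  //   unsynchronized_pool_resource upr;   // default options & upstream
  //   void* p = upr.allocate(100);        // served by a small pool
  //                                       // (112 bytes, given the
  //                                       // pool_sizes table above)
  //   upr.deallocate(p, 100);
  //   upr.release();                      // return everything upstream
  //
  // No locking anywhere: this resource must not be shared between threads
  // without external synchronization (use synchronized_pool_resource).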
  // Return all memory to upstream resource.
  void
  unsynchronized_pool_resource::release()
  {
    // release pooled memory
    if (_M_pools)
      {
        memory_resource* res = upstream_resource();
        polymorphic_allocator<_Pool> alloc{res};
        for (int i = 0; i < _M_impl._M_npools; ++i)
          {
            _M_pools[i].release(res);
            alloc.destroy(_M_pools + i);
          }
        alloc.deallocate(_M_pools, _M_impl._M_npools);
        _M_pools = nullptr;
      }

    // release unpooled memory
    _M_impl.release();
  }
  // Find the right pool for a block of size block_size.
  auto
  unsynchronized_pool_resource::_M_find_pool(size_t block_size) noexcept
  {
    __pool_resource::_Pool* pool = nullptr;
    if (_M_pools) // [[likely]]
      {
        int index = pool_index(block_size, _M_impl._M_npools);
        if (index != -1)
          pool = _M_pools + index;
      }
    return pool;
  }
  // Override for memory_resource::do_allocate
  void*
  unsynchronized_pool_resource::do_allocate(size_t bytes, size_t alignment)
  {
    const auto block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
        // Recreate pools if release() has been called:
        if (__builtin_expect(_M_pools == nullptr, false))
          _M_pools = _M_impl._M_alloc_pools();
        if (auto pool = _M_find_pool(block_size))
          return pool->allocate(upstream_resource(), _M_impl._M_opts);
      }
    return _M_impl.allocate(bytes, alignment);
  }
  // Override for memory_resource::do_deallocate
  void
  unsynchronized_pool_resource::
  do_deallocate(void* p, size_t bytes, size_t alignment)
  {
    size_t block_size = std::max(bytes, alignment);
    if (block_size <= _M_impl._M_opts.largest_required_pool_block)
      {
        if (auto pool = _M_find_pool(block_size))
          {
            pool->deallocate(upstream_resource(), p);
            return;
          }
      }
    _M_impl.deallocate(p, bytes, alignment);
  }

} // namespace pmr
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std