/* Simple and straight-forward malloc implementation (front end).

   Copyright (C) 2020-2024 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
/* This file implements an allocator of memory blocks of given size (a
   "malloc front end"), based on an allocator of memory pages (a "malloc
   back end").

   The need for such an allocator arises because a memory block is often
   50 bytes large or less, whereas an allocator of memory pages provides
   entire pages (4096 bytes or more).

   This implementation attempts to be
     - simple and straight-forward,
     - respecting locality of reference,
     - usable for small allocations,
     - nevertheless of reasonable speed.

   Simple and straight-forward - means that it contains only a small amount
   of code (compared to e.g. tcmalloc).

   Respecting locality of reference - means that searching for a free block
   will not follow lists of pointers that touch unrelated cache lines in
   the same page or even unrelated memory pages, because that would cause
   cache misses or even swapping in of unrelated memory pages.

   Usable for small allocations - means that it can be used for data types
   with few instances.  It does not, unlike some other malloc implementations,
   allocate 256 KB of memory up-front.  Nor does it allocate memory pages
   per thread.

   Reasonable speed is nevertheless guaranteed by
     - choosing algorithms that lead to little fragmentation,
     - small caches where they make sense.
 */
/* The user of this file needs to define the following macros before including
   this file:

     PAGESIZE          A variable-like macro of type intptr_t or uintptr_t
                       that evaluates to the memory page size (>= 4096).

     PAGESIZE_MAX      A constant that specifies an upper bound for PAGESIZE.

     ALLOC_PAGES       A function-like macro with the signature
                         uintptr_t ALLOC_PAGES (size_t size)
                       where the argument size is > 0 and a multiple of the
                       PAGESIZE.  It returns a multiple of PAGESIZE, or 0
                       upon failure.

     FREE_PAGES        A function-like macro with the signature
                         void FREE_PAGES (uintptr_t pages, size_t size)
                       where pages is a non-zero value returned by
                         ALLOC_PAGES (size).

     ALIGNMENT         A constant that specifies the desired alignment of all
                       the returned memory blocks.  Possible values are the
                       powers of 2, from sizeof (void *) to 32.

     PAGE_RESERVED_HEADER_SIZE   A constant, either 0 or a multiple of
                       sizeof (void *), that denotes the size of a reserved
                       header - not to be used by the application - at the
                       beginning of a page sequence returned by ALLOC_PAGES.
 */
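
/* Illustration only: a minimal sketch of how a user of this file might
   define the required macros on a POSIX-like system with mmap.  These
   definitions are assumptions, not part of this file; real users supply
   their own back end, and the helper name alloc_pages_sketch is
   hypothetical.  Note that mmap reports failure as MAP_FAILED, not 0,
   hence the wrapper function.

     #include <unistd.h>
     #include <sys/mman.h>

     #define PAGESIZE ((uintptr_t) sysconf (_SC_PAGESIZE))
     #define PAGESIZE_MAX 65536

     static uintptr_t
     alloc_pages_sketch (size_t size)
     {
       void *p = mmap (NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       return p == MAP_FAILED ? 0 : (uintptr_t) p;
     }

     #define ALLOC_PAGES(size) alloc_pages_sketch (size)
     #define FREE_PAGES(pages, size) munmap ((void *) (pages), (size))
     #define ALIGNMENT sizeof (void *)
     #define PAGE_RESERVED_HEADER_SIZE 0
 */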
/* This file uses _GL_CMP.  */
#if !_GL_CONFIG_H_INCLUDED
 #error "Please include config.h first."
#endif
/* =================== Declarations of exported functions =================== */

#include <stdint.h>

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t allocate_block (size_t size);

/* Frees a block of memory, returned by allocate_block.  */
static void free_block (uintptr_t block);
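
/* Illustration only: the intended usage pattern of the two functions above,
   with a hypothetical caller (not part of this file):

     uintptr_t b = allocate_block (73);
     if (b == 0)
       handle_out_of_memory ();          (hypothetical handler)
     memset ((void *) b, 0, 73);         (use the 73 bytes of the block)
     free_block (b);
 */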
/* ============================= Implementation ============================= */

/* Outline of the implementation decisions (ID):

   * ID: This implementation considers three types of blocks:
     - small blocks - these are allocated in "small block" pages.
     - medium blocks - these are allocated in "medium block" pages.
     - large blocks - these are allocated individually, with a page or
       sequence of pages uniquely for this block.
   * Rationale:
     - Most memory allocations are small (e.g. <= 32 bytes); this is a lesson
       learned from xc/programs/Xserver/os/xalloc.c (1997) [Pascal Haible].
     - Fragmentation is one of the biggest problems, and keeping large
       blocks and small blocks separate from medium blocks is one way to
       control it.

   * ID: If an allocation succeeds in one page, the next allocation (of the
     same type of block) will try to use the same page.
   * Rationale: Locality of reference.

   * ID: Pages of small or medium blocks have their management data structures
     concentrated at the beginning of the page.  No chained lists that force
     to walk through the page.
   * Rationale: Locality of reference.

   * ID: Across pages, the management of the free space is done through data
     structures outside the pages.  No chained lists across pages.
   * Rationale: Locality of reference.
 */
#include <stdlib.h>
#include <string.h>   /* ffsll */
#include <strings.h>  /* ffs */
#include "flexmember.h"
#include "glthread/lock.h"
#include "thread-optim.h"
#include "gl_oset.h"
#include "gl_rbtree_oset.h"
#ifdef __CHERI_PURE_CAPABILITY__
# include <cheri.h>
#endif

/* Help the branch prediction.  */
#if __GNUC__ >= 3
# define likely(cond)   __builtin_expect ((cond), 1)
# define unlikely(cond) __builtin_expect ((cond), 0)
#else
# define likely(cond)   (cond)
# define unlikely(cond) (cond)
#endif
enum { small_page_type = 1, medium_page_type = 2, large_page_type = 3 };

/* Header of a page of small or medium blocks or of a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct any_page_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  /* small_page_type or medium_page_type or large_page_type */
  unsigned char page_type;
};
/* ========================= Small and medium blocks ======================== */

/* An integer type, capable of holding the values 0 .. PAGESIZE.  */
#if PAGESIZE_MAX >= 0x10000
typedef unsigned int pg_offset_t;
#else
typedef unsigned short pg_offset_t;
#endif
/* Tree element that corresponds to a page.
   These tree elements are allocated via malloc().  */
struct page_tree_element
{
  uintptr_t page;
  pg_offset_t free_space;
};
/* Header of a page of small or medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct dissected_page_header
{
  struct any_page_header common;
#ifdef __CHERI_PURE_CAPABILITY__
  /* This page, with bounds [page, page + PAGESIZE).  */
  uintptr_t whole_page;
#endif
  /* Amount of free space in this page.  Always a multiple of ALIGNMENT.  */
  pg_offset_t free_space;
  /* The tree element.  */
  struct page_tree_element *tree_element;
};
/* Data structure for managing a pool of pages.  */
struct page_pool
{
  /* Methods.  */
  void (*init_page_pool) (struct page_pool *pool);
  void (*init_page) (uintptr_t page);
  uintptr_t (*allocate_block_in_page) (size_t size, uintptr_t page);
  void (*free_block_in_page) (uintptr_t block, uintptr_t page);

  /* Maximum free space in a page of this pool.  */
  size_t page_capacity;

  /* Page that provided the last successful allocation from this pool,
     or 0.  */
  uintptr_t last_page;

  /* Ordered set of managed pages, sorted according to free_space, in ascending
     order.  */
  gl_oset_t /* <struct page_tree_element *> */ managed_pages;

  /* A queue of pages which have a modified free_space but which have not been
     updated in the managed_pages tree so far.  */
#define UPDATE_QUEUE_SIZE 10
  unsigned int update_queue_count; /* <= UPDATE_QUEUE_SIZE */
  uintptr_t update_queue[UPDATE_QUEUE_SIZE];

  /* A page that could be freed.
     We don't free it immediately, so that an allocation/deallocation
     pattern like
       2x allocate, 2x free, 2x allocate, 2x free, 2x allocate, 2x free, ...
     will not allocate and free a page that frequently.  */
  uintptr_t freeable_page;
};
/* Comparison function for managed_pages.  */
static int
compare_pages_by_free_space (const void *elt1, const void *elt2)
{
  struct page_tree_element *element1 = (struct page_tree_element *) elt1;
  struct page_tree_element *element2 = (struct page_tree_element *) elt2;
  int cmp = _GL_CMP (element1->free_space, element2->free_space);
  if (unlikely (cmp == 0))
    cmp = _GL_CMP (element1->page, element2->page);
  return cmp;
}
/* Tests whether the free space in a tree element is greater than or equal to
   the given threshold.  */
static bool
page_free_space_is_at_least (const void *elt, const void *threshold)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  return element->free_space >= (uintptr_t) threshold;
}
/* Updates the free space of a 'struct page_tree_element *'.
   Only to be called through gl_oset_update!  */
static void
set_free_space (const void *elt, void *action_data)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  element->free_space = (pg_offset_t) (uintptr_t) action_data;
}
/* Executes the pending updates in the managed_pages tree.  */
static void
flush_all_updates (struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  while (likely (count > 0))
    {
      --count;
      uintptr_t page = pool->update_queue[count];
      struct dissected_page_header *pageptr =
        (struct dissected_page_header *) page;
      struct page_tree_element *tree_element = pageptr->tree_element;
      if (gl_oset_update (pool->managed_pages, tree_element,
                          set_free_space,
                          (void *) (uintptr_t) pageptr->free_space)
          < 0)
        /* A collision was found.  This contradicts the definition of
           compare_pages_by_free_space.  */
        abort ();
    }
  pool->update_queue_count = 0;
}
/* Adds a page to the update queue.
   This function has to be called when the free_space of the page has
   changed.  */
static inline void
add_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      /* It's already in the queue.  */
      return;

  /* Ensure there is room for adding one more page to the update queue.  */
  if (unlikely (count == UPDATE_QUEUE_SIZE))
    flush_all_updates (pool);

  /* Add it to the update queue.  */
  pool->update_queue[pool->update_queue_count++] = page;
}
/* Drops a page from the update queue.  */
static inline void
drop_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      {
        /* It's in the queue.  Remove it.  */
        for (i = i + 1; i < count; i++)
          pool->update_queue[i - 1] = pool->update_queue[i];
        pool->update_queue_count--;
        return;
      }
}
/* ============================== Small blocks ============================== */

#include "ssfmalloc-bitmap.h"

/* Maximum size of a small block.
   Must be a power of 2.  */
#define SMALL_BLOCK_MAX_SIZE (ALIGNMENT < 8 ? 32 * ALIGNMENT : 256)
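
/* Worked example of this bound: with ALIGNMENT == 8 (or larger),
   SMALL_BLOCK_MAX_SIZE is 256, so a small block spans at most
   256 / ALIGNMENT <= 32 rows of ALIGNMENT bytes; with ALIGNMENT == 4,
   it is 32 * 4 = 128 bytes, i.e. again at most 32 rows.  Either way a
   small block occupies at most 32 bitmap bits, which is what
   allocate_small_block_in_page below relies on.  */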
/* Number of rows of ALIGNMENT bytes available in an empty page.  */
static unsigned int small_block_page_num_bits;
/* Offset in the page where the memory blocks start.
   A multiple of ALIGNMENT.  */
static unsigned int small_block_page_blocks_start;
/* Number of uint32_t words in each of the two bitmaps.  */
static unsigned int small_block_page_num_bitmap_words;

/* Header of a page of small blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct small_page_header
{
  struct dissected_page_header common;
  /* Two bitmaps, each with small_block_page_num_bitmap_words words.  In each
     a bit represents ALIGNMENT bytes.
       - available_bitmap: bit set means available, bit clear means allocated.
       - blockend_bitmap: bit set means an allocated block ends here.  */
  uint32_t bitmap_words[FLEXIBLE_ARRAY_MEMBER];
};
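
/* Illustrative example of the bitmap semantics (values assumed): with
   ALIGNMENT == 8, a 20-byte block needs c = 3 rows; if it is placed at
   rows 3..5, bits 3, 4, 5 of available_bitmap are cleared and bit 5 (the
   last row of the block) is set in blockend_bitmap.  On free, the
   blockend bit tells how many rows to mark available again, so no
   per-block size header is needed.  */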
static inline uint32_t *
small_block_page_available_bitmap (struct small_page_header *pageptr)
{
  return &pageptr->bitmap_words[0];
}

static inline uint32_t *
small_block_page_blockend_bitmap (struct small_page_header *pageptr)
{
  return &pageptr->bitmap_words[small_block_page_num_bitmap_words];
}
static void
init_small_block_page_pool (struct page_pool *pool)
{
  /* How many usable rows of ALIGNMENT bytes can we have?
     Each takes ALIGNMENT bytes + 1/8 byte in each bitmap, so approximately
     (ALIGNMENT + 1/4) bytes.  */
  unsigned int num_bits = (unsigned int) (4 * PAGESIZE) / (4 * ALIGNMENT + 1);
  unsigned int num_bitmap_words;
  unsigned int blocks_start;
  /* Iterate until it converges.  */
  for (;;)
    {
      num_bitmap_words = (num_bits + 32 - 1) / 32;
      blocks_start =
        (FLEXNSIZEOF (struct small_page_header, bitmap_words,
                      2 * num_bitmap_words)
         + ALIGNMENT - 1) & -ALIGNMENT;
      unsigned int num_bits_r = (unsigned int) (PAGESIZE - blocks_start) / ALIGNMENT;
      if (num_bits_r >= num_bits)
        break;
      num_bits = num_bits_r;
    }
  small_block_page_num_bits         = num_bits;
  small_block_page_num_bitmap_words = num_bitmap_words;
  small_block_page_blocks_start     = blocks_start;

  pool->page_capacity = small_block_page_num_bits * ALIGNMENT;
}
static void
init_small_block_page (uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;
  pageptr->common.common.page_type = small_page_type;
#ifdef __CHERI_PURE_CAPABILITY__
  pageptr->common.whole_page = page;
#endif

  /* Initialize available_bitmap.  */
  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  init_bitmap_all_bits_set (small_block_page_num_bitmap_words,
                            available_bitmap);
  if ((small_block_page_num_bits % 32) != 0)
    available_bitmap[small_block_page_num_bitmap_words - 1] =
      (1U << (small_block_page_num_bits % 32)) - 1;

  /* Initialize blockend_bitmap.  */
  init_bitmap_all_bits_clear (small_block_page_num_bitmap_words,
                              small_block_page_blockend_bitmap (pageptr));

  pageptr->common.free_space = small_block_page_num_bits * ALIGNMENT;
}
/* Allocates a block of memory of size <= SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_small_block_in_page (size_t size, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  /* glibc compatible.  */
  if (size == 0)
    size = 1;

  /* Number of consecutive bits to look for in the bitmap.  */
  size_t c = (size + ALIGNMENT - 1) / ALIGNMENT;

  /* SMALL_BLOCK_MAX_SIZE has been chosen so that c <= 32.  */
  if (!(c > 0 && c <= 32))
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  size_t k = find_first_packet_set (small_block_page_num_bitmap_words,
                                    available_bitmap,
                                    c);
  if (unlikely (k == (size_t)(-1)))
    /* Failed to find c consecutive available rows of ALIGNMENT bytes each.  */
    return 0;

  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);
  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] &= ~(((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] |= (1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] &= ~(-1U << i);
      available_bitmap[j + 1] &= ~((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] |= (1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space -= c * ALIGNMENT;

  return page + small_block_page_blocks_start + k * ALIGNMENT;
}
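
/* Worked example for the single-word case above (values assumed: c = 3,
   i = 4): ((2U << (c - 1)) - 1) = ((2 << 2) - 1) = 0b111; shifted left
   by i = 4 this gives 0b01110000, so clearing these bits in
   available_bitmap[j] marks rows 4..6 as allocated, and (1U << (i + c - 1))
   = (1U << 6) sets the blockend bit of row 6.  */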
static void
free_small_block_in_page (uintptr_t block, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  if (!(block >= page + small_block_page_blocks_start
        && (block % ALIGNMENT) == 0))
    /* Invalid argument.  */
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);

  /* The bit that corresponds to where the block starts.  */
  size_t k = (block - page - small_block_page_blocks_start) / ALIGNMENT;
  /* The bit that corresponds to where the block ends.  */
  size_t ke = find_first_bit_set (small_block_page_num_bitmap_words,
                                  blockend_bitmap,
                                  k);
  if (/* ke == (size_t)(-1) || */ ke >= k + 32)
    /* Invalid argument or invalid state.  */
    abort ();

  /* Number of consecutive bits to manipulate in the bitmap.  */
  size_t c = ke - k + 1;

  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] |= (((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] &= ~(1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] |= (-1U << i);
      available_bitmap[j + 1] |= ((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] &= ~(1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space += c * ALIGNMENT;
}
/* Management of pages of small blocks.  */
struct page_pool small_block_pages =
  {
    init_small_block_page_pool,
    init_small_block_page,
    allocate_small_block_in_page,
    free_small_block_in_page
  };
/* ============================== Medium blocks ============================= */

/* A range of memory in a page.
   It covers the address range [page+start, page+end).
   start <= end.  */
struct memory_range
{
  pg_offset_t start;
  pg_offset_t end;
};
/* Header of a page of medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct medium_page_header
{
  struct dissected_page_header common;
  /* If n blocks are allocated, there are n+1 gaps before, between, and
     after them.  Keep them in an array, sorted in ascending order.  */
  unsigned int num_gaps; /* > 0 */
  struct memory_range gaps[FLEXIBLE_ARRAY_MEMBER /* PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1 */];
};

#define MEDIUM_BLOCKS_PAGE_MAX_GAPS \
  (PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
#define MEDIUM_BLOCKS_PAGE_FIRST_GAP_START \
  ((FLEXSIZEOF (struct medium_page_header, gaps, \
                MEDIUM_BLOCKS_PAGE_MAX_GAPS * sizeof (struct memory_range)) \
    + ALIGNMENT - 1) & -ALIGNMENT)
#define MEDIUM_BLOCKS_PAGE_LAST_GAP_END \
  PAGESIZE
#define MEDIUM_BLOCKS_PAGE_CAPACITY \
  (MEDIUM_BLOCKS_PAGE_LAST_GAP_END - MEDIUM_BLOCKS_PAGE_FIRST_GAP_START)
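
/* Illustrative numbers (assuming PAGESIZE == 4096 and
   SMALL_BLOCK_MAX_SIZE == 256): a medium page holds at most
   4096 / 256 + 1 = 17 gaps, so the header with its gaps array stays
   small compared to the page, and nearly the whole page remains usable
   as MEDIUM_BLOCKS_PAGE_CAPACITY.  */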
static void
init_medium_block_page_pool (struct page_pool *pool)
{
  pool->page_capacity = MEDIUM_BLOCKS_PAGE_CAPACITY;
}

static void
init_medium_block_page (uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  pageptr->common.common.page_type = medium_page_type;
#ifdef __CHERI_PURE_CAPABILITY__
  pageptr->common.whole_page = page;
#endif
  pageptr->num_gaps = 1;
  pageptr->gaps[0].start = MEDIUM_BLOCKS_PAGE_FIRST_GAP_START;
  pageptr->gaps[0].end   = MEDIUM_BLOCKS_PAGE_LAST_GAP_END;
  pageptr->common.free_space = MEDIUM_BLOCKS_PAGE_CAPACITY;
}
/* Allocates a block of memory of size > SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_medium_block_in_page (size_t size, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;

  /* Walk through the gaps and remember the smallest gap of at least
     the given size.  */
  size_t best_i = (size_t)(-1);
  size_t best_length = (size_t)(-1);
  size_t num_gaps = pageptr->num_gaps;
  size_t i;
  for (i = 0; i < num_gaps; i++)
    {
      size_t length = pageptr->gaps[i].end - pageptr->gaps[i].start;
      if (length >= size)
        {
          /* Found a gap of sufficient size.  */
          if (length < best_length)
            {
              best_i = i;
              best_length = length;
            }
        }
    }
  if (unlikely (best_i == (size_t)(-1)))
    /* Failed to find a gap of sufficient size.  */
    return 0;

  size_t aligned_size = (size + ALIGNMENT - 1) & -ALIGNMENT;

  if (pageptr->common.free_space < aligned_size)
    /* Invalid state: Less free space than expected.  */
    abort ();

  /* Split the gap, leaving an empty gap and a remaining gap.  */
  for (i = num_gaps - 1; ; i--)
    {
      pageptr->gaps[i + 1] = pageptr->gaps[i];
      if (i == best_i)
        break;
    }
  size_t result = pageptr->gaps[best_i].start;
  pageptr->gaps[best_i].end = result;
  pageptr->gaps[best_i + 1].start = result + aligned_size;
  pageptr->num_gaps = num_gaps + 1;
  if (pageptr->num_gaps > PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
    /* Invalid state: More gaps than expected.  */
    abort ();

  pageptr->common.free_space -= aligned_size;

  return page + result;
}
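
/* Illustrative example of the gap splitting above (values assumed): with
   gaps = { [64,512), [1024,4096) } and an allocation of aligned_size 512
   taken from gap 1, the array becomes
   { [64,512), [1024,1024), [1536,4096) }: the chosen gap is split into an
   empty gap before the new block and a remaining gap after it.  The
   matching free_medium_block_in_page below merges the two gaps back
   together.  */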
static void
free_medium_block_in_page (uintptr_t block, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  size_t offset = block - page;

  /* Search for the gap that ends where this block begins.
     We can ignore the last gap here, since it ends where the page ends.  */
  struct memory_range *gaps = pageptr->gaps;
  size_t lo = 0;
  size_t hi = pageptr->num_gaps - 1;
  size_t index;
  while (lo < hi)
    {
      /* Invariant:
         for i < lo, gaps[i].end < offset,
         for i >= hi, gaps[i].end > offset.  */
      size_t mid = (hi + lo) >> 1; /* >= lo, < hi */
      if (offset > gaps[mid].end)
        lo = mid + 1;
      else if (offset < gaps[mid].end)
        hi = mid;
      else
        {
          /* Found it: offset == gaps[mid].end.  */
          index = mid;
          goto found;
        }
    }
  /* Invalid argument: block is not the start of a currently allocated
     block.  */
  abort ();

 found:
  /* Here 0 <= index < pageptr->num_gaps - 1.
     Combine the gaps index and index+1.  */
  pageptr->common.free_space += gaps[index + 1].start - gaps[index].end;
  if (pageptr->common.free_space < gaps[index + 1].start - gaps[index].end)
    /* Wrap around.  */
    abort ();

  gaps[index].end = gaps[index + 1].end;

  size_t num_gaps = pageptr->num_gaps - 1;
  size_t i;
  for (i = index + 1; i < num_gaps; i++)
    gaps[i] = gaps[i + 1];
  pageptr->num_gaps = num_gaps;
}
/* Management of pages of medium blocks.  */
struct page_pool medium_block_pages =
  {
    init_medium_block_page_pool,
    init_medium_block_page,
    allocate_medium_block_in_page,
    free_medium_block_in_page
  };
/* ==================== Pages of small and medium blocks ==================== */
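
/* Strategy of allocate_block_from_pool below, from cheapest to most
   expensive attempt: (1) the page of the last successful allocation,
   (2) the managed_pages tree, starting at the page with the smallest
   sufficient free space (best fit), (3) the cached freeable_page,
   (4) a fresh page from ALLOC_PAGES.  */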
/* Allocates a block of memory from the given pool, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static inline uintptr_t
allocate_block_from_pool (size_t size, struct page_pool *pool)
{
  uintptr_t page;

  /* Try in the last used page first.  */
  page = pool->last_page;
  if (likely (page != 0))
    {
      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (likely (block != 0))
        {
          add_update (page, pool);
          return block;
        }
    }

  /* Ensure that the pool and its managed_pages are initialized.  */
  if (unlikely (pool->managed_pages == NULL))
    {
      pool->managed_pages =
        gl_oset_nx_create_empty (GL_RBTREE_OSET, compare_pages_by_free_space, NULL);
      if (unlikely (pool->managed_pages == NULL))
        /* Could not allocate the managed_pages.  */
        return 0;
      pool->init_page_pool (pool);
    }

  /* Ensure that managed_pages is up-to-date.  */
  flush_all_updates (pool);

  /* Try in the other managed_pages.  */
  {
    gl_oset_iterator_t iter =
      gl_oset_iterator_atleast (pool->managed_pages,
                                page_free_space_is_at_least,
                                (void *) (uintptr_t) size);
    const void *elt;
    while (gl_oset_iterator_next (&iter, &elt))
      {
        struct page_tree_element *element = (struct page_tree_element *) elt;
        page = element->page;
        /* No need to try the last used page again.  */
        if (likely (page != pool->last_page))
          {
            uintptr_t block = pool->allocate_block_in_page (size, page);
            if (likely (block != 0))
              {
                gl_oset_iterator_free (&iter);
                add_update (page, pool);
                pool->last_page = page;
                return block;
              }
          }
      }
    gl_oset_iterator_free (&iter);
  }

  /* If we have a freeable page ready for reuse, use it.  */
  if (pool->freeable_page != 0)
    {
      page = pool->freeable_page;
      pool->init_page (page);
      struct page_tree_element *element =
        (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
      if (unlikely (element == NULL))
        {
          /* Could not allocate the tree element.  */
          pool->last_page = 0;
          return 0;
        }
      element->page = page;
      element->free_space = ((struct dissected_page_header *) page)->free_space;
      if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
        {
          /* Could not allocate the tree node.  */
          free (element);
          pool->last_page = 0;
          return 0;
        }
      ((struct dissected_page_header *) page)->tree_element = element;
      pool->freeable_page = 0;

      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (block == 0)
        /* If the size is too large for an empty page, this function should not
           have been invoked.  */
        abort ();
      add_update (page, pool);
      pool->last_page = page;
      return block;
    }

  /* Allocate a fresh page.  */
  page = ALLOC_PAGES (PAGESIZE);
  if (unlikely (page == 0))
    {
      /* Failed.  */
      pool->last_page = 0;
      return 0;
    }
  if ((page & (PAGESIZE - 1)) != 0)
    /* ALLOC_PAGES's result is not aligned as expected.  */
    abort ();

  pool->init_page (page);
  struct page_tree_element *element =
    (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
  if (unlikely (element == NULL))
    {
      /* Could not allocate the tree element.  */
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  element->page = page;
  element->free_space = ((struct dissected_page_header *) page)->free_space;
  if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
    {
      /* Could not allocate the tree node.  */
      free (element);
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  ((struct dissected_page_header *) page)->tree_element = element;

  uintptr_t block = pool->allocate_block_in_page (size, page);
  if (block == 0)
    /* If the size is too large for an empty page, this function should not
       have been invoked.  */
    abort ();
  add_update (page, pool);
  pool->last_page = page;
  return block;
}
static void
free_block_from_pool (uintptr_t block, uintptr_t page, struct page_pool *pool)
{
  if (pool->page_capacity == 0)
    /* Invalid argument: The block is not valid, since the pool has not yet
       been initialized.  */
    abort ();

  pool->free_block_in_page (block, page);

  struct dissected_page_header *pageptr = (struct dissected_page_header *) page;
  if (likely (pageptr->free_space != pool->page_capacity))
    {
      /* The page is not entirely free.  */
      add_update (page, pool);
    }
  else
    {
      /* The page is now entirely free.  */
      /* Remove its tree element and free it.  */
      struct page_tree_element *element = pageptr->tree_element;
      if (!gl_oset_remove (pool->managed_pages, element))
        /* Invalid state: The element is not in the managed_pages.  */
        abort ();
      free (element);

      if (pool->last_page == page)
        pool->last_page = 0;

      drop_update (page, pool);

      /* If we would now have two freeable pages, free the old one.  */
      if (pool->freeable_page != 0)
        FREE_PAGES (pool->freeable_page, PAGESIZE);

      /* Don't free the page now, but later.  */
#ifdef __CHERI_PURE_CAPABILITY__
      pool->freeable_page = pageptr->whole_page;
#else
      pool->freeable_page = page;
#endif
    }
}
/* Lock that protects the management of small and medium blocks from
   simultaneous access from multiple threads.  */
gl_lock_define_initialized(static, ssfmalloc_lock)
/* ============================== Large blocks ============================== */

/* Header of a page sequence for a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct large_block_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  unsigned char page_type; /* large_page_type */
};

/* Information about a large block.
   Ends at an address that is a multiple of ALIGNMENT.  */
struct large_block_caption
{
  size_t pages_size; /* A multiple of PAGESIZE.  */
};

/* Size of large block page header, gap, and caption.  */
#define LARGE_BLOCK_OFFSET \
  ((sizeof (struct large_block_header) + sizeof (struct large_block_caption) \
    + ALIGNMENT - 1) & -ALIGNMENT)
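
/* Resulting layout of a large block's page sequence (illustration):

     pages                                  pages + LARGE_BLOCK_OFFSET
     |                                      |
     v                                      v
     | header | padding | caption | payload (size bytes) ...

   The padding makes the caption end at pages + LARGE_BLOCK_OFFSET, a
   multiple of ALIGNMENT, so free_block can read pages_size from the
   caption located immediately before the block:
   ((struct large_block_caption *) block)[-1].pages_size.  */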
/* =========================== Exported functions =========================== */

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t
allocate_block (size_t size)
{
  uintptr_t block;

  if (unlikely (size > MEDIUM_BLOCKS_PAGE_CAPACITY))
    {
      /* Allocate a large block.  */
      size_t pages_size = (size + LARGE_BLOCK_OFFSET + PAGESIZE - 1) & -PAGESIZE;
      uintptr_t pages = ALLOC_PAGES (pages_size);
      if (unlikely (pages == 0))
        /* Failed.  */
        return 0;
      if ((pages & (PAGESIZE - 1)) != 0)
        /* ALLOC_PAGES's result is not aligned as expected.  */
        abort ();
      ((struct any_page_header *) pages)->page_type = large_page_type;
      block = pages + LARGE_BLOCK_OFFSET;
      ((struct large_block_caption *) block)[-1].pages_size = pages_size;
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool =
        (size <= SMALL_BLOCK_MAX_SIZE ? &small_block_pages : &medium_block_pages);
      block = allocate_block_from_pool (size, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
#if defined __CHERI_PURE_CAPABILITY__
      if (block != 0)
        {
          size_t offset = block & (PAGESIZE - 1);
          block = (uintptr_t) cheri_bounds_set ((void *) (block - offset),
                                                offset + size)
                  + offset;
        }
#endif
    }
  return block;
}
/* Frees a block of memory, returned by allocate_block.  */
static void
free_block (uintptr_t block)
{
  if (block == 0 || (block % ALIGNMENT) != 0)
    /* Invalid argument.  */
    abort ();
  uintptr_t pages = block & -PAGESIZE;
  unsigned char type = ((struct any_page_header *) pages)->page_type;
  if (unlikely (type == large_page_type))
    {
      if (block != pages + LARGE_BLOCK_OFFSET)
        /* Invalid argument.  */
        abort ();
      size_t pages_size = ((struct large_block_caption *) block)[-1].pages_size;
      if ((pages_size & (PAGESIZE - 1)) != 0)
        /* Invalid memory state: pages_size not as expected.  */
        abort ();
      FREE_PAGES (pages, pages_size);
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool;
      if (type == small_page_type)
        pool = &small_block_pages;
      else if (type == medium_page_type)
        pool = &medium_block_pages;
      else
        /* Invalid memory state: type not as expected.  */
        abort ();
      free_block_from_pool (block, pages, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
}