/* Simple and straight-forward malloc implementation (front end).

   Copyright (C) 2020-2021 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */
/* This file implements an allocator of memory blocks of given size (a
   "malloc front end"), based on an allocator of memory pages (a "malloc
   back end").

   The need for such an allocator arises because a memory block is often
   50 bytes large or less, whereas an allocator of memory pages provides
   entire pages (4096 bytes or more).

   This implementation attempts to be
     - simple and straight-forward,
     - respecting locality of reference,
     - usable for small allocations,
     - nevertheless of reasonable speed.

   Simple and straight-forward - means that it contains only a small amount
   of code (compared to e.g. tcmalloc).

   Respecting locality of reference - means that searching for a free block
   will not follow lists of pointers that touch unrelated cache lines in
   the same page or even unrelated memory pages, because that would cause
   cache misses or even swapping in of unrelated memory pages.

   Usable for small allocations - means that it can be used for data types
   with few instances.  It does not, unlike some other malloc implementations,
   allocate 256 KB of memory up-front.  Nor does it allocate memory pages
   per thread.

   Reasonable speed is nevertheless guaranteed by
     - choosing algorithms that lead to little fragmentation,
     - small caches where they make sense.  */
/* The user of this file needs to define the following macros before including
   this file:

     PAGESIZE           A variable-like macro of type intptr_t or uintptr_t
                        that evaluates to the memory page size (>= 4096).

     PAGESIZE_MAX       A constant that specifies an upper bound for PAGESIZE.

     ALLOC_PAGES        A function-like macro with the signature
                          uintptr_t ALLOC_PAGES (size_t size)
                        where the argument size is > 0 and a multiple of the
                        PAGESIZE.  It returns a multiple of PAGESIZE, or 0
                        upon failure.

     FREE_PAGES         A function-like macro with the signature
                          void FREE_PAGES (uintptr_t pages, size_t size)
                        where pages is a non-zero value returned by
                        ALLOC_PAGES (size).

     ALIGNMENT          A constant that specifies the desired alignment of all
                        the returned memory blocks.  Possible values are the
                        powers of 2, from sizeof (void *) to 32.

     PAGE_RESERVED_HEADER_SIZE   A constant, either 0 or a multiple of
                        sizeof (void *), that denotes the size of a reserved
                        header - not to be used by the application - at the
                        beginning of a page sequence returned by ALLOC_PAGES.  */
/* =================== Declarations of exported functions =================== */

#include <stdint.h>

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t allocate_block (size_t size);

/* Frees a block of memory, returned by allocate_block.  */
static void free_block (uintptr_t block);
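
/* Usage sketch (not part of this file's contract): on a POSIX system that
   provides MAP_ANONYMOUS, the includer could define the required macros on
   top of mmap, for instance as follows.  The helper mmap_pages and the
   chosen PAGESIZE_MAX value are merely assumptions of this example.

     #include <unistd.h>
     #include <sys/mman.h>

     static uintptr_t
     mmap_pages (size_t size)
     {
       void *p = mmap (NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       return (p == MAP_FAILED ? 0 : (uintptr_t) p);
     }

     #define PAGESIZE sysconf (_SC_PAGESIZE)
     #define PAGESIZE_MAX 65536
     #define ALLOC_PAGES(size) mmap_pages (size)
     #define FREE_PAGES(pages, size) munmap ((void *) (pages), (size))
     #define ALIGNMENT sizeof (void *)
     #define PAGE_RESERVED_HEADER_SIZE 0
     #include "ssfmalloc.h"

   After inclusion, blocks are obtained and released through the two
   functions declared above:

     uintptr_t block = allocate_block (50);
     if (block == 0)
       ...handle out-of-memory...
     free_block (block);
*/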
/* ============================= Implementation ============================= */

/* Outline of the implementation decisions (ID):

   * ID: This implementation considers three types of blocks:
     - small blocks - these are allocated in "small block" pages.
     - medium blocks - these are allocated in "medium block" pages.
     - large blocks - these are allocated individually, with a page or
       sequence of pages uniquely for this block.
   * Rationale:
     - Most memory allocations are small (e.g. <= 32 bytes); this is a lesson
       learned from xc/programs/Xserver/os/xalloc.c (1997) [Pascal Haible].
     - Fragmentation is one of the biggest problems, and keeping large
       blocks and small blocks separate from medium blocks is one way to
       control it.

   * ID: If an allocation succeeds in one page, the next allocation (of the
     same type of block) will try to use the same page.
   * Rationale: Locality of reference.

   * ID: Pages of small or medium blocks have their management data structures
     concentrated at the beginning of the page.  No chained lists that force
     a walk through the page.
   * Rationale: Locality of reference.

   * ID: Across pages, the management of the free space is done through data
     structures outside the pages.  No chained lists across pages.
   * Rationale: Locality of reference.  */
#include <stdlib.h>
#include <string.h>
#include "flexmember.h"
#include "glthread/lock.h"
#include "thread-optim.h"
#include "gl_oset.h"
#include "gl_rbtree_oset.h"

/* Help the branch prediction.  */
#if __GNUC__ >= 3
# define likely(cond)   __builtin_expect ((cond), 1)
# define unlikely(cond) __builtin_expect ((cond), 0)
#else
# define likely(cond)   (cond)
# define unlikely(cond) (cond)
#endif
enum { small_page_type = 1, medium_page_type = 2, large_page_type = 3 };

/* Header of a page of small or medium blocks or of a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct any_page_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  /* small_page_type or medium_page_type or large_page_type */
  unsigned char page_type;
};
/* ========================= Small and medium blocks ======================== */

/* An integer type, capable of holding the values 0 .. PAGESIZE.  */
#if PAGESIZE_MAX >= 0x10000
typedef unsigned int pg_offset_t;
#else
typedef unsigned short pg_offset_t;
#endif
/* Tree element that corresponds to a page.
   These tree elements are allocated via malloc().  */
struct page_tree_element
{
  uintptr_t page;
  pg_offset_t free_space;
};
/* Header of a page of small or medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct dissected_page_header
{
  struct any_page_header common;
  /* Amount of free space in this page.  Always a multiple of ALIGNMENT.  */
  pg_offset_t free_space;
  /* The tree element.  */
  struct page_tree_element *tree_element;
};
/* Data structure for managing a pool of pages.  */
struct page_pool
{
  /* Methods.  */
  void (*init_page_pool) (struct page_pool *pool);
  void (*init_page) (uintptr_t page);
  uintptr_t (*allocate_block_in_page) (size_t size, uintptr_t page);
  void (*free_block_in_page) (uintptr_t block, uintptr_t page);

  /* Maximum free space in a page of this pool.  */
  size_t page_capacity;

  /* Page that provided the last successful allocation from this pool,
     or 0.  */
  uintptr_t last_page;

  /* Ordered set of managed pages, sorted according to free_space, in ascending
     order.  */
  gl_oset_t /* <struct page_tree_element *> */ managed_pages;

  /* A queue of pages which have a modified free_space but which have not been
     updated in the managed_pages tree so far.  */
#define UPDATE_QUEUE_SIZE 10
  unsigned int update_queue_count; /* <= UPDATE_QUEUE_SIZE */
  uintptr_t update_queue[UPDATE_QUEUE_SIZE];
  /* A page that could be freed.
     We don't free it immediately, so that an allocation/deallocation
     pattern like
       2x allocate, 2x free, 2x allocate, 2x free, 2x allocate, 2x free, ...
     does not allocate and free a page that frequently.  */
  uintptr_t freeable_page;
};
/* Comparison function for managed_pages.  */
static int
compare_pages_by_free_space (const void *elt1, const void *elt2)
{
  struct page_tree_element *element1 = (struct page_tree_element *) elt1;
  struct page_tree_element *element2 = (struct page_tree_element *) elt2;
  int cmp = _GL_CMP (element1->free_space, element2->free_space);
  if (unlikely (cmp == 0))
    cmp = _GL_CMP (element1->page, element2->page);
  return cmp;
}
/* Tests whether the free space in a tree element is greater than or equal to
   the given threshold.  */
static bool
page_free_space_is_at_least (const void *elt, const void *threshold)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  return element->free_space >= (uintptr_t) threshold;
}
/* Updates the free space of a 'struct page_tree_element *'.
   Only to be called through gl_oset_update!  */
static void
set_free_space (const void *elt, void *action_data)
{
  struct page_tree_element *element = (struct page_tree_element *) elt;

  element->free_space = (pg_offset_t) (uintptr_t) action_data;
}
/* Executes the pending updates in the managed_pages tree.  */
static void
flush_all_updates (struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  while (likely (count > 0))
    {
      --count;
      uintptr_t page = pool->update_queue[count];
      struct dissected_page_header *pageptr =
        (struct dissected_page_header *) page;
      struct page_tree_element *tree_element = pageptr->tree_element;
      if (gl_oset_update (pool->managed_pages, tree_element,
                          set_free_space,
                          (void *) (uintptr_t) pageptr->free_space)
          < 0)
        /* A collision was found.  This contradicts the definition of
           compare_pages_by_free_space.  */
        abort ();
    }
  pool->update_queue_count = 0;
}
/* Adds a page to the update queue.
   This function has to be called when the free_space of the page has
   changed.  */
static inline void
add_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      /* It's already in the queue.  */
      return;

  /* Ensure there is room for adding one more page to the update queue.  */
  if (unlikely (count == UPDATE_QUEUE_SIZE))
    flush_all_updates (pool);

  /* Add it to the update queue.  */
  pool->update_queue[pool->update_queue_count++] = page;
}
/* Drops a page from the update queue.  */
static inline void
drop_update (uintptr_t page, struct page_pool *pool)
{
  size_t count = pool->update_queue_count;
  size_t i;
  for (i = 0; i < count; i++)
    if (pool->update_queue[i] == page)
      {
        /* It's in the queue.  Remove it.  */
        for (i = i + 1; i < count; i++)
          pool->update_queue[i - 1] = pool->update_queue[i];
        pool->update_queue_count--;
        return;
      }
}
/* ============================== Small blocks ============================== */

#include "ssfmalloc-bitmap.h"

/* Maximum size of a small block.
   Must be a power of 2.  */
#define SMALL_BLOCK_MAX_SIZE (ALIGNMENT < 8 ? 32 * ALIGNMENT : 256)
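
/* For example, with ALIGNMENT == 8 this is 256, and with ALIGNMENT == 4 it
   is 128.  Either way a small block occupies at most 32 rows of ALIGNMENT
   bytes, so the number of consecutive bitmap bits that represent one block
   always fits in a 32-bit word (see the check c <= 32 in
   allocate_small_block_in_page below).  */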
/* Number of rows of ALIGNMENT bytes available in an empty page.  */
static unsigned int small_block_page_num_bits;
/* Offset in the page where the memory blocks start.
   A multiple of ALIGNMENT.  */
static unsigned int small_block_page_blocks_start;
/* Number of uint32_t words in each of the two bitmaps.  */
static unsigned int small_block_page_num_bitmap_words;
/* Header of a page of small blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct small_page_header
{
  struct dissected_page_header common;
  /* Two bitmaps, each with small_block_page_num_bitmap_words words.  In each,
     a bit represents ALIGNMENT bytes.
     - available_bitmap: bit set means available, bit clear means allocated.
     - blockend_bitmap: bit set means an allocated block ends here.  */
  uint32_t bitmap_words[FLEXIBLE_ARRAY_MEMBER];
};
static inline uint32_t *
small_block_page_available_bitmap (struct small_page_header *pageptr)
{
  return &pageptr->bitmap_words[0];
}

static inline uint32_t *
small_block_page_blockend_bitmap (struct small_page_header *pageptr)
{
  return &pageptr->bitmap_words[small_block_page_num_bitmap_words];
}
static void
init_small_block_page_pool (struct page_pool *pool)
{
  /* How many usable rows of ALIGNMENT bytes can we have?
     Each takes ALIGNMENT bytes + 1/8 byte in each bitmap, so approximately
     (ALIGNMENT + 1/4) bytes.  */
  unsigned int num_bits = (unsigned int) (4 * PAGESIZE) / (4 * ALIGNMENT + 1);
  unsigned int num_bitmap_words;
  unsigned int blocks_start;
  /* Iterate until it converges.  */
  for (;;)
    {
      num_bitmap_words = (num_bits + 32 - 1) / 32;
      blocks_start =
        (FLEXSIZEOF (struct small_page_header, bitmap_words,
                     2 * num_bitmap_words * sizeof (uint32_t))
         + ALIGNMENT - 1) & -ALIGNMENT;
      unsigned int num_bits_r = (unsigned int) (PAGESIZE - blocks_start) / ALIGNMENT;
      if (num_bits_r >= num_bits)
        break;
      num_bits = num_bits_r;
    }
  small_block_page_num_bits = num_bits;
  small_block_page_num_bitmap_words = num_bitmap_words;
  small_block_page_blocks_start = blocks_start;

  pool->page_capacity = small_block_page_num_bits * ALIGNMENT;
}
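
/* A worked example of the iteration above (illustrative, not normative):
   assume PAGESIZE == 4096, ALIGNMENT == 8, PAGE_RESERVED_HEADER_SIZE == 0,
   and a 64-bit platform where bitmap_words starts at offset 16.  The initial
   estimate is num_bits = 16384 / 33 = 496, hence num_bitmap_words = 16, and
   the header plus the two bitmaps occupy 16 + 2 * 16 * 4 = 144 bytes, so
   blocks_start = 144.  That leaves only (4096 - 144) / 8 = 494 rows; a
   second round with num_bits = 494 produces the same num_bitmap_words and
   blocks_start, so the loop converges, and the page capacity is
   494 * 8 = 3952 bytes.  */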
static void
init_small_block_page (uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;
  pageptr->common.common.page_type = small_page_type;

  /* Initialize available_bitmap.  */
  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  init_bitmap_all_bits_set (small_block_page_num_bitmap_words,
                            available_bitmap);
  if ((small_block_page_num_bits % 32) != 0)
    available_bitmap[small_block_page_num_bitmap_words - 1] =
      (1U << (small_block_page_num_bits % 32)) - 1;

  /* Initialize blockend_bitmap.  */
  init_bitmap_all_bits_clear (small_block_page_num_bitmap_words,
                              small_block_page_blockend_bitmap (pageptr));

  pageptr->common.free_space = small_block_page_num_bits * ALIGNMENT;
}
/* Allocates a block of memory of size <= SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_small_block_in_page (size_t size, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  /* glibc compatible.  */
  if (size == 0)
    size = 1;

  /* Number of consecutive bits to look for in the bitmap.  */
  size_t c = (size + ALIGNMENT - 1) / ALIGNMENT;

  /* SMALL_BLOCK_MAX_SIZE has been chosen so that c <= 32.  */
  if (!(c > 0 && c <= 32))
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  size_t k = find_first_packet_set (small_block_page_num_bitmap_words,
                                    available_bitmap,
                                    c);
  if (unlikely (k == (size_t)(-1)))
    /* Failed to find c consecutive available rows of ALIGNMENT bytes each.  */
    return 0;

  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);
  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] &= ~(((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] |= (1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] &= ~(-1U << i);
      available_bitmap[j + 1] &= ~((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] |= (1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space -= c * ALIGNMENT;

  return page + small_block_page_blocks_start + k * ALIGNMENT;
}
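
/* For illustration, assume ALIGNMENT == 8 and a request of size 50, so that
   c = 7 consecutive bits are needed.  If the search returns k = 30, then
   j = 0, i = 30, and i + c = 37 > 32, i.e. the block spans two bitmap words:
   bits 30..31 of available_bitmap[0] and bits 0..4 of available_bitmap[1]
   are cleared, and bit 4 of blockend_bitmap[1] (bit 36 overall, the last
   row of the block) is set.  free_small_block_in_page below performs the
   exact inverse of these bit manipulations.  */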
static void
free_small_block_in_page (uintptr_t block, uintptr_t page)
{
  struct small_page_header *pageptr = (struct small_page_header *) page;

  if (!(block >= page + small_block_page_blocks_start
        && (block % ALIGNMENT) == 0))
    /* Invalid argument.  */
    abort ();

  uint32_t *available_bitmap = small_block_page_available_bitmap (pageptr);
  uint32_t *blockend_bitmap = small_block_page_blockend_bitmap (pageptr);

  /* The bit that corresponds to where the block starts.  */
  size_t k = (block - page - small_block_page_blocks_start) / ALIGNMENT;
  /* The bit that corresponds to where the block ends.  */
  size_t ke = find_first_bit_set (small_block_page_num_bitmap_words,
                                  blockend_bitmap,
                                  k);
  if (/* ke == (size_t)(-1) || */ ke >= k + 32)
    /* Invalid argument or invalid state.  */
    abort ();

  /* Number of consecutive bits to manipulate in the bitmap.  */
  size_t c = ke - k + 1;

  size_t j = k / 32;
  size_t i = k % 32;
  if (i + c <= 32)
    {
      available_bitmap[j] |= (((2U << (c - 1)) - 1) << i);
      blockend_bitmap[j] &= ~(1U << (i + c - 1));
    }
  else
    {
      available_bitmap[j] |= (-1U << i);
      available_bitmap[j + 1] |= ((1U << (i + c - 32)) - 1);
      blockend_bitmap[j + 1] &= ~(1U << (i + c - 1 - 32));
    }

  pageptr->common.free_space += c * ALIGNMENT;
}
/* Management of pages of small blocks.  */
struct page_pool small_block_pages =
  {
    init_small_block_page_pool,
    init_small_block_page,
    allocate_small_block_in_page,
    free_small_block_in_page
  };
/* ============================== Medium blocks ============================= */

/* A range of memory in a page.
   It covers the address range [page+start, page+end).
   start <= end.  */
struct memory_range
{
  pg_offset_t start;
  pg_offset_t end;
};

/* Header of a page of medium blocks.
   Lies at an address that is a multiple of PAGESIZE.  */
struct medium_page_header
{
  struct dissected_page_header common;
  /* If n blocks are allocated, there are n+1 gaps before, between, and
     after them.  Keep them in an array, sorted in ascending order.  */
  unsigned int num_gaps; /* > 0 */
  struct memory_range gaps[FLEXIBLE_ARRAY_MEMBER /* PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1 */];
};

#define MEDIUM_BLOCKS_PAGE_MAX_GAPS \
  (PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
#define MEDIUM_BLOCKS_PAGE_FIRST_GAP_START \
  ((FLEXSIZEOF (struct medium_page_header, gaps, \
                MEDIUM_BLOCKS_PAGE_MAX_GAPS * sizeof (struct memory_range)) \
    + ALIGNMENT - 1) & -ALIGNMENT)
#define MEDIUM_BLOCKS_PAGE_LAST_GAP_END \
  PAGESIZE
#define MEDIUM_BLOCKS_PAGE_CAPACITY \
  (MEDIUM_BLOCKS_PAGE_LAST_GAP_END - MEDIUM_BLOCKS_PAGE_FIRST_GAP_START)
static void
init_medium_block_page_pool (struct page_pool *pool)
{
  pool->page_capacity = MEDIUM_BLOCKS_PAGE_CAPACITY;
}

static void
init_medium_block_page (uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  pageptr->common.common.page_type = medium_page_type;
  pageptr->num_gaps = 1;
  pageptr->gaps[0].start = MEDIUM_BLOCKS_PAGE_FIRST_GAP_START;
  pageptr->gaps[0].end = MEDIUM_BLOCKS_PAGE_LAST_GAP_END;
  pageptr->common.free_space = MEDIUM_BLOCKS_PAGE_CAPACITY;
}
/* Allocates a block of memory of size > SMALL_BLOCK_MAX_SIZE,
   aligned to ALIGNMENT bytes, from the given page.
   Returns 0 upon failure.  */
static uintptr_t
allocate_medium_block_in_page (size_t size, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;

  /* Walk through the gaps and remember the smallest gap of at least
     the given size.  */
  size_t best_i = (size_t)(-1);
  size_t best_length = (size_t)(-1);
  size_t num_gaps = pageptr->num_gaps;
  size_t i;
  for (i = 0; i < num_gaps; i++)
    {
      size_t length = pageptr->gaps[i].end - pageptr->gaps[i].start;
      if (length >= size)
        {
          /* Found a gap of sufficient size.  */
          if (length < best_length)
            {
              best_i = i;
              best_length = length;
            }
        }
    }
  if (unlikely (best_i == (size_t)(-1)))
    /* Failed to find a gap of sufficient size.  */
    return 0;

  size_t aligned_size = (size + ALIGNMENT - 1) & -ALIGNMENT;

  if (pageptr->common.free_space < aligned_size)
    /* Invalid state: Less free space than expected.  */
    abort ();

  /* Split the gap, leaving an empty gap and a remaining gap.  */
  for (i = num_gaps - 1; ; i--)
    {
      pageptr->gaps[i + 1] = pageptr->gaps[i];
      if (i == best_i)
        break;
    }
  size_t result = pageptr->gaps[best_i].start;
  pageptr->gaps[best_i].end = result;
  pageptr->gaps[best_i + 1].start = result + aligned_size;
  pageptr->num_gaps = num_gaps + 1;
  if (pageptr->num_gaps > PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
    /* Invalid state: More gaps than expected.  */
    abort ();

  pageptr->common.free_space -= aligned_size;

  return page + result;
}
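
/* For illustration, suppose the gaps array of a page is
     { [144,144), [400,1100), [2000,4096) }
   (hypothetical offsets, PAGESIZE == 4096) and a block of size 296 is
   requested, with ALIGNMENT == 8, so aligned_size == 296.  The best fit is
   [400,1100), the smallest gap that is large enough; it is split into the
   empty gap [400,400) and the remainder [696,1100), and the function
   returns page + 400.  */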
static void
free_medium_block_in_page (uintptr_t block, uintptr_t page)
{
  struct medium_page_header *pageptr = (struct medium_page_header *) page;
  size_t offset = block - page;

  /* Search for the gap that ends where this block begins.
     We can ignore the last gap here, since it ends where the page ends.  */
  struct memory_range *gaps = pageptr->gaps;
  size_t lo = 0;
  size_t hi = pageptr->num_gaps - 1;
  size_t index;
  while (lo < hi)
    {
      /* Invariant:
         for i < lo, gaps[i].end < offset,
         for i >= hi, gaps[i].end > offset.  */
      size_t mid = (hi + lo) >> 1; /* >= lo, < hi */
      if (offset > gaps[mid].end)
        lo = mid + 1;
      else if (offset < gaps[mid].end)
        hi = mid;
      else
        {
          /* Found it: offset == gaps[mid].end.  */
          index = mid;
          goto found;
        }
    }
  /* Invalid argument: block is not the start of a currently allocated
     block.  */
  abort ();

 found:
  /* Here 0 <= index < pageptr->num_gaps - 1.
     Combine the gaps index and index+1.  */
  pageptr->common.free_space += gaps[index + 1].start - gaps[index].end;
  if (pageptr->common.free_space < gaps[index + 1].start - gaps[index].end)
    /* Wrap around.  */
    abort ();

  gaps[index].end = gaps[index + 1].end;

  size_t num_gaps = pageptr->num_gaps - 1;
  size_t i;
  for (i = index + 1; i < num_gaps; i++)
    gaps[i] = gaps[i + 1];
  pageptr->num_gaps = num_gaps;
}
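
/* Continuing the illustration above: freeing the block at page + 400 finds,
   by binary search, the gap [400,400) whose end equals the block's offset.
   The gap [696,1100) that follows it starts where the block ends, so
   696 - 400 = 296 bytes are added back to free_space, and the two gaps are
   merged into the single gap [400,1100), restoring the original state.  */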
/* Management of pages of medium blocks.  */
struct page_pool medium_block_pages =
  {
    init_medium_block_page_pool,
    init_medium_block_page,
    allocate_medium_block_in_page,
    free_medium_block_in_page
  };
/* ==================== Pages of small and medium blocks ==================== */

/* Allocates a block of memory from the given pool, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static inline uintptr_t
allocate_block_from_pool (size_t size, struct page_pool *pool)
{
  uintptr_t page;

  /* Try in the last used page first.  */
  page = pool->last_page;
  if (likely (page != 0))
    {
      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (likely (block != 0))
        {
          add_update (page, pool);
          return block;
        }
    }

  /* Ensure that the pool and its managed_pages are initialized.  */
  if (unlikely (pool->managed_pages == NULL))
    {
      pool->managed_pages =
        gl_oset_nx_create_empty (GL_RBTREE_OSET, compare_pages_by_free_space, NULL);
      if (unlikely (pool->managed_pages == NULL))
        /* Could not allocate the managed_pages.  */
        return 0;
      pool->init_page_pool (pool);
    }

  /* Ensure that managed_pages is up-to-date.  */
  flush_all_updates (pool);

  /* Try in the other managed_pages.  */
  {
    gl_oset_iterator_t iter =
      gl_oset_iterator_atleast (pool->managed_pages,
                                page_free_space_is_at_least,
                                (void *) (uintptr_t) size);
    const void *elt;
    while (gl_oset_iterator_next (&iter, &elt))
      {
        struct page_tree_element *element = (struct page_tree_element *) elt;
        page = element->page;
        /* No need to try the last used page again.  */
        if (likely (page != pool->last_page))
          {
            uintptr_t block = pool->allocate_block_in_page (size, page);
            if (likely (block != 0))
              {
                gl_oset_iterator_free (&iter);
                add_update (page, pool);
                pool->last_page = page;
                return block;
              }
          }
      }
    gl_oset_iterator_free (&iter);
  }
  /* If we have a freeable page ready for reuse, use it.  */
  if (pool->freeable_page != 0)
    {
      page = pool->freeable_page;
      pool->init_page (page);
      struct page_tree_element *element =
        (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
      if (unlikely (element == NULL))
        {
          /* Could not allocate the tree element.  */
          pool->last_page = 0;
          return 0;
        }
      element->page = page;
      element->free_space = ((struct dissected_page_header *) page)->free_space;
      if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
        {
          /* Could not allocate the tree node.  */
          free (element);
          pool->last_page = 0;
          return 0;
        }
      ((struct dissected_page_header *) page)->tree_element = element;
      pool->freeable_page = 0;

      uintptr_t block = pool->allocate_block_in_page (size, page);
      if (block == 0)
        /* If the size is too large for an empty page, this function should not
           have been invoked.  */
        abort ();
      add_update (page, pool);
      pool->last_page = page;
      return block;
    }
  /* Allocate a fresh page.  */
  page = ALLOC_PAGES (PAGESIZE);
  if (unlikely (page == 0))
    {
      /* Failed.  */
      pool->last_page = 0;
      return 0;
    }
  if ((page & (PAGESIZE - 1)) != 0)
    /* ALLOC_PAGES's result is not aligned as expected.  */
    abort ();

  pool->init_page (page);
  struct page_tree_element *element =
    (struct page_tree_element *) malloc (sizeof (struct page_tree_element));
  if (unlikely (element == NULL))
    {
      /* Could not allocate the tree element.  */
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  element->page = page;
  element->free_space = ((struct dissected_page_header *) page)->free_space;
  if (unlikely (gl_oset_nx_add (pool->managed_pages, element) < 0))
    {
      /* Could not allocate the tree node.  */
      free (element);
      FREE_PAGES (page, PAGESIZE);
      pool->last_page = 0;
      return 0;
    }
  ((struct dissected_page_header *) page)->tree_element = element;

  uintptr_t block = pool->allocate_block_in_page (size, page);
  if (block == 0)
    /* If the size is too large for an empty page, this function should not
       have been invoked.  */
    abort ();
  add_update (page, pool);
  pool->last_page = page;
  return block;
}
static void
free_block_from_pool (uintptr_t block, uintptr_t page, struct page_pool *pool)
{
  if (pool->page_capacity == 0)
    /* Invalid argument: The block is not valid, since the pool has not yet
       been initialized.  */
    abort ();

  pool->free_block_in_page (block, page);

  struct dissected_page_header *pageptr = (struct dissected_page_header *) page;
  if (likely (pageptr->free_space != pool->page_capacity))
    {
      /* The page is not entirely free.  */
      add_update (page, pool);
    }
  else
    {
      /* The page is now entirely free.  */
      /* Remove its tree element and free it.  */
      struct page_tree_element *element = pageptr->tree_element;
      if (!gl_oset_remove (pool->managed_pages, element))
        /* Invalid state: The element is not in the managed_pages.  */
        abort ();
      free (element);

      if (pool->last_page == page)
        pool->last_page = 0;

      drop_update (page, pool);

      /* If we would now have two freeable pages, free the old one.  */
      if (pool->freeable_page != 0)
        FREE_PAGES (pool->freeable_page, PAGESIZE);

      /* Don't free the page now, but later.  */
      pool->freeable_page = page;
    }
}
/* Lock that protects the management of small and medium blocks from
   simultaneous access from multiple threads.  */
gl_lock_define_initialized(static, ssfmalloc_lock)
/* ============================== Large blocks ============================== */

/* Header of a page sequence for a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct large_block_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  unsigned char page_type; /* large_page_type */
};

/* Information about a large block.
   Ends at an address that is a multiple of ALIGNMENT.  */
struct large_block_caption
{
  size_t pages_size; /* A multiple of PAGESIZE.  */
};

/* Size of large block page header, gap, and caption.  */
#define LARGE_BLOCK_OFFSET \
  ((sizeof (struct large_block_header) + sizeof (struct large_block_caption) \
    + ALIGNMENT - 1) & -ALIGNMENT)
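
/* The resulting layout of a large block's page sequence is therefore

     | large_block_header | padding | large_block_caption | block ... |

   where the page sequence starts at a multiple of PAGESIZE and the block
   starts at offset LARGE_BLOCK_OFFSET, a multiple of ALIGNMENT.  Because
   the caption ends exactly where the block starts, free_block below can
   retrieve it as ((struct large_block_caption *) block)[-1].  */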
/* =========================== Exported functions =========================== */

/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t
allocate_block (size_t size)
{
  uintptr_t block;

  if (unlikely (size > MEDIUM_BLOCKS_PAGE_CAPACITY))
    {
      /* Allocate a large block.  */
      size_t pages_size = (size + LARGE_BLOCK_OFFSET + PAGESIZE - 1) & -PAGESIZE;
      uintptr_t pages = ALLOC_PAGES (pages_size);
      if (unlikely (pages == 0))
        /* Failed.  */
        return 0;
      if ((pages & (PAGESIZE - 1)) != 0)
        /* ALLOC_PAGES's result is not aligned as expected.  */
        abort ();
      ((struct any_page_header *) pages)->page_type = large_page_type;
      block = pages + LARGE_BLOCK_OFFSET;
      ((struct large_block_caption *) block)[-1].pages_size = pages_size;
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool =
        (size <= SMALL_BLOCK_MAX_SIZE ? &small_block_pages : &medium_block_pages);
      block = allocate_block_from_pool (size, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
  return block;
}
/* Frees a block of memory, returned by allocate_block.  */
static void
free_block (uintptr_t block)
{
  if (block == 0 || (block % ALIGNMENT) != 0)
    /* Invalid argument.  */
    abort ();
  uintptr_t pages = block & -PAGESIZE;
  unsigned char type = ((struct any_page_header *) pages)->page_type;
  if (unlikely (type == large_page_type))
    {
      if (block != pages + LARGE_BLOCK_OFFSET)
        /* Invalid argument.  */
        abort ();
      size_t pages_size = ((struct large_block_caption *) block)[-1].pages_size;
      if ((pages_size & (PAGESIZE - 1)) != 0)
        /* Invalid memory state: pages_size not as expected.  */
        abort ();
      FREE_PAGES (pages, pages_size);
    }
  else
    {
      bool mt = gl_multithreaded ();
      if (mt) gl_lock_lock (ssfmalloc_lock);
      struct page_pool *pool;
      if (type == small_page_type)
        pool = &small_block_pages;
      else if (type == medium_page_type)
        pool = &medium_block_pages;
      else
        /* Invalid memory state: type not as expected.  */
        abort ();
      free_block_from_pool (block, pages, pool);
      if (mt) gl_lock_unlock (ssfmalloc_lock);
    }
}