1 /* Simple and straight-forward malloc implementation (front end).
3 Copyright (C) 2020-2021 Free Software Foundation, Inc.
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <https://www.gnu.org/licenses/>. */
18 /* Written by Bruno Haible <bruno@clisp.org>, 2020. */
20 /* This file implements an allocator of memory blocks of given size (a
21 "malloc front end"), based on an allocator of memory pages (a "malloc
24 The need for such an allocator arises because a memory block is often
25 50 bytes large or less, whereas an allocator of memory pages provides
26 entire pages (4096 bytes or more).
28 This implementation attempts to be
29 - simple and straight-forward,
30 - respecting locality of reference,
31 - usable for small allocations,
32 - nevertheless of reasonable speed.
34 Simple and straight-forward - means that it contains only a small amount
35 of code (compared to e.g. tcmalloc).
37 Respecting locality of reference - means that searching for a free block
38 will not follow lists of pointers that touch unrelated cache lines in
39 the same page or even unrelated memory pages, because that would cause
40 cache misses or even swapping in of unrelated memory pages.
42 Usable for small allocations - means that it can be used for data types
43 with few instances. It does not, unlike some other malloc implementations,
44 allocate 256 KB of memory up-front. Nor does it allocate memory pages
47 Reasonable speed is nevertheless guaranteed by
48 - choosing algorithms that lead to little fragmentation,
49 - small caches where they make sense.
52 /* The user of this file needs to define the following macros before including
55 PAGESIZE A variable-like macro of type intptr_t or uintptr_t
56 that evaluates to the memory page size (>= 4096).
58 PAGESIZE_MAX A constant that specifies an upper bound for PAGESIZE.
60 ALLOC_PAGES A function-like macro with the signature
61 uintptr_t ALLOC_PAGES (size_t size)
62 where the argument size is > 0 and a multiple of the
63 PAGESIZE. It returns a multiple of PAGESIZE, or 0 upon failure.
66 FREE_PAGES A function-like macro with the signature
67 void FREE_PAGES (uintptr_t pages, size_t size)
68 where pages is a non-zero value returned by ALLOC_PAGES (size).
71 ALIGNMENT A constant that specifies the desired alignment of all
72 the returned memory blocks. Possible values are the
73 powers of 2, from sizeof (void *) to 32.
75 PAGE_RESERVED_HEADER_SIZE A constant, either 0 or a multiple of
76 sizeof (void *), that denotes the size of a reserved
77 header - not to be used by the application - at the
78 beginning of a page sequence returned by ALLOC_PAGES.
81 /* =================== Declarations of exported functions =================== */
/* Allocates a block of memory, aligned to ALIGNMENT bytes.
   Returns 0 upon failure.  */
static uintptr_t allocate_block (size_t size);

/* Frees a block of memory, returned by allocate_block.  */
static void free_block (uintptr_t block);
92 /* ============================= Implementation ============================= */
94 /* Outline of the implementation decisions (ID):
96 * ID: This implementation considers three types of blocks:
97 - small blocks - these are allocated in "small block" pages.
98 - medium blocks - these are allocated in "medium block" pages.
99 - large blocks - these are allocated individually, with a page or
100 sequence of pages uniquely for this block.
102 - Most memory allocations are small (e.g. <= 32 bytes); this is a lesson
103 learned from xc/programs/Xserver/os/xalloc.c (1997) [Pascal Haible].
104 - Fragmentation is one of the biggest problems, and keeping large
105 blocks and small blocks separate from medium blocks is one way to
108 * ID: If an allocation succeeds in one page, the next allocation (of the
109 same type of block) will try to use the same page.
110 * Rationale: Locality of reference.
112 * ID: Pages of small or medium blocks have their management data structures
113 concentrated at the beginning of the page. No chained lists that force
114 to walk through the page.
115 * Rationale: Locality of reference.
117 * ID: Across pages, the management of the free space is done through data
118 structures outside the pages. No chained lists across pages.
119 * Rationale: Locality of reference.
125 #include "flexmember.h"
126 #include "glthread/lock.h"
127 #include "thread-optim.h"
129 #include "gl_rbtree_oset.h"
/* Help the branch prediction.  */
/* NOTE(review): the conditional-compilation guard was reconstructed; the
   __builtin_expect form is only valid on GCC-compatible compilers.  */
#if defined __GNUC__ || defined __clang__
# define likely(cond)   __builtin_expect ((cond), 1)
# define unlikely(cond) __builtin_expect ((cond), 0)
#else
# define likely(cond)   (cond)
# define unlikely(cond) (cond)
#endif
/* Discriminator stored in every page header: which kind of page this is.  */
enum
{
  small_page_type = 1,
  medium_page_type = 2,
  large_page_type = 3
};
/* Header of a page of small or medium blocks or of a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct any_page_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  /* Space reserved for the page provider, not used by this allocator.  */
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  /* small_page_type or medium_page_type or large_page_type */
  unsigned char page_type;
};
153 /* ========================= Small and medium blocks ======================== */
/* An integer type, capable of holding the values 0 .. PAGESIZE.  */
#if PAGESIZE_MAX >= 0x10000
typedef unsigned int pg_offset_t;
#else
typedef unsigned short pg_offset_t;
#endif
162 /* Tree element that corresponds to a page.
163 These tree elements are allocated via malloc(). */
164 struct page_tree_element
167 pg_offset_t free_space
;
170 /* Header of a page of small or medium blocks.
171 Lies at an address that is a multiple of PAGESIZE. */
172 struct dissected_page_header
174 struct any_page_header common
;
175 /* Amount of free space in this page. Always a multiple of ALIGNMENT. */
176 pg_offset_t free_space
;
177 /* The tree element. */
178 struct page_tree_element
*tree_element
;
181 /* Data structure for managing a pool of pages. */
185 void (*init_page_pool
) (struct page_pool
*pool
);
186 void (*init_page
) (uintptr_t page
);
187 uintptr_t (*allocate_block_in_page
) (size_t size
, uintptr_t page
);
188 void (*free_block_in_page
) (uintptr_t block
, uintptr_t page
);
190 /* Maximum free space in a page of this pool. */
191 size_t page_capacity
;
193 /* Page that provided the last successful allocation from this pool,
197 /* Ordered set of managed pages, sorted according to free_space, in ascending
199 gl_oset_t
/* <struct page_tree_element *> */ managed_pages
;
201 /* A queue of pages which have a modified free_space but which have not been
202 updated in the managed_pages tree so far. */
203 #define UPDATE_QUEUE_SIZE 10
204 unsigned int update_queue_count
; /* <= UPDATE_QUEUE_SIZE */
205 uintptr_t update_queue
[UPDATE_QUEUE_SIZE
];
207 /* A page that could be freed.
208 We don't free it immediately, so that on allocation/deallocation
210 2x allocate, 2x free, 2x allocate, 2x free, 2x allocate, 2x free, ...
211 will not allocate and free a page so frequently. */
212 uintptr_t freeable_page
;
215 /* Comparison function for managed_pages. */
217 compare_pages_by_free_space (const void *elt1
, const void *elt2
)
219 struct page_tree_element
*element1
= (struct page_tree_element
*) elt1
;
220 struct page_tree_element
*element2
= (struct page_tree_element
*) elt2
;
221 int cmp
= _GL_CMP (element1
->free_space
, element2
->free_space
);
222 if (unlikely (cmp
== 0))
223 cmp
= _GL_CMP (element1
->page
, element2
->page
);
227 /* Tests whether the free space in a tree element is greater or equal than the
230 page_free_space_is_at_least (const void *elt
, const void *threshold
)
232 struct page_tree_element
*element
= (struct page_tree_element
*) elt
;
234 return element
->free_space
>= (uintptr_t) threshold
;
237 /* Updates the free space of a 'struct page_tree_element *'.
238 Only to be called through gl_oset_update! */
240 set_free_space (const void *elt
, void *action_data
)
242 struct page_tree_element
*element
= (struct page_tree_element
*) elt
;
244 element
->free_space
= (pg_offset_t
) (uintptr_t) action_data
;
247 /* Executes the pending updates in the managed_pages tree. */
249 flush_all_updates (struct page_pool
*pool
)
251 size_t count
= pool
->update_queue_count
;
252 while (likely (count
> 0))
255 uintptr_t page
= pool
->update_queue
[count
];
256 struct dissected_page_header
*pageptr
=
257 (struct dissected_page_header
*) page
;
258 struct page_tree_element
*tree_element
= pageptr
->tree_element
;
259 if (gl_oset_update (pool
->managed_pages
, tree_element
,
261 (void *) (uintptr_t) pageptr
->free_space
)
263 /* A collision was found. This contradicts the definition of
264 compare_pages_by_free_space. */
267 pool
->update_queue_count
= 0;
270 /* Adds a page to the update queue.
271 This function has to be called when the free_space of the page has
274 add_update (uintptr_t page
, struct page_pool
*pool
)
276 size_t count
= pool
->update_queue_count
;
278 for (i
= 0; i
< count
; i
++)
279 if (pool
->update_queue
[i
] == page
)
280 /* It's already in the queue. */
283 /* Ensure there is room for adding one more page to the update queue. */
284 if (unlikely (count
== UPDATE_QUEUE_SIZE
))
285 flush_all_updates (pool
);
287 /* Add it to the update queue. */
288 pool
->update_queue
[pool
->update_queue_count
++] = page
;
291 /* Drops a page from the update queue. */
293 drop_update (uintptr_t page
, struct page_pool
*pool
)
295 size_t count
= pool
->update_queue_count
;
297 for (i
= 0; i
< count
; i
++)
298 if (pool
->update_queue
[i
] == page
)
300 /* It's in the queue. Remove it. */
301 for (i
= i
+ 1; i
< count
; i
++)
302 pool
->update_queue
[i
- 1] = pool
->update_queue
[i
];
303 pool
->update_queue_count
--;
308 /* ============================== Small blocks ============================== */
310 #include "ssfmalloc-bitmap.h"
/* Maximum size of a small block.
   Must be a power of 2.  */
#define SMALL_BLOCK_MAX_SIZE (ALIGNMENT < 8 ? 32 * ALIGNMENT : 256)

/* Number of rows of ALIGNMENT bytes available in an empty page.  */
static unsigned int small_block_page_num_bits;
/* Offset in the page where the memory blocks start.
   A multiple of ALIGNMENT.  */
static unsigned int small_block_page_blocks_start;
/* Number of uint32_t words in each of the two bitmaps.  */
static unsigned int small_block_page_num_bitmap_words;
324 /* Header of a page of small blocks.
325 Lies at an address that is a multiple of PAGESIZE. */
326 struct small_page_header
328 struct dissected_page_header common
;
329 /* Two bitmaps, each with small_block_page_num_bitmap_words. In each a bit
330 represents ALIGNMENT bytes.
331 - available_bitmap: bit set means available, bit clear means allocated.
332 - blockend_bitmap: bit set means the an allocated block ends here. */
333 uint32_t bitmap_words
[FLEXIBLE_ARRAY_MEMBER
];
336 static inline uint32_t *
337 small_block_page_available_bitmap (struct small_page_header
*pageptr
)
339 return &pageptr
->bitmap_words
[0];
342 static inline uint32_t *
343 small_block_page_blockend_bitmap (struct small_page_header
*pageptr
)
345 return &pageptr
->bitmap_words
[small_block_page_num_bitmap_words
];
349 init_small_block_page_pool (struct page_pool
*pool
)
351 /* How many usable rows of ALIGNMENT bytes can we have?
352 Each takes ALIGNMENT bytes + 1/8 byte in each bitmap, so approximately
353 (ALIGNMENT + 1/4) bytes. */
354 unsigned int num_bits
= (unsigned int) (4 * PAGESIZE
) / (4 * ALIGNMENT
+ 1);
355 unsigned int num_bitmap_words
;
356 unsigned int blocks_start
;
357 /* Iterate until it converges. */
360 num_bitmap_words
= (num_bits
+ 32 - 1) / 32;
362 (FLEXSIZEOF (struct small_page_header
, bitmap_words
,
363 2 * num_bitmap_words
* sizeof (uint32_t))
364 + ALIGNMENT
- 1) & -ALIGNMENT
;
365 unsigned int num_bits_r
= (unsigned int) (PAGESIZE
- blocks_start
) / ALIGNMENT
;
366 if (num_bits_r
>= num_bits
)
368 num_bits
= num_bits_r
;
371 small_block_page_num_bits
= num_bits
;
372 small_block_page_num_bitmap_words
= num_bitmap_words
;
373 small_block_page_blocks_start
= blocks_start
;
375 pool
->page_capacity
= small_block_page_num_bits
* ALIGNMENT
;
379 init_small_block_page (uintptr_t page
)
381 struct small_page_header
*pageptr
= (struct small_page_header
*) page
;
382 pageptr
->common
.common
.page_type
= small_page_type
;
384 /* Initialize available_bitmap. */
385 uint32_t *available_bitmap
= small_block_page_available_bitmap (pageptr
);
386 init_bitmap_all_bits_set (small_block_page_num_bitmap_words
,
388 if ((small_block_page_num_bits
% 32) != 0)
389 available_bitmap
[small_block_page_num_bitmap_words
- 1] =
390 (1U << (small_block_page_num_bits
% 32)) - 1;
392 /* Initialize blockend_bitmap. */
393 init_bitmap_all_bits_clear (small_block_page_num_bitmap_words
,
394 small_block_page_blockend_bitmap (pageptr
));
396 pageptr
->common
.free_space
= small_block_page_num_bits
* ALIGNMENT
;
399 /* Allocates a block of memory of size <= SMALL_BLOCK_MAX_SIZE,
400 aligned to ALIGNMENT bytes, from the given page.
401 Returns 0 upon failure. */
403 allocate_small_block_in_page (size_t size
, uintptr_t page
)
405 struct small_page_header
*pageptr
= (struct small_page_header
*) page
;
407 /* glibc compatible. */
411 /* Number of consecutive bits to look for in the bitmap. */
412 size_t c
= (size
+ ALIGNMENT
- 1) / ALIGNMENT
;
414 /* SMALL_BLOCK_MAX_SIZE has been chosen so that c <= 32. */
415 if (!(c
> 0 && c
<= 32))
418 uint32_t *available_bitmap
= small_block_page_available_bitmap (pageptr
);
419 size_t k
= find_first_packet_set (small_block_page_num_bitmap_words
,
422 if (unlikely (k
== (size_t)(-1)))
423 /* Failed to find c consecutive available rows of ALIGNMENT bytes each. */
426 uint32_t *blockend_bitmap
= small_block_page_blockend_bitmap (pageptr
);
431 available_bitmap
[j
] &= ~(((2U << (c
- 1)) - 1) << i
);
432 blockend_bitmap
[j
] |= (1U << (i
+ c
- 1));
436 available_bitmap
[j
] &= ~(-1U << i
);
437 available_bitmap
[j
+ 1] &= ~((1U << (i
+ c
- 32)) - 1);
438 blockend_bitmap
[j
+ 1] |= (1U << (i
+ c
- 1 - 32));
441 pageptr
->common
.free_space
-= c
* ALIGNMENT
;
443 return page
+ small_block_page_blocks_start
+ k
* ALIGNMENT
;
447 free_small_block_in_page (uintptr_t block
, uintptr_t page
)
449 struct small_page_header
*pageptr
= (struct small_page_header
*) page
;
451 if (!(block
>= page
+ small_block_page_blocks_start
452 && (block
% ALIGNMENT
) == 0))
453 /* Invalid argument. */
456 uint32_t *available_bitmap
= small_block_page_available_bitmap (pageptr
);
457 uint32_t *blockend_bitmap
= small_block_page_blockend_bitmap (pageptr
);
459 /* The bit that corresponds to where the block starts. */
460 size_t k
= (block
- page
- small_block_page_blocks_start
) / ALIGNMENT
;
461 /* The bit that corresponds to where the block ends. */
462 size_t ke
= find_first_bit_set (small_block_page_num_bitmap_words
,
465 if (/* ke == (size_t)(-1) || */ ke
>= k
+ 32)
466 /* Invalid argument or invalid state. */
469 /* Number of consecutive bits to manipulate in the bitmap. */
470 size_t c
= ke
- k
+ 1;
476 available_bitmap
[j
] |= (((2U << (c
- 1)) - 1) << i
);
477 blockend_bitmap
[j
] &= ~(1U << (i
+ c
- 1));
481 available_bitmap
[j
] |= (-1U << i
);
482 available_bitmap
[j
+ 1] |= ((1U << (i
+ c
- 32)) - 1);
483 blockend_bitmap
[j
+ 1] &= ~(1U << (i
+ c
- 1 - 32));
486 pageptr
->common
.free_space
+= c
* ALIGNMENT
;
489 /* Management of pages of small blocks. */
490 struct page_pool small_block_pages
=
492 init_small_block_page_pool
,
493 init_small_block_page
,
494 allocate_small_block_in_page
,
495 free_small_block_in_page
498 /* ============================== Medium blocks ============================= */
500 /* A range of memory in a page.
501 It covers the address range [page+start, page+end).
509 /* Header of a page of medium blocks.
510 Lies at an address that is a multiple of PAGESIZE. */
511 struct medium_page_header
513 struct dissected_page_header common
;
514 /* If n blocks are allocated, there are n+1 gaps before, between, and
515 after them. Keep them in an array, sorted in ascending order. */
516 unsigned int num_gaps
; /* > 0 */
517 struct memory_range gaps
[FLEXIBLE_ARRAY_MEMBER
/* PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1 */];
/* Upper bound on the number of gaps in a medium-blocks page.  */
#define MEDIUM_BLOCKS_PAGE_MAX_GAPS \
  (PAGESIZE / SMALL_BLOCK_MAX_SIZE + 1)
/* Offset of the first gap, i.e. the end of the page header, rounded up to
   ALIGNMENT.  */
#define MEDIUM_BLOCKS_PAGE_FIRST_GAP_START \
  ((FLEXSIZEOF (struct medium_page_header, gaps, \
                MEDIUM_BLOCKS_PAGE_MAX_GAPS * sizeof (struct memory_range)) \
    + ALIGNMENT - 1) & -ALIGNMENT)
/* End of the last gap, i.e. the end of the page.  */
#define MEDIUM_BLOCKS_PAGE_LAST_GAP_END \
  PAGESIZE
/* Free space in an empty medium-blocks page.  */
#define MEDIUM_BLOCKS_PAGE_CAPACITY \
  (MEDIUM_BLOCKS_PAGE_LAST_GAP_END - MEDIUM_BLOCKS_PAGE_FIRST_GAP_START)
532 init_medium_block_page_pool (struct page_pool
*pool
)
534 pool
->page_capacity
= MEDIUM_BLOCKS_PAGE_CAPACITY
;
538 init_medium_block_page (uintptr_t page
)
540 struct medium_page_header
*pageptr
= (struct medium_page_header
*) page
;
541 pageptr
->common
.common
.page_type
= medium_page_type
;
542 pageptr
->num_gaps
= 1;
543 pageptr
->gaps
[0].start
= MEDIUM_BLOCKS_PAGE_FIRST_GAP_START
;
544 pageptr
->gaps
[0].end
= MEDIUM_BLOCKS_PAGE_LAST_GAP_END
;
545 pageptr
->common
.free_space
= MEDIUM_BLOCKS_PAGE_CAPACITY
;
548 /* Allocates a block of memory of size > SMALL_BLOCK_MAX_SIZE,
549 aligned to ALIGNMENT bytes, from the given page.
550 Returns 0 upon failure. */
552 allocate_medium_block_in_page (size_t size
, uintptr_t page
)
554 struct medium_page_header
*pageptr
= (struct medium_page_header
*) page
;
556 /* Walk through the gaps and remember the smallest gap of at least
558 size_t best_i
= (size_t)(-1);
559 size_t best_length
= (size_t)(-1);
560 size_t num_gaps
= pageptr
->num_gaps
;
562 for (i
= 0; i
< num_gaps
; i
++)
564 size_t length
= pageptr
->gaps
[i
].end
- pageptr
->gaps
[i
].start
;
567 /* Found a gap of sufficient size. */
568 if (length
< best_length
)
571 best_length
= length
;
575 if (unlikely (best_i
== (size_t)(-1)))
576 /* Failed to find a gap of sufficient size. */
579 size_t aligned_size
= (size
+ ALIGNMENT
- 1) & -ALIGNMENT
;
581 if (pageptr
->common
.free_space
< aligned_size
)
582 /* Invalid state: Less free space than expected. */
585 /* Split the gap, leaving an empty gap and a remaining gap. */
586 for (i
= num_gaps
- 1; ; i
--)
588 pageptr
->gaps
[i
+ 1] = pageptr
->gaps
[i
];
592 size_t result
= pageptr
->gaps
[best_i
].start
;
593 pageptr
->gaps
[best_i
].end
= result
;
594 pageptr
->gaps
[best_i
+ 1].start
= result
+ aligned_size
;
595 pageptr
->num_gaps
= num_gaps
+ 1;
596 if (pageptr
->num_gaps
> PAGESIZE
/ SMALL_BLOCK_MAX_SIZE
+ 1)
597 /* Invalid state: More gaps than expected. */
600 pageptr
->common
.free_space
-= aligned_size
;
602 return page
+ result
;
606 free_medium_block_in_page (uintptr_t block
, uintptr_t page
)
608 struct medium_page_header
*pageptr
= (struct medium_page_header
*) page
;
609 size_t offset
= block
- page
;
611 /* Search for the gap that ends where this block begins.
612 We can ignore the last gap here, since it ends where the page ends. */
613 struct memory_range
*gaps
= pageptr
->gaps
;
615 size_t hi
= pageptr
->num_gaps
- 1;
620 for i < lo, gaps[i].end < offset,
621 for i >= hi, gaps[i].end > offset. */
622 size_t mid
= (hi
+ lo
) >> 1; /* >= lo, < hi */
623 if (offset
> gaps
[mid
].end
)
625 else if (offset
< gaps
[mid
].end
)
629 /* Found it: offset == gaps[mid].end. */
634 /* Invalid argument: block is not the start of a currently allocated
639 /* Here 0 <= index < pageptr->num_gaps - 1.
640 Combine the gaps index and index+1. */
641 pageptr
->common
.free_space
+= gaps
[index
+ 1].start
- gaps
[index
].end
;
642 if (pageptr
->common
.free_space
< gaps
[index
+ 1].start
- gaps
[index
].end
)
646 gaps
[index
].end
= gaps
[index
+ 1].end
;
648 size_t num_gaps
= pageptr
->num_gaps
- 1;
650 for (i
= index
+ 1; i
< num_gaps
; i
++)
651 gaps
[i
] = gaps
[i
+ 1];
652 pageptr
->num_gaps
= num_gaps
;
655 /* Management of pages of medium blocks. */
656 struct page_pool medium_block_pages
=
658 init_medium_block_page_pool
,
659 init_medium_block_page
,
660 allocate_medium_block_in_page
,
661 free_medium_block_in_page
664 /* ==================== Pages of small and medium blocks ==================== */
666 /* Allocates a block of memory from the given pool, aligned to ALIGNMENT bytes.
667 Returns 0 upon failure. */
668 static inline uintptr_t
669 allocate_block_from_pool (size_t size
, struct page_pool
*pool
)
673 /* Try in the last used page first. */
674 page
= pool
->last_page
;
675 if (likely (page
!= 0))
677 uintptr_t block
= pool
->allocate_block_in_page (size
, page
);
678 if (likely (block
!= 0))
680 add_update (page
, pool
);
685 /* Ensure that the pool and its managed_pages is initialized. */
686 if (unlikely (pool
->managed_pages
== NULL
))
688 pool
->managed_pages
=
689 gl_oset_nx_create_empty (GL_RBTREE_OSET
, compare_pages_by_free_space
, NULL
);
690 if (unlikely (pool
->managed_pages
== NULL
))
691 /* Could not allocate the managed_pages. */
693 pool
->init_page_pool (pool
);
696 /* Ensure that managed_pages is up-to-date. */
697 flush_all_updates (pool
);
699 /* Try in the other managed_pages. */
701 gl_oset_iterator_t iter
=
702 gl_oset_iterator_atleast (pool
->managed_pages
,
703 page_free_space_is_at_least
,
704 (void *) (uintptr_t) size
);
706 while (gl_oset_iterator_next (&iter
, &elt
))
708 struct page_tree_element
*element
= (struct page_tree_element
*) elt
;
709 page
= element
->page
;
710 /* No need to try the last used page again. */
711 if (likely (page
!= pool
->last_page
))
713 uintptr_t block
= pool
->allocate_block_in_page (size
, page
);
714 if (likely (block
!= 0))
716 gl_oset_iterator_free (&iter
);
717 add_update (page
, pool
);
718 pool
->last_page
= page
;
723 gl_oset_iterator_free (&iter
);
726 /* If we have a freeable page ready for reuse, use it. */
727 if (pool
->freeable_page
!= 0)
729 page
= pool
->freeable_page
;
730 pool
->init_page (page
);
731 struct page_tree_element
*element
=
732 (struct page_tree_element
*) malloc (sizeof (struct page_tree_element
));
733 if (unlikely (element
== NULL
))
735 /* Could not allocate the tree element. */
739 element
->page
= page
;
740 element
->free_space
= ((struct dissected_page_header
*) page
)->free_space
;
741 if (unlikely (gl_oset_nx_add (pool
->managed_pages
, element
) < 0))
743 /* Could not allocate the tree node. */
748 ((struct dissected_page_header
*) page
)->tree_element
= element
;
749 pool
->freeable_page
= 0;
751 uintptr_t block
= pool
->allocate_block_in_page (size
, page
);
753 /* If the size is too large for an empty page, this function should not
754 have been invoked. */
756 add_update (page
, pool
);
757 pool
->last_page
= page
;
761 /* Allocate a fresh page. */
762 page
= ALLOC_PAGES (PAGESIZE
);
763 if (unlikely (page
== 0))
769 if ((page
& (PAGESIZE
- 1)) != 0)
770 /* ALLOC_PAGES's result is not aligned as expected. */
773 pool
->init_page (page
);
774 struct page_tree_element
*element
=
775 (struct page_tree_element
*) malloc (sizeof (struct page_tree_element
));
776 if (unlikely (element
== NULL
))
778 /* Could not allocate the tree element. */
779 FREE_PAGES (page
, PAGESIZE
);
783 element
->page
= page
;
784 element
->free_space
= ((struct dissected_page_header
*) page
)->free_space
;
785 if (unlikely (gl_oset_nx_add (pool
->managed_pages
, element
) < 0))
787 /* Could not allocate the tree node. */
789 FREE_PAGES (page
, PAGESIZE
);
793 ((struct dissected_page_header
*) page
)->tree_element
= element
;
795 uintptr_t block
= pool
->allocate_block_in_page (size
, page
);
797 /* If the size is too large for an empty page, this function should not
798 have been invoked. */
800 add_update (page
, pool
);
801 pool
->last_page
= page
;
806 free_block_from_pool (uintptr_t block
, uintptr_t page
, struct page_pool
*pool
)
808 if (pool
->page_capacity
== 0)
809 /* Invalid argument: The block is not valid, since the pool has not yet
813 pool
->free_block_in_page (block
, page
);
815 struct dissected_page_header
*pageptr
= (struct dissected_page_header
*) page
;
816 if (likely (pageptr
->free_space
!= pool
->page_capacity
))
818 /* The page is not entirely free. */
819 add_update (page
, pool
);
823 /* The page is now entirely free. */
824 /* Remove its tree element and free it. */
825 struct page_tree_element
*element
= pageptr
->tree_element
;
826 if (!gl_oset_remove (pool
->managed_pages
, element
))
827 /* Invalid state: The element is not in the managed_pages. */
831 if (pool
->last_page
== page
)
834 drop_update (page
, pool
);
836 /* If we would now have two freeable pages, free the old one. */
837 if (pool
->freeable_page
!= 0)
838 FREE_PAGES (pool
->freeable_page
, PAGESIZE
);
840 /* Don't free the page now, but later. */
841 pool
->freeable_page
= page
;
845 /* Lock that protects the management of small and medium blocks from
846 simultaneous access from multiple threads. */
847 gl_lock_define_initialized(static, ssfmalloc_lock
)
849 /* ============================== Large blocks ============================== */
/* Header of a page sequence for a large block.
   Lies at an address that is a multiple of PAGESIZE.  */
struct large_block_header
{
#if PAGE_RESERVED_HEADER_SIZE > 0
  /* Space reserved for the page provider, not used by this allocator.  */
  void * reserved[PAGE_RESERVED_HEADER_SIZE / sizeof (void *)];
#endif
  unsigned char page_type; /* large_page_type */
};
/* Information about a large block.
   Ends at an address that is a multiple of ALIGNMENT.  */
struct large_block_caption
{
  size_t pages_size; /* A multiple of PAGESIZE.  */
};
/* Size of large block page header, gap, and caption, rounded up to
   ALIGNMENT.  The block itself starts at this offset into the pages.  */
#define LARGE_BLOCK_OFFSET \
  ((sizeof (struct large_block_header) + sizeof (struct large_block_caption) \
    + ALIGNMENT - 1) & -ALIGNMENT)
873 /* =========================== Exported functions =========================== */
875 /* Allocates a block of memory, aligned to ALIGNMENT bytes.
876 Returns 0 upon failure. */
878 allocate_block (size_t size
)
882 if (unlikely (size
> MEDIUM_BLOCKS_PAGE_CAPACITY
))
884 /* Allocate a large block. */
885 size_t pages_size
= (size
+ LARGE_BLOCK_OFFSET
+ PAGESIZE
- 1) & -PAGESIZE
;
886 uintptr_t pages
= ALLOC_PAGES (pages_size
);
887 if (unlikely (pages
== 0))
890 if ((pages
& (PAGESIZE
- 1)) != 0)
891 /* ALLOC_PAGES's result is not aligned as expected. */
893 ((struct any_page_header
*) pages
)->page_type
= large_page_type
;
894 block
= pages
+ LARGE_BLOCK_OFFSET
;
895 ((struct large_block_caption
*) block
)[-1].pages_size
= pages_size
;
899 bool mt
= gl_multithreaded ();
900 if (mt
) gl_lock_lock (ssfmalloc_lock
);
901 struct page_pool
*pool
=
902 (size
<= SMALL_BLOCK_MAX_SIZE
? &small_block_pages
: &medium_block_pages
);
903 block
= allocate_block_from_pool (size
, pool
);
904 if (mt
) gl_lock_unlock (ssfmalloc_lock
);
909 /* Frees a block of memory, returned by allocate_block. */
911 free_block (uintptr_t block
)
913 if (block
== 0 || (block
% ALIGNMENT
) != 0)
914 /* Invalid argument. */
916 uintptr_t pages
= block
& -PAGESIZE
;
917 unsigned char type
= ((struct any_page_header
*) pages
)->page_type
;
918 if (unlikely (type
== large_page_type
))
920 if (block
!= pages
+ LARGE_BLOCK_OFFSET
)
921 /* Invalid argument. */
923 size_t pages_size
= ((struct large_block_caption
*) block
)[-1].pages_size
;
924 if ((pages_size
& (PAGESIZE
- 1)) != 0)
925 /* Invalid memory state: pages_size not as expected. */
927 FREE_PAGES (pages
, pages_size
);
931 bool mt
= gl_multithreaded ();
932 if (mt
) gl_lock_lock (ssfmalloc_lock
);
933 struct page_pool
*pool
;
934 if (type
== small_page_type
)
935 pool
= &small_block_pages
;
936 else if (type
== medium_page_type
)
937 pool
= &medium_block_pages
;
939 /* Invalid memory state: type not as expected. */
941 free_block_from_pool (block
, pages
, pool
);
942 if (mt
) gl_lock_unlock (ssfmalloc_lock
);