misc: Reformat Makefile.
[glibc.git] / malloc / malloc.c
blob5d8b61d66caa374c649047d4114746585a3fe501
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2023 Free Software Foundation, Inc.
3 Copyright The GNU Toolchain Authors.
4 This file is part of the GNU C Library.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <https://www.gnu.org/licenses/>. */
21 This is a version (aka ptmalloc2) of malloc/free/realloc written by
22 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24 There have been substantial changes made after the integration into
25 glibc in all parts of the code. Do not look for much commonality
26 with the ptmalloc2 version.
28 * Version ptmalloc2-20011215
29 based on:
30 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32 * Quickstart
34 In order to compile this implementation, a Makefile is provided with
35 the ptmalloc2 distribution, which has pre-defined targets for some
36 popular systems (e.g. "make posix" for Posix threads). All that is
37 typically required with regard to compiler flags is the selection of
38 the thread package via defining one out of USE_PTHREADS, USE_THR or
39 USE_SPROC. Check the thread-m.h file for what effects this has.
40 Many/most systems will additionally require USE_TSD_DATA_HACK to be
41 defined, so this is the default for "make posix".
43 * Why use this malloc?
45 This is not the fastest, most space-conserving, most portable, or
46 most tunable malloc ever written. However it is among the fastest
47 while also being among the most space-conserving, portable and tunable.
48 Consistent balance across these factors results in a good general-purpose
49 allocator for malloc-intensive programs.
51 The main properties of the algorithms are:
52 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
53 with ties normally decided via FIFO (i.e. least recently used).
54 * For small (<= 64 bytes by default) requests, it is a caching
55 allocator, that maintains pools of quickly recycled chunks.
56 * In between, and for combinations of large and small requests, it does
57 the best it can trying to meet both goals at once.
58 * For very large requests (>= 128KB by default), it relies on system
59 memory mapping facilities, if supported.
61 For a longer but slightly out of date high-level description, see
62 http://gee.cs.oswego.edu/dl/html/malloc.html
64 You may already by default be using a C library containing a malloc
65 that is based on some version of this malloc (for example in
66 linux). You might still want to use the one in this file in order to
67 customize settings or to avoid overheads associated with library
68 versions.
70 * Contents, described in more detail in "description of public routines" below.
72 Standard (ANSI/SVID/...) functions:
73 malloc(size_t n);
74 calloc(size_t n_elements, size_t element_size);
75 free(void* p);
76 realloc(void* p, size_t n);
77 memalign(size_t alignment, size_t n);
78 valloc(size_t n);
79 mallinfo()
80 mallopt(int parameter_number, int parameter_value)
82 Additional functions:
83 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
84 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
85 pvalloc(size_t n);
86 malloc_trim(size_t pad);
87 malloc_usable_size(void* p);
88 malloc_stats();
90 * Vital statistics:
92 Supported pointer representation: 4 or 8 bytes
93 Supported size_t representation: 4 or 8 bytes
94 Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 You can adjust this by defining INTERNAL_SIZE_T
97 Alignment: 2 * sizeof(size_t) (default)
98 (i.e., 8 byte alignment with 4byte size_t). This suffices for
99 nearly all current machines and C compilers. However, you can
100 define MALLOC_ALIGNMENT to be wider than this if necessary.
102 Minimum overhead per allocated chunk: 4 or 8 bytes
103 Each malloced chunk has a hidden word of overhead holding size
104 and status information.
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
107 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
109 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
112 free list pointers. Thus, the minimum allocatable size is
113 16/24/32 bytes.
115 Even a request for zero bytes (i.e., malloc(0)) returns a
116 pointer to something of the minimum allocatable size.
118 The maximum overhead wastage (i.e., number of extra bytes
119 allocated than were requested in malloc) is less than or equal
120 to the minimum size, except for requests >= mmap_threshold that
121 are serviced via mmap(), where the worst case wastage is 2 *
122 sizeof(size_t) bytes plus the remainder from a system page (the
123 minimal mmap unit); typically 4096 or 8192 bytes.
125 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
126 8-byte size_t: 2^64 minus about two pages
128 It is assumed that (possibly signed) size_t values suffice to
129 represent chunk sizes. `Possibly signed' is due to the fact
130 that `size_t' may be defined on a system as either a signed or
131 an unsigned type. The ISO C standard says that it must be
132 unsigned, but a few systems are known not to adhere to this.
133 Additionally, even when size_t is unsigned, sbrk (which is by
134 default used to obtain memory from system) accepts signed
135 arguments, and may not be able to handle size_t-wide arguments
136 with negative sign bit. Generally, values that would
137 appear as negative after accounting for overhead and alignment
138 are supported only via mmap(), which does not have this
139 limitation.
141 Requests for sizes outside the allowed range will perform an optional
142 failure action and then return null. (Requests may also
143 also fail because a system is out of memory.)
145 Thread-safety: thread-safe
147 Compliance: I believe it is compliant with the 1997 Single Unix Specification
148 Also SVID/XPG, ANSI C, and probably others as well.
150 * Synopsis of compile-time options:
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and Linux.
155 People also report using it in stand-alone embedded systems.
157 The implementation is in straight, hand-tuned ANSI C. It is not
158 at all modular. (Sorry!) It uses a lot of macros. To be at all
159 usable, this code should be compiled using an optimizing compiler
160 (for example gcc -O3) that can simplify expressions and control
161 paths. (FAQ: some macros import variables as arguments rather than
162 declare locals because people reported that some debuggers
163 otherwise get confused.)
165 OPTION DEFAULT VALUE
167 Compilation Environment options:
169 HAVE_MREMAP 0
171 Changing default word sizes:
173 INTERNAL_SIZE_T size_t
175 Configuration and functionality options:
177 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
178 USE_MALLOC_LOCK NOT defined
179 MALLOC_DEBUG NOT defined
180 REALLOC_ZERO_BYTES_FREES 1
181 TRIM_FASTBINS 0
183 Options for customizing MORECORE:
185 MORECORE sbrk
186 MORECORE_FAILURE -1
187 MORECORE_CONTIGUOUS 1
188 MORECORE_CANNOT_TRIM NOT defined
189 MORECORE_CLEARS 1
190 MMAP_AS_MORECORE_SIZE (1024 * 1024)
192 Tuning options that are also dynamically changeable via mallopt:
194 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
195 DEFAULT_TRIM_THRESHOLD 128 * 1024
196 DEFAULT_TOP_PAD 0
197 DEFAULT_MMAP_THRESHOLD 128 * 1024
198 DEFAULT_MMAP_MAX 65536
200 There are several other #defined constants and macros that you
201 probably don't want to touch unless you are extending or adapting malloc. */
204 void* is the pointer type that malloc should say it returns
207 #ifndef void
208 #define void void
209 #endif /*void*/
211 #include <stddef.h> /* for size_t */
212 #include <stdlib.h> /* for getenv(), abort() */
213 #include <unistd.h> /* for __libc_enable_secure */
215 #include <atomic.h>
216 #include <_itoa.h>
217 #include <bits/wordsize.h>
218 #include <sys/sysinfo.h>
220 #include <ldsodefs.h>
222 #include <unistd.h>
223 #include <stdio.h> /* needed for malloc_stats */
224 #include <errno.h>
225 #include <assert.h>
227 #include <shlib-compat.h>
229 /* For uintptr_t. */
230 #include <stdint.h>
232 /* For va_arg, va_start, va_end. */
233 #include <stdarg.h>
235 /* For MIN, MAX, powerof2. */
236 #include <sys/param.h>
238 /* For ALIGN_UP et. al. */
239 #include <libc-pointer-arith.h>
241 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
242 #include <libc-diag.h>
244 /* For memory tagging. */
245 #include <libc-mtag.h>
247 #include <malloc/malloc-internal.h>
249 /* For SINGLE_THREAD_P. */
250 #include <sysdep-cancel.h>
252 #include <libc-internal.h>
254 /* For tcache double-free check. */
255 #include <random-bits.h>
256 #include <sys/random.h>
257 #include <not-cancel.h>
260 Debugging:
262 Because freed chunks may be overwritten with bookkeeping fields, this
263 malloc will often die when freed memory is overwritten by user
264 programs. This can be very effective (albeit in an annoying way)
265 in helping track down dangling pointers.
267 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
268 enabled that will catch more memory errors. You probably won't be
269 able to make much sense of the actual assertion errors, but they
270 should help you locate incorrectly overwritten memory. The checking
271 is fairly extensive, and will slow down execution
272 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
273 will attempt to check every non-mmapped allocated and free chunk in
274 the course of computing the summmaries. (By nature, mmapped regions
275 cannot be checked very much automatically.)
277 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
278 this code. The assertions in the check routines spell out in more
279 detail the assumptions and invariants underlying the algorithms.
281 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
282 checking that all accesses to malloced memory stay within their
283 bounds. However, there are several add-ons and adaptations of this
284 or other mallocs available that do this.
287 #ifndef MALLOC_DEBUG
288 #define MALLOC_DEBUG 0
289 #endif
291 #if USE_TCACHE
292 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
293 # define TCACHE_MAX_BINS 64
294 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
296 /* Only used to pre-fill the tunables. */
297 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
299 /* When "x" is from chunksize(). */
300 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
301 /* When "x" is a user-provided size. */
302 # define usize2tidx(x) csize2tidx (request2size (x))
304 /* With rounding and alignment, the bins are...
305 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
306 idx 1 bytes 25..40 or 13..20
307 idx 2 bytes 41..56 or 21..28
308 etc. */
310 /* This is another arbitrary limit, which tunables can change. Each
311 tcache bin will hold at most this number of chunks. */
312 # define TCACHE_FILL_COUNT 7
314 /* Maximum chunks in tcache bins for tunables. This value must fit the range
315 of tcache->counts[] entries, else they may overflow. */
316 # define MAX_TCACHE_COUNT UINT16_MAX
317 #endif
319 /* Safe-Linking:
320 Use randomness from ASLR (mmap_base) to protect single-linked lists
321 of Fast-Bins and TCache. That is, mask the "next" pointers of the
322 lists' chunks, and also perform allocation alignment checks on them.
323 This mechanism reduces the risk of pointer hijacking, as was done with
324 Safe-Unlinking in the double-linked lists of Small-Bins.
325 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
326 larger pages provide less entropy, although the pointer mangling
327 still works. */
328 #define PROTECT_PTR(pos, ptr) \
329 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
330 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
333 The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
334 when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
335 otherwise, the call returns what malloc (0) would. In either case,
336 p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
337 implements common historical practice.
339 ISO C17 says the realloc call has implementation-defined behavior,
340 and it might not even free p.
343 #ifndef REALLOC_ZERO_BYTES_FREES
344 #define REALLOC_ZERO_BYTES_FREES 1
345 #endif
348 TRIM_FASTBINS controls whether free() of a very small chunk can
349 immediately lead to trimming. Setting to true (1) can reduce memory
350 footprint, but will almost always slow down programs that use a lot
351 of small chunks.
353 Define this only if you are willing to give up some speed to more
354 aggressively reduce system-level memory footprint when releasing
355 memory in programs that use many small chunks. You can get
356 essentially the same effect by setting MXFAST to 0, but this can
357 lead to even greater slowdowns in programs using many small chunks.
358 TRIM_FASTBINS is an in-between compile-time option, that disables
359 only those chunks bordering topmost memory from being placed in
360 fastbins.
363 #ifndef TRIM_FASTBINS
364 #define TRIM_FASTBINS 0
365 #endif
367 /* Definition for getting more memory from the OS. */
368 #include "morecore.c"
370 #define MORECORE (*__glibc_morecore)
371 #define MORECORE_FAILURE 0
373 /* Memory tagging. */
375 /* Some systems support the concept of tagging (sometimes known as
376 coloring) memory locations on a fine grained basis. Each memory
377 location is given a color (normally allocated randomly) and
378 pointers are also colored. When the pointer is dereferenced, the
379 pointer's color is checked against the memory's color and if they
380 differ the access is faulted (sometimes lazily).
382 We use this in glibc by maintaining a single color for the malloc
383 data structures that are interleaved with the user data and then
384 assigning separate colors for each block allocation handed out. In
385 this way simple buffer overruns will be rapidly detected. When
386 memory is freed, the memory is recolored back to the glibc default
387 so that simple use-after-free errors can also be detected.
389 If memory is reallocated the buffer is recolored even if the
390 address remains the same. This has a performance impact, but
391 guarantees that the old pointer cannot mistakenly be reused (code
392 that compares old against new will see a mismatch and will then
393 need to behave as though realloc moved the data to a new location).
395 Internal API for memory tagging support.
397 The aim is to keep the code for memory tagging support as close to
398 the normal APIs in glibc as possible, so that if tagging is not
399 enabled in the library, or is disabled at runtime then standard
400 operations can continue to be used. Support macros are used to do
401 this:
403 void *tag_new_zero_region (void *ptr, size_t size)
405 Allocates a new tag, colors the memory with that tag, zeros the
406 memory and returns a pointer that is correctly colored for that
407 location. The non-tagging version will simply call memset with 0.
409 void *tag_region (void *ptr, size_t size)
411 Color the region of memory pointed to by PTR and size SIZE with
412 the color of PTR. Returns the original pointer.
414 void *tag_new_usable (void *ptr)
416 Allocate a new random color and use it to color the user region of
417 a chunk; this may include data from the subsequent chunk's header
418 if tagging is sufficiently fine grained. Returns PTR suitably
419 recolored for accessing the memory there.
421 void *tag_at (void *ptr)
423 Read the current color of the memory at the address pointed to by
424 PTR (ignoring it's current color) and return PTR recolored to that
425 color. PTR must be valid address in all other respects. When
426 tagging is not enabled, it simply returns the original pointer.
429 #ifdef USE_MTAG
430 static bool mtag_enabled = false;
431 static int mtag_mmap_flags = 0;
432 #else
433 # define mtag_enabled false
434 # define mtag_mmap_flags 0
435 #endif
437 static __always_inline void *
438 tag_region (void *ptr, size_t size)
440 if (__glibc_unlikely (mtag_enabled))
441 return __libc_mtag_tag_region (ptr, size);
442 return ptr;
445 static __always_inline void *
446 tag_new_zero_region (void *ptr, size_t size)
448 if (__glibc_unlikely (mtag_enabled))
449 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
450 return memset (ptr, 0, size);
453 /* Defined later. */
454 static void *
455 tag_new_usable (void *ptr);
457 static __always_inline void *
458 tag_at (void *ptr)
460 if (__glibc_unlikely (mtag_enabled))
461 return __libc_mtag_address_get_tag (ptr);
462 return ptr;
465 #include <string.h>
468 MORECORE-related declarations. By default, rely on sbrk
473 MORECORE is the name of the routine to call to obtain more memory
474 from the system. See below for general guidance on writing
475 alternative MORECORE functions, as well as a version for WIN32 and a
476 sample version for pre-OSX macos.
479 #ifndef MORECORE
480 #define MORECORE sbrk
481 #endif
484 MORECORE_FAILURE is the value returned upon failure of MORECORE
485 as well as mmap. Since it cannot be an otherwise valid memory address,
486 and must reflect values of standard sys calls, you probably ought not
487 try to redefine it.
490 #ifndef MORECORE_FAILURE
491 #define MORECORE_FAILURE (-1)
492 #endif
495 If MORECORE_CONTIGUOUS is true, take advantage of fact that
496 consecutive calls to MORECORE with positive arguments always return
497 contiguous increasing addresses. This is true of unix sbrk. Even
498 if not defined, when regions happen to be contiguous, malloc will
499 permit allocations spanning regions obtained from different
500 calls. But defining this when applicable enables some stronger
501 consistency checks and space efficiencies.
504 #ifndef MORECORE_CONTIGUOUS
505 #define MORECORE_CONTIGUOUS 1
506 #endif
509 Define MORECORE_CANNOT_TRIM if your version of MORECORE
510 cannot release space back to the system when given negative
511 arguments. This is generally necessary only if you are using
512 a hand-crafted MORECORE function that cannot handle negative arguments.
515 /* #define MORECORE_CANNOT_TRIM */
517 /* MORECORE_CLEARS (default 1)
518 The degree to which the routine mapped to MORECORE zeroes out
519 memory: never (0), only for newly allocated space (1) or always
520 (2). The distinction between (1) and (2) is necessary because on
521 some systems, if the application first decrements and then
522 increments the break value, the contents of the reallocated space
523 are unspecified.
526 #ifndef MORECORE_CLEARS
527 # define MORECORE_CLEARS 1
528 #endif
532 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
533 sbrk fails, and mmap is used as a backup. The value must be a
534 multiple of page size. This backup strategy generally applies only
535 when systems have "holes" in address space, so sbrk cannot perform
536 contiguous expansion, but there is still space available on system.
537 On systems for which this is known to be useful (i.e. most linux
538 kernels), this occurs only when programs allocate huge amounts of
539 memory. Between this, and the fact that mmap regions tend to be
540 limited, the size should be large, to avoid too many mmap calls and
541 thus avoid running out of kernel resources. */
543 #ifndef MMAP_AS_MORECORE_SIZE
544 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
545 #endif
548 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
549 large blocks.
552 #ifndef HAVE_MREMAP
553 #define HAVE_MREMAP 0
554 #endif
557 This version of malloc supports the standard SVID/XPG mallinfo
558 routine that returns a struct containing usage properties and
559 statistics. It should work on any SVID/XPG compliant system that has
560 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
561 install such a thing yourself, cut out the preliminary declarations
562 as described above and below and save them in a malloc.h file. But
563 there's no compelling reason to bother to do this.)
565 The main declaration needed is the mallinfo struct that is returned
566 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
567 bunch of fields that are not even meaningful in this version of
568 malloc. These fields are are instead filled by mallinfo() with
569 other numbers that might be of interest.
573 /* ---------- description of public routines ------------ */
575 #if IS_IN (libc)
577 malloc(size_t n)
578 Returns a pointer to a newly allocated chunk of at least n bytes, or null
579 if no space is available. Additionally, on failure, errno is
580 set to ENOMEM on ANSI C systems.
582 If n is zero, malloc returns a minimum-sized chunk. (The minimum
583 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
584 systems.) On most systems, size_t is an unsigned type, so calls
585 with negative arguments are interpreted as requests for huge amounts
586 of space, which will often fail. The maximum supported value of n
587 differs across systems, but is in all cases less than the maximum
588 representable value of a size_t.
590 void* __libc_malloc(size_t);
591 libc_hidden_proto (__libc_malloc)
594 free(void* p)
595 Releases the chunk of memory pointed to by p, that had been previously
596 allocated using malloc or a related routine such as realloc.
597 It has no effect if p is null. It can have arbitrary (i.e., bad!)
598 effects if p has already been freed.
600 Unless disabled (using mallopt), freeing very large spaces will
601 when possible, automatically trigger operations that give
602 back unused memory to the system, thus reducing program footprint.
604 void __libc_free(void*);
605 libc_hidden_proto (__libc_free)
608 calloc(size_t n_elements, size_t element_size);
609 Returns a pointer to n_elements * element_size bytes, with all locations
610 set to zero.
612 void* __libc_calloc(size_t, size_t);
615 realloc(void* p, size_t n)
616 Returns a pointer to a chunk of size n that contains the same data
617 as does chunk p up to the minimum of (n, p's size) bytes, or null
618 if no space is available.
620 The returned pointer may or may not be the same as p. The algorithm
621 prefers extending p when possible, otherwise it employs the
622 equivalent of a malloc-copy-free sequence.
624 If p is null, realloc is equivalent to malloc.
626 If space is not available, realloc returns null, errno is set (if on
627 ANSI) and p is NOT freed.
629 if n is for fewer bytes than already held by p, the newly unused
630 space is lopped off and freed if possible. Unless the #define
631 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
632 zero (re)allocates a minimum-sized chunk.
634 Large chunks that were internally obtained via mmap will always be
635 grown using malloc-copy-free sequences unless the system supports
636 MREMAP (currently only linux).
638 The old unix realloc convention of allowing the last-free'd chunk
639 to be used as an argument to realloc is not supported.
641 void* __libc_realloc(void*, size_t);
642 libc_hidden_proto (__libc_realloc)
645 memalign(size_t alignment, size_t n);
646 Returns a pointer to a newly allocated chunk of n bytes, aligned
647 in accord with the alignment argument.
649 The alignment argument should be a power of two. If the argument is
650 not a power of two, the nearest greater power is used.
651 8-byte alignment is guaranteed by normal malloc calls, so don't
652 bother calling memalign with an argument of 8 or less.
654 Overreliance on memalign is a sure way to fragment space.
656 void* __libc_memalign(size_t, size_t);
657 libc_hidden_proto (__libc_memalign)
660 valloc(size_t n);
661 Equivalent to memalign(pagesize, n), where pagesize is the page
662 size of the system. If the pagesize is unknown, 4096 is used.
664 void* __libc_valloc(size_t);
669 mallinfo()
670 Returns (by copy) a struct containing various summary statistics:
672 arena: current total non-mmapped bytes allocated from system
673 ordblks: the number of free chunks
674 smblks: the number of fastbin blocks (i.e., small chunks that
675 have been freed but not use resused or consolidated)
676 hblks: current number of mmapped regions
677 hblkhd: total bytes held in mmapped regions
678 usmblks: always 0
679 fsmblks: total bytes held in fastbin blocks
680 uordblks: current total allocated space (normal or mmapped)
681 fordblks: total free space
682 keepcost: the maximum number of bytes that could ideally be released
683 back to system via malloc_trim. ("ideally" means that
684 it ignores page restrictions etc.)
686 Because these fields are ints, but internal bookkeeping may
687 be kept as longs, the reported values may wrap around zero and
688 thus be inaccurate.
690 struct mallinfo2 __libc_mallinfo2(void);
691 libc_hidden_proto (__libc_mallinfo2)
693 struct mallinfo __libc_mallinfo(void);
697 pvalloc(size_t n);
698 Equivalent to valloc(minimum-page-that-holds(n)), that is,
699 round up n to nearest pagesize.
701 void* __libc_pvalloc(size_t);
704 malloc_trim(size_t pad);
706 If possible, gives memory back to the system (via negative
707 arguments to sbrk) if there is unused memory at the `high' end of
708 the malloc pool. You can call this after freeing large blocks of
709 memory to potentially reduce the system-level memory requirements
710 of a program. However, it cannot guarantee to reduce memory. Under
711 some allocation patterns, some large free blocks of memory will be
712 locked between two used chunks, so they cannot be given back to
713 the system.
715 The `pad' argument to malloc_trim represents the amount of free
716 trailing space to leave untrimmed. If this argument is zero,
717 only the minimum amount of memory to maintain internal data
718 structures will be left (one page or less). Non-zero arguments
719 can be supplied to maintain enough trailing space to service
720 future expected allocations without having to re-obtain memory
721 from the system.
723 Malloc_trim returns 1 if it actually released any memory, else 0.
724 On systems that do not support "negative sbrks", it will always
725 return 0.
727 int __malloc_trim(size_t);
730 malloc_usable_size(void* p);
732 Returns the number of bytes you can actually use in
733 an allocated chunk, which may be more than you requested (although
734 often not) due to alignment and minimum size constraints.
735 You can use this many bytes without worrying about
736 overwriting other allocated objects. This is not a particularly great
737 programming practice. malloc_usable_size can be more useful in
738 debugging and assertions, for example:
740 p = malloc(n);
741 assert(malloc_usable_size(p) >= 256);
744 size_t __malloc_usable_size(void*);
747 malloc_stats();
748 Prints on stderr the amount of space obtained from the system (both
749 via sbrk and mmap), the maximum amount (which may be more than
750 current if malloc_trim and/or munmap got called), and the current
751 number of bytes allocated via malloc (or realloc, etc) but not yet
752 freed. Note that this is the number of bytes allocated, not the
753 number requested. It will be larger than the number requested
754 because of alignment and bookkeeping overhead. Because it includes
755 alignment wastage as being in use, this figure may be greater than
756 zero even when no user-level chunks are allocated.
758 The reported current and maximum system memory can be inaccurate if
759 a program makes other calls to system memory allocation functions
760 (normally sbrk) outside of malloc.
762 malloc_stats prints only the most commonly interesting statistics.
763 More information can be obtained by calling mallinfo.
766 void __malloc_stats(void);
769 posix_memalign(void **memptr, size_t alignment, size_t size);
771 POSIX wrapper like memalign(), checking for validity of size.
773 int __posix_memalign(void **, size_t, size_t);
774 #endif /* IS_IN (libc) */
777 mallopt(int parameter_number, int parameter_value)
778 Sets tunable parameters The format is to provide a
779 (parameter-number, parameter-value) pair. mallopt then sets the
780 corresponding parameter to the argument value if it can (i.e., so
781 long as the value is meaningful), and returns 1 if successful else
782 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
783 normally defined in malloc.h. Only one of these (M_MXFAST) is used
784 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
785 so setting them has no effect. But this malloc also supports four
786 other options in mallopt. See below for details. Briefly, supported
787 parameters are as follows (listed defaults are for "typical"
788 configurations).
790 Symbol param # default allowed param values
791 M_MXFAST 1 64 0-80 (0 disables fastbins)
792 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
793 M_TOP_PAD -2 0 any
794 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
795 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
797 int __libc_mallopt(int, int);
798 #if IS_IN (libc)
799 libc_hidden_proto (__libc_mallopt)
800 #endif
802 /* mallopt tuning options */
805 M_MXFAST is the maximum request size used for "fastbins", special bins
806 that hold returned chunks without consolidating their spaces. This
807 enables future requests for chunks of the same size to be handled
808 very quickly, but can increase fragmentation, and thus increase the
809 overall memory footprint of a program.
811 This malloc manages fastbins very conservatively yet still
812 efficiently, so fragmentation is rarely a problem for values less
813 than or equal to the default. The maximum supported value of MXFAST
814 is 80. You wouldn't want it any higher than this anyway. Fastbins
815 are designed especially for use with many small structs, objects or
816 strings -- the default handles structs/objects/arrays with sizes up
817 to 8 4byte fields, or small strings representing words, tokens,
818 etc. Using fastbins for larger objects normally worsens
819 fragmentation without improving speed.
821 M_MXFAST is set in REQUEST size units. It is internally used in
822 chunksize units, which adds padding and alignment. You can reduce
823 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
824 algorithm to be a closer approximation of fifo-best-fit in all cases,
825 not just for larger requests, but will generally cause it to be
826 slower.
830 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
831 #ifndef M_MXFAST
832 #define M_MXFAST 1
833 #endif
835 #ifndef DEFAULT_MXFAST
836 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
837 #endif
841 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
842 to keep before releasing via malloc_trim in free().
844 Automatic trimming is mainly useful in long-lived programs.
845 Because trimming via sbrk can be slow on some systems, and can
846 sometimes be wasteful (in cases where programs immediately
847 afterward allocate more large chunks) the value should be high
848 enough so that your overall system performance would improve by
849 releasing this much memory.
851 The trim threshold and the mmap control parameters (see below)
852 can be traded off with one another. Trimming and mmapping are
853 two different ways of releasing unused memory back to the
854 system. Between these two, it is often possible to keep
855 system-level demands of a long-lived program down to a bare
856 minimum. For example, in one test suite of sessions measuring
857 the XF86 X server on Linux, using a trim threshold of 128K and a
858 mmap threshold of 192K led to near-minimal long term resource
859 consumption.
861 If you are using this malloc in a long-lived program, it should
862 pay to experiment with these values. As a rough guide, you
863 might set to a value close to the average size of a process
864 (program) running on your system. Releasing this much memory
865 would allow such a process to run in memory. Generally, it's
866 worth it to tune for trimming rather tham memory mapping when a
867 program undergoes phases where several large chunks are
868 allocated and released in ways that can reuse each other's
869 storage, perhaps mixed with phases where there are no such
870 chunks at all. And in well-behaved long-lived programs,
871 controlling release of large blocks via trimming versus mapping
872 is usually faster.
874 However, in most programs, these parameters serve mainly as
875 protection against the system-level effects of carrying around
876 massive amounts of unneeded memory. Since frequent calls to
877 sbrk, mmap, and munmap otherwise degrade performance, the default
878 parameters are set to relatively high values that serve only as
879 safeguards.
881 The trim value It must be greater than page size to have any useful
882 effect. To disable trimming completely, you can set to
883 (unsigned long)(-1)
885 Trim settings interact with fastbin (MXFAST) settings: Unless
886 TRIM_FASTBINS is defined, automatic trimming never takes place upon
887 freeing a chunk with size less than or equal to MXFAST. Trimming is
888 instead delayed until subsequent freeing of larger chunks. However,
889 you can still force an attempted trim by calling malloc_trim.
891 Also, trimming is not generally possible in cases where
892 the main arena is obtained via mmap.
894 Note that the trick some people use of mallocing a huge space and
895 then freeing it at program startup, in an attempt to reserve system
896 memory, doesn't have the intended effect under automatic trimming,
897 since that memory will immediately be returned to the system.
900 #define M_TRIM_THRESHOLD -1
902 #ifndef DEFAULT_TRIM_THRESHOLD
903 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
904 #endif
907 M_TOP_PAD is the amount of extra `padding' space to allocate or
908 retain whenever sbrk is called. It is used in two ways internally:
910 * When sbrk is called to extend the top of the arena to satisfy
911 a new malloc request, this much padding is added to the sbrk
912 request.
914 * When malloc_trim is called automatically from free(),
915 it is used as the `pad' argument.
917 In both cases, the actual amount of padding is rounded
918 so that the end of the arena is always a system page boundary.
920 The main reason for using padding is to avoid calling sbrk so
921 often. Having even a small pad greatly reduces the likelihood
922 that nearly every malloc request during program start-up (or
923 after trimming) will invoke sbrk, which needlessly wastes
924 time.
926 Automatic rounding-up to page-size units is normally sufficient
927 to avoid measurable overhead, so the default is 0. However, in
928 systems where sbrk is relatively slow, it can pay to increase
929 this value, at the expense of carrying around more memory than
930 the program needs.
933 #define M_TOP_PAD -2
935 #ifndef DEFAULT_TOP_PAD
936 #define DEFAULT_TOP_PAD (0)
937 #endif
940 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
941 adjusted MMAP_THRESHOLD.
944 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
945 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
946 #endif
948 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
949 /* For 32-bit platforms we cannot increase the maximum mmap
950 threshold much because it is also the minimum value for the
951 maximum heap size and its alignment. Going above 512k (i.e., 1M
952 for new heaps) wastes too much address space. */
953 # if __WORDSIZE == 32
954 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
955 # else
956 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
957 # endif
958 #endif
961 M_MMAP_THRESHOLD is the request size threshold for using mmap()
962 to service a request. Requests of at least this size that cannot
963 be allocated using already-existing space will be serviced via mmap.
964 (If enough normal freed space already exists it is used instead.)
966 Using mmap segregates relatively large chunks of memory so that
967 they can be individually obtained and released from the host
968 system. A request serviced through mmap is never reused by any
969 other request (at least not directly; the system may just so
970 happen to remap successive requests to the same locations).
972 Segregating space in this way has the benefits that:
974 1. Mmapped space can ALWAYS be individually released back
975 to the system, which helps keep the system level memory
976 demands of a long-lived program low.
977 2. Mapped memory can never become `locked' between
978 other chunks, as can happen with normally allocated chunks, which
979 means that even trimming via malloc_trim would not release them.
980 3. On some systems with "holes" in address spaces, mmap can obtain
981 memory that sbrk cannot.
983 However, it has the disadvantages that:
985 1. The space cannot be reclaimed, consolidated, and then
986 used to service later requests, as happens with normal chunks.
987 2. It can lead to more wastage because of mmap page alignment
988 requirements
989 3. It causes malloc performance to be more dependent on host
990 system memory management support routines which may vary in
991 implementation quality and may impose arbitrary
992 limitations. Generally, servicing a request via normal
993 malloc steps is faster than going through a system's mmap.
995 The advantages of mmap nearly always outweigh disadvantages for
996 "large" chunks, but the value of "large" varies across systems. The
997 default is an empirically derived value that works well in most
998 systems.
1001 Update in 2006:
1002 The above was written in 2001. Since then the world has changed a lot.
1003 Memory got bigger. Applications got bigger. The virtual address space
1004 layout in 32 bit linux changed.
1006 In the new situation, brk() and mmap space is shared and there are no
1007 artificial limits on brk size imposed by the kernel. What is more,
1008 applications have started using transient allocations larger than the
1009 128Kb as was imagined in 2001.
1011 The price for mmap is also high now; each time glibc mmaps from the
1012 kernel, the kernel is forced to zero out the memory it gives to the
1013 application. Zeroing memory is expensive and eats a lot of cache and
1014 memory bandwidth. This has nothing to do with the efficiency of the
1015 virtual memory system, by doing mmap the kernel just has no choice but
1016 to zero.
1018 In 2001, the kernel had a maximum size for brk() which was about 800
1019 megabytes on 32 bit x86, at that point brk() would hit the first
1020 mmaped shared libaries and couldn't expand anymore. With current 2.6
1021 kernels, the VA space layout is different and brk() and mmap
1022 both can span the entire heap at will.
1024 Rather than using a static threshold for the brk/mmap tradeoff,
1025 we are now using a simple dynamic one. The goal is still to avoid
1026 fragmentation. The old goals we kept are
1027 1) try to get the long lived large allocations to use mmap()
1028 2) really large allocations should always use mmap()
1029 and we're adding now:
1030 3) transient allocations should use brk() to avoid forcing the kernel
1031 having to zero memory over and over again
1033 The implementation works with a sliding threshold, which is by default
1034 limited to go between 128Kb and 32Mb (64Mb for 64 bitmachines) and starts
1035 out at 128Kb as per the 2001 default.
1037 This allows us to satisfy requirement 1) under the assumption that long
1038 lived allocations are made early in the process' lifespan, before it has
1039 started doing dynamic allocations of the same size (which will
1040 increase the threshold).
1042 The upperbound on the threshold satisfies requirement 2)
1044 The threshold goes up in value when the application frees memory that was
1045 allocated with the mmap allocator. The idea is that once the application
1046 starts freeing memory of a certain size, it's highly probable that this is
1047 a size the application uses for transient allocations. This estimator
1048 is there to satisfy the new third requirement.
1052 #define M_MMAP_THRESHOLD -3
1054 #ifndef DEFAULT_MMAP_THRESHOLD
1055 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1056 #endif
1059 M_MMAP_MAX is the maximum number of requests to simultaneously
1060 service using mmap. This parameter exists because
1061 some systems have a limited number of internal tables for
1062 use by mmap, and using more than a few of them may degrade
1063 performance.
1065 The default is set to a value that serves only as a safeguard.
1066 Setting to 0 disables use of mmap for servicing large requests.
1069 #define M_MMAP_MAX -4
1071 #ifndef DEFAULT_MMAP_MAX
1072 #define DEFAULT_MMAP_MAX (65536)
1073 #endif
1075 #include <malloc.h>
1077 #ifndef RETURN_ADDRESS
1078 #define RETURN_ADDRESS(X_) (NULL)
1079 #endif
1081 /* Forward declarations. */
1082 struct malloc_chunk;
1083 typedef struct malloc_chunk* mchunkptr;
1085 /* Internal routines. */
1087 static void* _int_malloc(mstate, size_t);
1088 static void _int_free(mstate, mchunkptr, int);
1089 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1090 INTERNAL_SIZE_T);
1091 static void* _int_memalign(mstate, size_t, size_t);
1092 #if IS_IN (libc)
1093 static void* _mid_memalign(size_t, size_t, void *);
1094 #endif
1096 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1098 static void munmap_chunk(mchunkptr p);
1099 #if HAVE_MREMAP
1100 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1101 #endif
1103 static size_t musable (void *mem);
1105 /* ------------------ MMAP support ------------------ */
1108 #include <fcntl.h>
1109 #include <sys/mman.h>
1111 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1112 # define MAP_ANONYMOUS MAP_ANON
1113 #endif
1115 #define MMAP(addr, size, prot, flags) \
1116 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1120 ----------------------- Chunk representations -----------------------
1125 This struct declaration is misleading (but accurate and necessary).
1126 It declares a "view" into memory allowing access to necessary
1127 fields at known offsets from a given base. See explanation below.
1130 struct malloc_chunk {
1132 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1133 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1135 struct malloc_chunk* fd; /* double links -- used only if free. */
1136 struct malloc_chunk* bk;
1138 /* Only used for large blocks: pointer to next larger size. */
1139 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1140 struct malloc_chunk* bk_nextsize;
1145 malloc_chunk details:
1147 (The following includes lightly edited explanations by Colin Plumb.)
1149 Chunks of memory are maintained using a `boundary tag' method as
1150 described in e.g., Knuth or Standish. (See the paper by Paul
1151 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1152 survey of such techniques.) Sizes of free chunks are stored both
1153 in the front of each chunk and at the end. This makes
1154 consolidating fragmented chunks into bigger chunks very fast. The
1155 size fields also hold bits representing whether chunks are free or
1156 in use.
1158 An allocated chunk looks like this:
1161 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1162 | Size of previous chunk, if unallocated (P clear) |
1163 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1164 | Size of chunk, in bytes |A|M|P|
1165 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1166 | User data starts here... .
1168 . (malloc_usable_size() bytes) .
1170 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1171 | (size of chunk, but used for application data) |
1172 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1173 | Size of next chunk, in bytes |A|0|1|
1174 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1176 Where "chunk" is the front of the chunk for the purpose of most of
1177 the malloc code, but "mem" is the pointer that is returned to the
1178 user. "Nextchunk" is the beginning of the next contiguous chunk.
1180 Chunks always begin on even word boundaries, so the mem portion
1181 (which is returned to the user) is also on an even word boundary, and
1182 thus at least double-word aligned.
1184 Free chunks are stored in circular doubly-linked lists, and look like this:
1186 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1187 | Size of previous chunk, if unallocated (P clear) |
1188 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1189 `head:' | Size of chunk, in bytes |A|0|P|
1190 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1191 | Forward pointer to next chunk in list |
1192 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1193 | Back pointer to previous chunk in list |
1194 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1195 | Unused space (may be 0 bytes long) .
1198 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 `foot:' | Size of chunk, in bytes |
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1201 | Size of next chunk, in bytes |A|0|0|
1202 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1204 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1205 chunk size (which is always a multiple of two words), is an in-use
1206 bit for the *previous* chunk. If that bit is *clear*, then the
1207 word before the current chunk size contains the previous chunk
1208 size, and can be used to find the front of the previous chunk.
1209 The very first chunk allocated always has this bit set,
1210 preventing access to non-existent (or non-owned) memory. If
1211 prev_inuse is set for any given chunk, then you CANNOT determine
1212 the size of the previous chunk, and might even get a memory
1213 addressing fault when trying to do so.
1215 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1216 main arena, described by the main_arena variable. When additional
1217 threads are spawned, each thread receives its own arena (up to a
1218 configurable limit, after which arenas are reused for multiple
1219 threads), and the chunks in these arenas have the A bit set. To
1220 find the arena for a chunk on such a non-main arena, heap_for_ptr
1221 performs a bit mask operation and indirection through the ar_ptr
1222 member of the per-heap header heap_info (see arena.c).
1224 Note that the `foot' of the current chunk is actually represented
1225 as the prev_size of the NEXT chunk. This makes it easier to
1226 deal with alignments etc but can be very confusing when trying
1227 to extend or adapt this code.
1229 The three exceptions to all this are:
1231 1. The special chunk `top' doesn't bother using the
1232 trailing size field since there is no next contiguous chunk
1233 that would have to index off it. After initialization, `top'
1234 is forced to always exist. If it would become less than
1235 MINSIZE bytes long, it is replenished.
1237 2. Chunks allocated via mmap, which have the second-lowest-order
1238 bit M (IS_MMAPPED) set in their size fields. Because they are
1239 allocated one-by-one, each must contain its own trailing size
1240 field. If the M bit is set, the other bits are ignored
1241 (because mmapped chunks are neither in an arena, nor adjacent
1242 to a freed chunk). The M bit is also used for chunks which
1243 originally came from a dumped heap via malloc_set_state in
1244 hooks.c.
1246 3. Chunks in fastbins are treated as allocated chunks from the
1247 point of view of the chunk allocator. They are consolidated
1248 with their neighbors only in bulk, in malloc_consolidate.
1252 ---------- Size and alignment checks and conversions ----------
1255 /* Conversion from malloc headers to user pointers, and back. When
1256 using memory tagging the user data and the malloc data structure
1257 headers have distinct tags. Converting fully from one to the other
1258 involves extracting the tag at the other address and creating a
1259 suitable pointer using it. That can be quite expensive. There are
1260 cases when the pointers are not dereferenced (for example only used
1261 for alignment check) so the tags are not relevant, and there are
1262 cases when user data is not tagged distinctly from malloc headers
1263 (user data is untagged because tagging is done late in malloc and
1264 early in free). User memory tagging across internal interfaces:
1266 sysmalloc: Returns untagged memory.
1267 _int_malloc: Returns untagged memory.
1268 _int_free: Takes untagged memory.
1269 _int_memalign: Returns untagged memory.
1270 _int_memalign: Returns untagged memory.
1271 _mid_memalign: Returns tagged memory.
1272 _int_realloc: Takes and returns tagged memory.
1275 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1276 we define it here for clarity later. */
1277 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1279 /* Convert a chunk address to a user mem pointer without correcting
1280 the tag. */
1281 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1283 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1284 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1286 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1287 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
1289 /* The smallest possible chunk */
1290 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1292 /* The smallest size we can malloc is an aligned minimal chunk */
1294 #define MINSIZE \
1295 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1297 /* Check if m has acceptable alignment */
1299 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1301 #define misaligned_chunk(p) \
1302 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1303 & MALLOC_ALIGN_MASK)
1305 /* pad request bytes into a usable size -- internal version */
1306 /* Note: This must be a macro that evaluates to a compile time constant
1307 if passed a literal constant. */
1308 #define request2size(req) \
1309 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1310 MINSIZE : \
1311 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1313 /* Check if REQ overflows when padded and aligned and if the resulting
1314 value is less than PTRDIFF_T. Returns the requested size or
1315 MINSIZE in case the value is less than MINSIZE, or 0 if any of the
1316 previous checks fail. */
1317 static inline size_t
1318 checked_request2size (size_t req) __nonnull (1)
1320 if (__glibc_unlikely (req > PTRDIFF_MAX))
1321 return 0;
1323 /* When using tagged memory, we cannot share the end of the user
1324 block with the header for the next chunk, so ensure that we
1325 allocate blocks that are rounded up to the granule size. Take
1326 care not to overflow from close to MAX_SIZE_T to a small
1327 number. Ideally, this would be part of request2size(), but that
1328 must be a macro that produces a compile time constant if passed
1329 a constant literal. */
1330 if (__glibc_unlikely (mtag_enabled))
1332 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1333 asm ("");
1335 req = (req + (__MTAG_GRANULE_SIZE - 1)) &
1336 ~(size_t)(__MTAG_GRANULE_SIZE - 1);
1339 return request2size (req);
1343 --------------- Physical chunk operations ---------------
1347 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1348 #define PREV_INUSE 0x1
1350 /* extract inuse bit of previous chunk */
1351 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1354 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1355 #define IS_MMAPPED 0x2
1357 /* check for mmap()'ed chunk */
1358 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1361 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1362 from a non-main arena. This is only set immediately before handing
1363 the chunk to the user, if necessary. */
1364 #define NON_MAIN_ARENA 0x4
1366 /* Check for chunk from main arena. */
1367 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1369 /* Mark a chunk as not being on the main arena. */
1370 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1374 Bits to mask off when extracting size
1376 Note: IS_MMAPPED is intentionally not masked off from size field in
1377 macros for which mmapped chunks should never be seen. This should
1378 cause helpful core dumps to occur if it is tried by accident by
1379 people extending or adapting this malloc.
1381 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1383 /* Get size, ignoring use bits */
1384 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1386 /* Like chunksize, but do not mask SIZE_BITS. */
1387 #define chunksize_nomask(p) ((p)->mchunk_size)
1389 /* Ptr to next physical malloc_chunk. */
1390 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1392 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1393 #define prev_size(p) ((p)->mchunk_prev_size)
1395 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1396 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1398 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1399 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1401 /* Treat space at ptr + offset as a chunk */
1402 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1404 /* extract p's inuse bit */
1405 #define inuse(p) \
1406 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1408 /* set/clear chunk as being inuse without otherwise disturbing */
1409 #define set_inuse(p) \
1410 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1412 #define clear_inuse(p) \
1413 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1416 /* check/set/clear inuse bits in known places */
1417 #define inuse_bit_at_offset(p, s) \
1418 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1420 #define set_inuse_bit_at_offset(p, s) \
1421 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1423 #define clear_inuse_bit_at_offset(p, s) \
1424 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1427 /* Set size at head, without disturbing its use bit */
1428 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1430 /* Set size/use field */
1431 #define set_head(p, s) ((p)->mchunk_size = (s))
1433 /* Set size at footer (only when chunk is not in use) */
1434 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1436 #pragma GCC poison mchunk_size
1437 #pragma GCC poison mchunk_prev_size
1439 /* This is the size of the real usable data in the chunk. Not valid for
1440 dumped heap chunks. */
1441 #define memsize(p) \
1442 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1443 chunksize (p) - CHUNK_HDR_SZ : \
1444 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1446 /* If memory tagging is enabled the layout changes to accommodate the granule
1447 size, this is wasteful for small allocations so not done by default.
1448 Both the chunk header and user data has to be granule aligned. */
1449 _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
1450 "memory tagging is not supported with large granule.");
1452 static __always_inline void *
1453 tag_new_usable (void *ptr)
1455 if (__glibc_unlikely (mtag_enabled) && ptr)
1457 mchunkptr cp = mem2chunk(ptr);
1458 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1460 return ptr;
1464 -------------------- Internal data structures --------------------
1466 All internal state is held in an instance of malloc_state defined
1467 below. There are no other static variables, except in two optional
1468 cases:
1469 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1470 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1471 for mmap.
1473 Beware of lots of tricks that minimize the total bookkeeping space
1474 requirements. The result is a little over 1K bytes (for 4byte
1475 pointers and size_t.)
1479 Bins
1481 An array of bin headers for free chunks. Each bin is doubly
1482 linked. The bins are approximately proportionally (log) spaced.
1483 There are a lot of these bins (128). This may look excessive, but
1484 works very well in practice. Most bins hold sizes that are
1485 unusual as malloc request sizes, but are more usual for fragments
1486 and consolidated sets of chunks, which is what these bins hold, so
1487 they can be found quickly. All procedures maintain the invariant
1488 that no consolidated chunk physically borders another one, so each
1489 chunk in a list is known to be preceeded and followed by either
1490 inuse chunks or the ends of memory.
1492 Chunks in bins are kept in size order, with ties going to the
1493 approximately least recently used chunk. Ordering isn't needed
1494 for the small bins, which all contain the same-sized chunks, but
1495 facilitates best-fit allocation for larger chunks. These lists
1496 are just sequential. Keeping them in order almost never requires
1497 enough traversal to warrant using fancier ordered data
1498 structures.
1500 Chunks of the same size are linked with the most
1501 recently freed at the front, and allocations are taken from the
1502 back. This results in LRU (FIFO) allocation order, which tends
1503 to give each chunk an equal opportunity to be consolidated with
1504 adjacent freed chunks, resulting in larger free chunks and less
1505 fragmentation.
1507 To simplify use in double-linked lists, each bin header acts
1508 as a malloc_chunk. This avoids special-casing for headers.
1509 But to conserve space and improve locality, we allocate
1510 only the fd/bk pointers of bins, and then use repositioning tricks
1511 to treat these as the fields of a malloc_chunk*.
1514 typedef struct malloc_chunk *mbinptr;
1516 /* addressing -- note that bin_at(0) does not exist */
1517 #define bin_at(m, i) \
1518 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1519 - offsetof (struct malloc_chunk, fd))
1521 /* analog of ++bin */
1522 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1524 /* Reminders about list directionality within bins */
1525 #define first(b) ((b)->fd)
1526 #define last(b) ((b)->bk)
1529 Indexing
1531 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1532 8 bytes apart. Larger bins are approximately logarithmically spaced:
1534 64 bins of size 8
1535 32 bins of size 64
1536 16 bins of size 512
1537 8 bins of size 4096
1538 4 bins of size 32768
1539 2 bins of size 262144
1540 1 bin of size what's left
1542 There is actually a little bit of slop in the numbers in bin_index
1543 for the sake of speed. This makes no difference elsewhere.
1545 The bins top out around 1MB because we expect to service large
1546 requests via mmap.
1548 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1549 a valid chunk size the small bins are bumped up one.
1552 #define NBINS 128
1553 #define NSMALLBINS 64
1554 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1555 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1556 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1558 #define in_smallbin_range(sz) \
1559 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1561 #define smallbin_index(sz) \
1562 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1563 + SMALLBIN_CORRECTION)
1565 #define largebin_index_32(sz) \
1566 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1567 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1568 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1569 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1570 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1571 126)
1573 #define largebin_index_32_big(sz) \
1574 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1575 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1576 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1577 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1578 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1579 126)
1581 // XXX It remains to be seen whether it is good to keep the widths of
1582 // XXX the buckets the same or whether it should be scaled by a factor
1583 // XXX of two as well.
1584 #define largebin_index_64(sz) \
1585 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1586 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1587 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1588 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1589 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1590 126)
1592 #define largebin_index(sz) \
1593 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1594 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1595 : largebin_index_32 (sz))
1597 #define bin_index(sz) \
1598 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
1600 /* Take a chunk off a bin list. */
1601 static void
1602 unlink_chunk (mstate av, mchunkptr p)
1604 if (chunksize (p) != prev_size (next_chunk (p)))
1605 malloc_printerr ("corrupted size vs. prev_size");
1607 mchunkptr fd = p->fd;
1608 mchunkptr bk = p->bk;
1610 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1611 malloc_printerr ("corrupted double-linked list");
1613 fd->bk = bk;
1614 bk->fd = fd;
1615 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1617 if (p->fd_nextsize->bk_nextsize != p
1618 || p->bk_nextsize->fd_nextsize != p)
1619 malloc_printerr ("corrupted double-linked list (not small)");
1621 if (fd->fd_nextsize == NULL)
1623 if (p->fd_nextsize == p)
1624 fd->fd_nextsize = fd->bk_nextsize = fd;
1625 else
1627 fd->fd_nextsize = p->fd_nextsize;
1628 fd->bk_nextsize = p->bk_nextsize;
1629 p->fd_nextsize->bk_nextsize = fd;
1630 p->bk_nextsize->fd_nextsize = fd;
1633 else
1635 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1636 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1642 Unsorted chunks
1644 All remainders from chunk splits, as well as all returned chunks,
1645 are first placed in the "unsorted" bin. They are then placed
1646 in regular bins after malloc gives them ONE chance to be used before
1647 binning. So, basically, the unsorted_chunks list acts as a queue,
1648 with chunks being placed on it in free (and malloc_consolidate),
1649 and taken off (to be either used or placed in bins) in malloc.
1651 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1652 does not have to be taken into account in size comparisons.
1655 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1656 #define unsorted_chunks(M) (bin_at (M, 1))
1661 The top-most available chunk (i.e., the one bordering the end of
1662 available memory) is treated specially. It is never included in
1663 any bin, is used only if no other chunk is available, and is
1664 released back to the system if it is very large (see
1665 M_TRIM_THRESHOLD). Because top initially
1666 points to its own bin with initial zero size, thus forcing
1667 extension on the first malloc request, we avoid having any special
1668 code in malloc to check whether it even exists yet. But we still
1669 need to do so when getting memory from system, so we make
1670 initial_top treat the bin as a legal but unusable chunk during the
1671 interval between initialization and the first call to
1672 sysmalloc. (This is somewhat delicate, since it relies on
1673 the 2 preceding words to be zero during this interval as well.)
1676 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1677 #define initial_top(M) (unsorted_chunks (M))
1680 Binmap
1682 To help compensate for the large number of bins, a one-level index
1683 structure is used for bin-by-bin searching. `binmap' is a
1684 bitvector recording whether bins are definitely empty so they can
1685 be skipped over during during traversals. The bits are NOT always
1686 cleared as soon as bins are empty, but instead only
1687 when they are noticed to be empty during traversal in malloc.
1690 /* Conservatively use 32 bits per map word, even if on 64bit system */
1691 #define BINMAPSHIFT 5
1692 #define BITSPERMAP (1U << BINMAPSHIFT)
1693 #define BINMAPSIZE (NBINS / BITSPERMAP)
1695 #define idx2block(i) ((i) >> BINMAPSHIFT)
1696 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1698 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1699 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1700 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
1703 Fastbins
1705 An array of lists holding recently freed small chunks. Fastbins
1706 are not doubly linked. It is faster to single-link them, and
1707 since chunks are never removed from the middles of these lists,
1708 double linking is not necessary. Also, unlike regular bins, they
1709 are not even processed in FIFO order (they use faster LIFO) since
1710 ordering doesn't much matter in the transient contexts in which
1711 fastbins are normally used.
1713 Chunks in fastbins keep their inuse bit set, so they cannot
1714 be consolidated with other free chunks. malloc_consolidate
1715 releases all chunks in fastbins and consolidates them with
1716 other free chunks.
1719 typedef struct malloc_chunk *mfastbinptr;
1720 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1722 /* offset 2 to use otherwise unindexable first 2 bins */
1723 #define fastbin_index(sz) \
1724 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1727 /* The maximum fastbin request size we support */
1728 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1730 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
1733 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1734 that triggers automatic consolidation of possibly-surrounding
1735 fastbin chunks. This is a heuristic, so the exact value should not
1736 matter too much. It is defined at half the default trim threshold as a
1737 compromise heuristic to only attempt consolidation if it is likely
1738 to lead to trimming. However, it is not dynamically tunable, since
1739 consolidation reduces fragmentation surrounding large chunks even
1740 if trimming is not used.
1743 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1746 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1747 regions. Otherwise, contiguity is exploited in merging together,
1748 when possible, results from consecutive MORECORE calls.
1750 The initial value comes from MORECORE_CONTIGUOUS, but is
1751 changed dynamically if mmap is ever used as an sbrk substitute.
1754 #define NONCONTIGUOUS_BIT (2U)
1756 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1757 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1758 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1759 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1761 /* Maximum size of memory handled in fastbins. */
1762 static uint8_t global_max_fast;
1765 Set value of max_fast.
1766 Use impossibly small value if 0.
1767 Precondition: there are no existing fastbin chunks in the main arena.
1768 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1769 before changing max_fast. Note other arenas will leak their fast bin
1770 entries if max_fast is reduced.
1773 #define set_max_fast(s) \
1774 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1775 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1777 static inline INTERNAL_SIZE_T
1778 get_max_fast (void)
1780 /* Tell the GCC optimizers that global_max_fast is never larger
1781 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1782 _int_malloc after constant propagation of the size parameter.
1783 (The code never executes because malloc preserves the
1784 global_max_fast invariant, but the optimizers may not recognize
1785 this.) */
1786 if (global_max_fast > MAX_FAST_SIZE)
1787 __builtin_unreachable ();
1788 return global_max_fast;
1792 ----------- Internal state representation and initialization -----------
1796 have_fastchunks indicates that there are probably some fastbin chunks.
1797 It is set true on entering a chunk into any fastbin, and cleared early in
1798 malloc_consolidate. The value is approximate since it may be set when there
1799 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1800 available. Given it's sole purpose is to reduce number of redundant calls to
1801 malloc_consolidate, it does not affect correctness. As a result we can safely
1802 use relaxed atomic accesses.
1806 struct malloc_state
1808 /* Serialize access. */
1809 __libc_lock_define (, mutex);
1811 /* Flags (formerly in max_fast). */
1812 int flags;
1814 /* Set if the fastbin chunks contain recently inserted free blocks. */
1815 /* Note this is a bool but not all targets support atomics on booleans. */
1816 int have_fastchunks;
1818 /* Fastbins */
1819 mfastbinptr fastbinsY[NFASTBINS];
1821 /* Base of the topmost chunk -- not otherwise kept in a bin */
1822 mchunkptr top;
1824 /* The remainder from the most recent split of a small request */
1825 mchunkptr last_remainder;
1827 /* Normal bins packed as described above */
1828 mchunkptr bins[NBINS * 2 - 2];
1830 /* Bitmap of bins */
1831 unsigned int binmap[BINMAPSIZE];
1833 /* Linked list */
1834 struct malloc_state *next;
1836 /* Linked list for free arenas. Access to this field is serialized
1837 by free_list_lock in arena.c. */
1838 struct malloc_state *next_free;
1840 /* Number of threads attached to this arena. 0 if the arena is on
1841 the free list. Access to this field is serialized by
1842 free_list_lock in arena.c. */
1843 INTERNAL_SIZE_T attached_threads;
1845 /* Memory allocated from the system in this arena. */
1846 INTERNAL_SIZE_T system_mem;
1847 INTERNAL_SIZE_T max_system_mem;
1850 struct malloc_par
1852 /* Tunable parameters */
1853 unsigned long trim_threshold;
1854 INTERNAL_SIZE_T top_pad;
1855 INTERNAL_SIZE_T mmap_threshold;
1856 INTERNAL_SIZE_T arena_test;
1857 INTERNAL_SIZE_T arena_max;
1859 /* Transparent Large Page support. */
1860 INTERNAL_SIZE_T thp_pagesize;
1861 /* A value different than 0 means to align mmap allocation to hp_pagesize
1862 add hp_flags on flags. */
1863 INTERNAL_SIZE_T hp_pagesize;
1864 int hp_flags;
1866 /* Memory map support */
1867 int n_mmaps;
1868 int n_mmaps_max;
1869 int max_n_mmaps;
1870 /* the mmap_threshold is dynamic, until the user sets
1871 it manually, at which point we need to disable any
1872 dynamic behavior. */
1873 int no_dyn_threshold;
1875 /* Statistics */
1876 INTERNAL_SIZE_T mmapped_mem;
1877 INTERNAL_SIZE_T max_mmapped_mem;
1879 /* First address handed out by MORECORE/sbrk. */
1880 char *sbrk_base;
1882 #if USE_TCACHE
1883 /* Maximum number of buckets to use. */
1884 size_t tcache_bins;
1885 size_t tcache_max_bytes;
1886 /* Maximum number of chunks in each bucket. */
1887 size_t tcache_count;
1888 /* Maximum number of chunks to remove from the unsorted list, which
1889 aren't used to prefill the cache. */
1890 size_t tcache_unsorted_limit;
1891 #endif
1894 /* There are several instances of this struct ("arenas") in this
1895 malloc. If you are adapting this malloc in a way that does NOT use
1896 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1897 before using. This malloc relies on the property that malloc_state
1898 is initialized to all zeroes (as is true of C statics). */
1900 static struct malloc_state main_arena =
1902 .mutex = _LIBC_LOCK_INITIALIZER,
1903 .next = &main_arena,
1904 .attached_threads = 1
1907 /* There is only one instance of the malloc parameters. */
1909 static struct malloc_par mp_ =
1911 .top_pad = DEFAULT_TOP_PAD,
1912 .n_mmaps_max = DEFAULT_MMAP_MAX,
1913 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1914 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1915 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1916 .arena_test = NARENAS_FROM_NCORES (1)
1917 #if USE_TCACHE
1919 .tcache_count = TCACHE_FILL_COUNT,
1920 .tcache_bins = TCACHE_MAX_BINS,
1921 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1922 .tcache_unsorted_limit = 0 /* No limit. */
1923 #endif
1927 Initialize a malloc_state struct.
1929 This is called from ptmalloc_init () or from _int_new_arena ()
1930 when creating a new arena.
1933 static void
1934 malloc_init_state (mstate av)
1936 int i;
1937 mbinptr bin;
1939 /* Establish circular links for normal bins */
1940 for (i = 1; i < NBINS; ++i)
1942 bin = bin_at (av, i);
1943 bin->fd = bin->bk = bin;
1946 #if MORECORE_CONTIGUOUS
1947 if (av != &main_arena)
1948 #endif
1949 set_noncontiguous (av);
1950 if (av == &main_arena)
1951 set_max_fast (DEFAULT_MXFAST);
1952 atomic_store_relaxed (&av->have_fastchunks, false);
1954 av->top = initial_top (av);
1958 Other internal utilities operating on mstates
1961 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1962 static int systrim (size_t, mstate);
1963 static void malloc_consolidate (mstate);
1966 /* -------------- Early definitions for debugging hooks ---------------- */
1968 /* This function is called from the arena shutdown hook, to free the
1969 thread cache (if it exists). */
1970 static void tcache_thread_shutdown (void);
1972 /* ------------------ Testing support ----------------------------------*/
1974 static int perturb_byte;
1976 static void
1977 alloc_perturb (char *p, size_t n)
1979 if (__glibc_unlikely (perturb_byte))
1980 memset (p, perturb_byte ^ 0xff, n);
1983 static void
1984 free_perturb (char *p, size_t n)
1986 if (__glibc_unlikely (perturb_byte))
1987 memset (p, perturb_byte, n);
1992 #include <stap-probe.h>
1994 /* ----------- Routines dealing with transparent huge pages ----------- */
1996 static inline void
1997 madvise_thp (void *p, INTERNAL_SIZE_T size)
1999 #ifdef MADV_HUGEPAGE
2000 /* Do not consider areas smaller than a huge page or if the tunable is
2001 not active. */
2002 if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
2003 return;
2005 /* Linux requires the input address to be page-aligned, and unaligned
2006 inputs happens only for initial data segment. */
2007 if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize))))
2009 void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize));
2010 size += PTR_DIFF (p, q);
2011 p = q;
2014 __madvise (p, size, MADV_HUGEPAGE);
2015 #endif
2018 /* ------------------- Support for multiple arenas -------------------- */
2019 #include "arena.c"
2022 Debugging support
2024 These routines make a number of assertions about the states
2025 of data structures that should be true at all times. If any
2026 are not true, it's very likely that a user program has somehow
2027 trashed memory. (It's also possible that there is a coding error
2028 in malloc. In which case, please report it!)
2031 #if !MALLOC_DEBUG
2033 # define check_chunk(A, P)
2034 # define check_free_chunk(A, P)
2035 # define check_inuse_chunk(A, P)
2036 # define check_remalloced_chunk(A, P, N)
2037 # define check_malloced_chunk(A, P, N)
2038 # define check_malloc_state(A)
2040 #else
2042 # define check_chunk(A, P) do_check_chunk (A, P)
2043 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2044 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2045 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2046 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2047 # define check_malloc_state(A) do_check_malloc_state (A)
2050 Properties of all chunks
2053 static void
2054 do_check_chunk (mstate av, mchunkptr p)
2056 unsigned long sz = chunksize (p);
2057 /* min and max possible addresses assuming contiguous allocation */
2058 char *max_address = (char *) (av->top) + chunksize (av->top);
2059 char *min_address = max_address - av->system_mem;
2061 if (!chunk_is_mmapped (p))
2063 /* Has legal address ... */
2064 if (p != av->top)
2066 if (contiguous (av))
2068 assert (((char *) p) >= min_address);
2069 assert (((char *) p + sz) <= ((char *) (av->top)));
2072 else
2074 /* top size is always at least MINSIZE */
2075 assert ((unsigned long) (sz) >= MINSIZE);
2076 /* top predecessor always marked inuse */
2077 assert (prev_inuse (p));
2080 else
2082 /* address is outside main heap */
2083 if (contiguous (av) && av->top != initial_top (av))
2085 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2087 /* chunk is page-aligned */
2088 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2089 /* mem is aligned */
2090 assert (aligned_OK (chunk2mem (p)));
2095 Properties of free chunks
2098 static void
2099 do_check_free_chunk (mstate av, mchunkptr p)
2101 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2102 mchunkptr next = chunk_at_offset (p, sz);
2104 do_check_chunk (av, p);
2106 /* Chunk must claim to be free ... */
2107 assert (!inuse (p));
2108 assert (!chunk_is_mmapped (p));
2110 /* Unless a special marker, must have OK fields */
2111 if ((unsigned long) (sz) >= MINSIZE)
2113 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2114 assert (aligned_OK (chunk2mem (p)));
2115 /* ... matching footer field */
2116 assert (prev_size (next_chunk (p)) == sz);
2117 /* ... and is fully consolidated */
2118 assert (prev_inuse (p));
2119 assert (next == av->top || inuse (next));
2121 /* ... and has minimally sane links */
2122 assert (p->fd->bk == p);
2123 assert (p->bk->fd == p);
2125 else /* markers are always of size SIZE_SZ */
2126 assert (sz == SIZE_SZ);
2130 Properties of inuse chunks
2133 static void
2134 do_check_inuse_chunk (mstate av, mchunkptr p)
2136 mchunkptr next;
2138 do_check_chunk (av, p);
2140 if (chunk_is_mmapped (p))
2141 return; /* mmapped chunks have no next/prev */
2143 /* Check whether it claims to be in use ... */
2144 assert (inuse (p));
2146 next = next_chunk (p);
2148 /* ... and is surrounded by OK chunks.
2149 Since more things can be checked with free chunks than inuse ones,
2150 if an inuse chunk borders them and debug is on, it's worth doing them.
2152 if (!prev_inuse (p))
2154 /* Note that we cannot even look at prev unless it is not inuse */
2155 mchunkptr prv = prev_chunk (p);
2156 assert (next_chunk (prv) == p);
2157 do_check_free_chunk (av, prv);
2160 if (next == av->top)
2162 assert (prev_inuse (next));
2163 assert (chunksize (next) >= MINSIZE);
2165 else if (!inuse (next))
2166 do_check_free_chunk (av, next);
2170 Properties of chunks recycled from fastbins
2173 static void
2174 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2176 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2178 if (!chunk_is_mmapped (p))
2180 assert (av == arena_for_chunk (p));
2181 if (chunk_main_arena (p))
2182 assert (av == &main_arena);
2183 else
2184 assert (av != &main_arena);
2187 do_check_inuse_chunk (av, p);
2189 /* Legal size ... */
2190 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2191 assert ((unsigned long) (sz) >= MINSIZE);
2192 /* ... and alignment */
2193 assert (aligned_OK (chunk2mem (p)));
2194 /* chunk is less than MINSIZE more than request */
2195 assert ((long) (sz) - (long) (s) >= 0);
2196 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2200 Properties of nonrecycled chunks at the point they are malloced
2203 static void
2204 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2206 /* same as recycled case ... */
2207 do_check_remalloced_chunk (av, p, s);
2210 ... plus, must obey implementation invariant that prev_inuse is
2211 always true of any allocated chunk; i.e., that each allocated
2212 chunk borders either a previously allocated and still in-use
2213 chunk, or the base of its memory arena. This is ensured
2214 by making all allocations from the `lowest' part of any found
2215 chunk. This does not necessarily hold however for chunks
2216 recycled via fastbins.
2219 assert (prev_inuse (p));
2224 Properties of malloc_state.
2226 This may be useful for debugging malloc, as well as detecting user
2227 programmer errors that somehow write into malloc_state.
2229 If you are extending or experimenting with this malloc, you can
2230 probably figure out how to hack this routine to print out or
2231 display chunk addresses, sizes, bins, and other instrumentation.
2234 static void
2235 do_check_malloc_state (mstate av)
2237 int i;
2238 mchunkptr p;
2239 mchunkptr q;
2240 mbinptr b;
2241 unsigned int idx;
2242 INTERNAL_SIZE_T size;
2243 unsigned long total = 0;
2244 int max_fast_bin;
2246 /* internal size_t must be no wider than pointer type */
2247 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2249 /* alignment is a power of 2 */
2250 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2252 /* Check the arena is initialized. */
2253 assert (av->top != 0);
2255 /* No memory has been allocated yet, so doing more tests is not possible. */
2256 if (av->top == initial_top (av))
2257 return;
2259 /* pagesize is a power of 2 */
2260 assert (powerof2(GLRO (dl_pagesize)));
2262 /* A contiguous main_arena is consistent with sbrk_base. */
2263 if (av == &main_arena && contiguous (av))
2264 assert ((char *) mp_.sbrk_base + av->system_mem ==
2265 (char *) av->top + chunksize (av->top));
2267 /* properties of fastbins */
2269 /* max_fast is in allowed range */
2270 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2272 max_fast_bin = fastbin_index (get_max_fast ());
2274 for (i = 0; i < NFASTBINS; ++i)
2276 p = fastbin (av, i);
2278 /* The following test can only be performed for the main arena.
2279 While mallopt calls malloc_consolidate to get rid of all fast
2280 bins (especially those larger than the new maximum) this does
2281 only happen for the main arena. Trying to do this for any
2282 other arena would mean those arenas have to be locked and
2283 malloc_consolidate be called for them. This is excessive. And
2284 even if this is acceptable to somebody it still cannot solve
2285 the problem completely since if the arena is locked a
2286 concurrent malloc call might create a new arena which then
2287 could use the newly invalid fast bins. */
2289 /* all bins past max_fast are empty */
2290 if (av == &main_arena && i > max_fast_bin)
2291 assert (p == 0);
2293 while (p != 0)
2295 if (__glibc_unlikely (misaligned_chunk (p)))
2296 malloc_printerr ("do_check_malloc_state(): "
2297 "unaligned fastbin chunk detected");
2298 /* each chunk claims to be inuse */
2299 do_check_inuse_chunk (av, p);
2300 total += chunksize (p);
2301 /* chunk belongs in this bin */
2302 assert (fastbin_index (chunksize (p)) == i);
2303 p = REVEAL_PTR (p->fd);
2307 /* check normal bins */
2308 for (i = 1; i < NBINS; ++i)
2310 b = bin_at (av, i);
2312 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2313 if (i >= 2)
2315 unsigned int binbit = get_binmap (av, i);
2316 int empty = last (b) == b;
2317 if (!binbit)
2318 assert (empty);
2319 else if (!empty)
2320 assert (binbit);
2323 for (p = last (b); p != b; p = p->bk)
2325 /* each chunk claims to be free */
2326 do_check_free_chunk (av, p);
2327 size = chunksize (p);
2328 total += size;
2329 if (i >= 2)
2331 /* chunk belongs in bin */
2332 idx = bin_index (size);
2333 assert (idx == i);
2334 /* lists are sorted */
2335 assert (p->bk == b ||
2336 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2338 if (!in_smallbin_range (size))
2340 if (p->fd_nextsize != NULL)
2342 if (p->fd_nextsize == p)
2343 assert (p->bk_nextsize == p);
2344 else
2346 if (p->fd_nextsize == first (b))
2347 assert (chunksize (p) < chunksize (p->fd_nextsize));
2348 else
2349 assert (chunksize (p) > chunksize (p->fd_nextsize));
2351 if (p == first (b))
2352 assert (chunksize (p) > chunksize (p->bk_nextsize));
2353 else
2354 assert (chunksize (p) < chunksize (p->bk_nextsize));
2357 else
2358 assert (p->bk_nextsize == NULL);
2361 else if (!in_smallbin_range (size))
2362 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2363 /* chunk is followed by a legal chain of inuse chunks */
2364 for (q = next_chunk (p);
2365 (q != av->top && inuse (q) &&
2366 (unsigned long) (chunksize (q)) >= MINSIZE);
2367 q = next_chunk (q))
2368 do_check_inuse_chunk (av, q);
2372 /* top chunk is OK */
2373 check_chunk (av, av->top);
2375 #endif
2378 /* ----------------- Support for debugging hooks -------------------- */
2379 #if IS_IN (libc)
2380 #include "hooks.c"
2381 #endif
2384 /* ----------- Routines dealing with system allocation -------------- */
2387 sysmalloc handles malloc cases requiring more memory from the system.
2388 On entry, it is assumed that av->top does not have enough
2389 space to service request for nb bytes, thus requiring that av->top
2390 be extended or replaced.
2393 static void *
2394 sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
2396 long int size;
2399 Round up size to nearest page. For mmapped chunks, the overhead is one
2400 SIZE_SZ unit larger than for normal chunks, because there is no
2401 following chunk whose prev_size field could be used.
2403 See the front_misalign handling below, for glibc there is no need for
2404 further alignments unless we have have high alignment.
2406 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2407 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2408 else
2409 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2411 /* Don't try if size wraps around 0. */
2412 if ((unsigned long) (size) <= (unsigned long) (nb))
2413 return MAP_FAILED;
2415 char *mm = (char *) MMAP (0, size,
2416 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2417 extra_flags);
2418 if (mm == MAP_FAILED)
2419 return mm;
2421 #ifdef MAP_HUGETLB
2422 if (!(extra_flags & MAP_HUGETLB))
2423 madvise_thp (mm, size);
2424 #endif
2427 The offset to the start of the mmapped region is stored in the prev_size
2428 field of the chunk. This allows us to adjust returned start address to
2429 meet alignment requirements here and in memalign(), and still be able to
2430 compute proper address argument for later munmap in free() and realloc().
2433 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2435 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2437 /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
2438 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
2439 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2440 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2441 front_misalign = 0;
2443 else
2444 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2446 mchunkptr p; /* the allocated/returned chunk */
2448 if (front_misalign > 0)
2450 ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
2451 p = (mchunkptr) (mm + correction);
2452 set_prev_size (p, correction);
2453 set_head (p, (size - correction) | IS_MMAPPED);
2455 else
2457 p = (mchunkptr) mm;
2458 set_prev_size (p, 0);
2459 set_head (p, size | IS_MMAPPED);
2462 /* update statistics */
2463 int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
2464 atomic_max (&mp_.max_n_mmaps, new);
2466 unsigned long sum;
2467 sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
2468 atomic_max (&mp_.max_mmapped_mem, sum);
2470 check_chunk (av, p);
2472 return chunk2mem (p);
2476 Allocate memory using mmap() based on S and NB requested size, aligning to
2477 PAGESIZE if required. The EXTRA_FLAGS is used on mmap() call. If the call
2478 succeedes S is updated with the allocated size. This is used as a fallback
2479 if MORECORE fails.
2481 static void *
2482 sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
2483 INTERNAL_SIZE_T old_size, size_t minsize,
2484 size_t pagesize, int extra_flags, mstate av)
2486 long int size = *s;
2488 /* Cannot merge with old top, so add its size back in */
2489 if (contiguous (av))
2490 size = ALIGN_UP (size + old_size, pagesize);
2492 /* If we are relying on mmap as backup, then use larger units */
2493 if ((unsigned long) (size) < minsize)
2494 size = minsize;
2496 /* Don't try if size wraps around 0 */
2497 if ((unsigned long) (size) <= (unsigned long) (nb))
2498 return MORECORE_FAILURE;
2500 char *mbrk = (char *) (MMAP (0, size,
2501 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2502 extra_flags));
2503 if (mbrk == MAP_FAILED)
2504 return MAP_FAILED;
2506 #ifdef MAP_HUGETLB
2507 if (!(extra_flags & MAP_HUGETLB))
2508 madvise_thp (mbrk, size);
2509 #endif
2511 /* Record that we no longer have a contiguous sbrk region. After the first
2512 time mmap is used as backup, we do not ever rely on contiguous space
2513 since this could incorrectly bridge regions. */
2514 set_noncontiguous (av);
2516 *s = size;
2517 return mbrk;
2520 static void *
2521 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2523 mchunkptr old_top; /* incoming value of av->top */
2524 INTERNAL_SIZE_T old_size; /* its size */
2525 char *old_end; /* its end address */
2527 long size; /* arg to first MORECORE or mmap call */
2528 char *brk; /* return value from MORECORE */
2530 long correction; /* arg to 2nd MORECORE call */
2531 char *snd_brk; /* 2nd return val */
2533 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2534 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2535 char *aligned_brk; /* aligned offset into brk */
2537 mchunkptr p; /* the allocated/returned chunk */
2538 mchunkptr remainder; /* remainder from allocation */
2539 unsigned long remainder_size; /* its size */
2542 size_t pagesize = GLRO (dl_pagesize);
2543 bool tried_mmap = false;
2547 If have mmap, and the request size meets the mmap threshold, and
2548 the system supports mmap, and there are few enough currently
2549 allocated mmapped regions, try to directly map this request
2550 rather than expanding top.
2553 if (av == NULL
2554 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2555 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2557 char *mm;
2558 if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
2560 /* There is no need to isse the THP madvise call if Huge Pages are
2561 used directly. */
2562 mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
2563 if (mm != MAP_FAILED)
2564 return mm;
2566 mm = sysmalloc_mmap (nb, pagesize, 0, av);
2567 if (mm != MAP_FAILED)
2568 return mm;
2569 tried_mmap = true;
2572 /* There are no usable arenas and mmap also failed. */
2573 if (av == NULL)
2574 return 0;
2576 /* Record incoming configuration of top */
2578 old_top = av->top;
2579 old_size = chunksize (old_top);
2580 old_end = (char *) (chunk_at_offset (old_top, old_size));
2582 brk = snd_brk = (char *) (MORECORE_FAILURE);
2585 If not the first time through, we require old_size to be
2586 at least MINSIZE and to have prev_inuse set.
2589 assert ((old_top == initial_top (av) && old_size == 0) ||
2590 ((unsigned long) (old_size) >= MINSIZE &&
2591 prev_inuse (old_top) &&
2592 ((unsigned long) old_end & (pagesize - 1)) == 0));
2594 /* Precondition: not enough current space to satisfy nb request */
2595 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2598 if (av != &main_arena)
2600 heap_info *old_heap, *heap;
2601 size_t old_heap_size;
2603 /* First try to extend the current heap. */
2604 old_heap = heap_for_ptr (old_top);
2605 old_heap_size = old_heap->size;
2606 if ((long) (MINSIZE + nb - old_size) > 0
2607 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2609 av->system_mem += old_heap->size - old_heap_size;
2610 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2611 | PREV_INUSE);
2613 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2615 /* Use a newly allocated heap. */
2616 heap->ar_ptr = av;
2617 heap->prev = old_heap;
2618 av->system_mem += heap->size;
2619 /* Set up the new top. */
2620 top (av) = chunk_at_offset (heap, sizeof (*heap));
2621 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2623 /* Setup fencepost and free the old top chunk with a multiple of
2624 MALLOC_ALIGNMENT in size. */
2625 /* The fencepost takes at least MINSIZE bytes, because it might
2626 become the top chunk again later. Note that a footer is set
2627 up, too, although the chunk is marked in use. */
2628 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2629 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2630 0 | PREV_INUSE);
2631 if (old_size >= MINSIZE)
2633 set_head (chunk_at_offset (old_top, old_size),
2634 CHUNK_HDR_SZ | PREV_INUSE);
2635 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2636 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2637 _int_free (av, old_top, 1);
2639 else
2641 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2642 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2645 else if (!tried_mmap)
2647 /* We can at least try to use to mmap memory. If new_heap fails
2648 it is unlikely that trying to allocate huge pages will
2649 succeed. */
2650 char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
2651 if (mm != MAP_FAILED)
2652 return mm;
2655 else /* av == main_arena */
2658 { /* Request enough space for nb + pad + overhead */
2659 size = nb + mp_.top_pad + MINSIZE;
2662 If contiguous, we can subtract out existing space that we hope to
2663 combine with new space. We add it back later only if
2664 we don't actually get contiguous space.
2667 if (contiguous (av))
2668 size -= old_size;
2671 Round to a multiple of page size or huge page size.
2672 If MORECORE is not contiguous, this ensures that we only call it
2673 with whole-page arguments. And if MORECORE is contiguous and
2674 this is not first time through, this preserves page-alignment of
2675 previous calls. Otherwise, we correct to page-align below.
2678 #ifdef MADV_HUGEPAGE
2679 /* Defined in brk.c. */
2680 extern void *__curbrk;
2681 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2683 uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
2684 mp_.thp_pagesize);
2685 size = top - (uintptr_t) __curbrk;
2687 else
2688 #endif
2689 size = ALIGN_UP (size, GLRO(dl_pagesize));
2692 Don't try to call MORECORE if argument is so big as to appear
2693 negative. Note that since mmap takes size_t arg, it may succeed
2694 below even if we cannot call MORECORE.
2697 if (size > 0)
2699 brk = (char *) (MORECORE (size));
2700 if (brk != (char *) (MORECORE_FAILURE))
2701 madvise_thp (brk, size);
2702 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2705 if (brk == (char *) (MORECORE_FAILURE))
2708 If have mmap, try using it as a backup when MORECORE fails or
2709 cannot be used. This is worth doing on systems that have "holes" in
2710 address space, so sbrk cannot extend to give contiguous space, but
2711 space is available elsewhere. Note that we ignore mmap max count
2712 and threshold limits, since the space will not be used as a
2713 segregated mmap region.
2716 char *mbrk = MAP_FAILED;
2717 if (mp_.hp_pagesize > 0)
2718 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
2719 mp_.hp_pagesize, mp_.hp_pagesize,
2720 mp_.hp_flags, av);
2721 if (mbrk == MAP_FAILED)
2722 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
2723 pagesize, 0, av);
2724 if (mbrk != MAP_FAILED)
2726 /* We do not need, and cannot use, another sbrk call to find end */
2727 brk = mbrk;
2728 snd_brk = brk + size;
2732 if (brk != (char *) (MORECORE_FAILURE))
2734 if (mp_.sbrk_base == 0)
2735 mp_.sbrk_base = brk;
2736 av->system_mem += size;
2739 If MORECORE extends previous space, we can likewise extend top size.
2742 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2743 set_head (old_top, (size + old_size) | PREV_INUSE);
2745 else if (contiguous (av) && old_size && brk < old_end)
2746 /* Oops! Someone else killed our space.. Can't touch anything. */
2747 malloc_printerr ("break adjusted to free malloc space");
2750 Otherwise, make adjustments:
2752 * If the first time through or noncontiguous, we need to call sbrk
2753 just to find out where the end of memory lies.
2755 * We need to ensure that all returned chunks from malloc will meet
2756 MALLOC_ALIGNMENT
2758 * If there was an intervening foreign sbrk, we need to adjust sbrk
2759 request size to account for fact that we will not be able to
2760 combine new space with existing space in old_top.
2762 * Almost all systems internally allocate whole pages at a time, in
2763 which case we might as well use the whole last page of request.
2764 So we allocate enough more memory to hit a page boundary now,
2765 which in turn causes future contiguous calls to page-align.
2768 else
2770 front_misalign = 0;
2771 end_misalign = 0;
2772 correction = 0;
2773 aligned_brk = brk;
2775 /* handle contiguous cases */
2776 if (contiguous (av))
2778 /* Count foreign sbrk as system_mem. */
2779 if (old_size)
2780 av->system_mem += brk - old_end;
2782 /* Guarantee alignment of first new chunk made from this space */
2784 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2785 if (front_misalign > 0)
2788 Skip over some bytes to arrive at an aligned position.
2789 We don't need to specially mark these wasted front bytes.
2790 They will never be accessed anyway because
2791 prev_inuse of av->top (and any chunk created from its start)
2792 is always true after initialization.
2795 correction = MALLOC_ALIGNMENT - front_misalign;
2796 aligned_brk += correction;
2800 If this isn't adjacent to existing space, then we will not
2801 be able to merge with old_top space, so must add to 2nd request.
2804 correction += old_size;
2806 /* Extend the end address to hit a page boundary */
2807 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2808 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2810 assert (correction >= 0);
2811 snd_brk = (char *) (MORECORE (correction));
2814 If can't allocate correction, try to at least find out current
2815 brk. It might be enough to proceed without failing.
2817 Note that if second sbrk did NOT fail, we assume that space
2818 is contiguous with first sbrk. This is a safe assumption unless
2819 program is multithreaded but doesn't use locks and a foreign sbrk
2820 occurred between our first and second calls.
2823 if (snd_brk == (char *) (MORECORE_FAILURE))
2825 correction = 0;
2826 snd_brk = (char *) (MORECORE (0));
2828 else
2829 madvise_thp (snd_brk, correction);
2832 /* handle non-contiguous cases */
2833 else
2835 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2836 /* MORECORE/mmap must correctly align */
2837 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2838 else
2840 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2841 if (front_misalign > 0)
2844 Skip over some bytes to arrive at an aligned position.
2845 We don't need to specially mark these wasted front bytes.
2846 They will never be accessed anyway because
2847 prev_inuse of av->top (and any chunk created from its start)
2848 is always true after initialization.
2851 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2855 /* Find out current end of memory */
2856 if (snd_brk == (char *) (MORECORE_FAILURE))
2858 snd_brk = (char *) (MORECORE (0));
2862 /* Adjust top based on results of second sbrk */
2863 if (snd_brk != (char *) (MORECORE_FAILURE))
2865 av->top = (mchunkptr) aligned_brk;
2866 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2867 av->system_mem += correction;
2870 If not the first time through, we either have a
2871 gap due to foreign sbrk or a non-contiguous region. Insert a
2872 double fencepost at old_top to prevent consolidation with space
2873 we don't own. These fenceposts are artificial chunks that are
2874 marked as inuse and are in any case too small to use. We need
2875 two to make sizes and alignments work out.
2878 if (old_size != 0)
2881 Shrink old_top to insert fenceposts, keeping size a
2882 multiple of MALLOC_ALIGNMENT. We know there is at least
2883 enough space in old_top to do this.
2885 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2886 set_head (old_top, old_size | PREV_INUSE);
2889 Note that the following assignments completely overwrite
2890 old_top when old_size was previously MINSIZE. This is
2891 intentional. We need the fencepost, even if old_top otherwise gets
2892 lost.
2894 set_head (chunk_at_offset (old_top, old_size),
2895 CHUNK_HDR_SZ | PREV_INUSE);
2896 set_head (chunk_at_offset (old_top,
2897 old_size + CHUNK_HDR_SZ),
2898 CHUNK_HDR_SZ | PREV_INUSE);
2900 /* If possible, release the rest. */
2901 if (old_size >= MINSIZE)
2903 _int_free (av, old_top, 1);
2909 } /* if (av != &main_arena) */
2911 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2912 av->max_system_mem = av->system_mem;
2913 check_malloc_state (av);
2915 /* finally, do the allocation */
2916 p = av->top;
2917 size = chunksize (p);
2919 /* check that one of the above allocation paths succeeded */
2920 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2922 remainder_size = size - nb;
2923 remainder = chunk_at_offset (p, nb);
2924 av->top = remainder;
2925 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2926 set_head (remainder, remainder_size | PREV_INUSE);
2927 check_malloced_chunk (av, p, nb);
2928 return chunk2mem (p);
2931 /* catch all failure paths */
2932 __set_errno (ENOMEM);
2933 return 0;
2938 systrim is an inverse of sorts to sysmalloc. It gives memory back
2939 to the system (via negative arguments to sbrk) if there is unused
2940 memory at the `high' end of the malloc pool. It is called
2941 automatically by free() when top space exceeds the trim
2942 threshold. It is also called by the public malloc_trim routine. It
2943 returns 1 if it actually released any memory, else 0.
2946 static int
2947 systrim (size_t pad, mstate av)
2949 long top_size; /* Amount of top-most memory */
2950 long extra; /* Amount to release */
2951 long released; /* Amount actually released */
2952 char *current_brk; /* address returned by pre-check sbrk call */
2953 char *new_brk; /* address returned by post-check sbrk call */
2954 long top_area;
2956 top_size = chunksize (av->top);
2958 top_area = top_size - MINSIZE - 1;
2959 if (top_area <= pad)
2960 return 0;
2962 /* Release in pagesize units and round down to the nearest page. */
2963 #ifdef MADV_HUGEPAGE
2964 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2965 extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
2966 else
2967 #endif
2968 extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize));
2970 if (extra == 0)
2971 return 0;
2974 Only proceed if end of memory is where we last set it.
2975 This avoids problems if there were foreign sbrk calls.
2977 current_brk = (char *) (MORECORE (0));
2978 if (current_brk == (char *) (av->top) + top_size)
2981 Attempt to release memory. We ignore MORECORE return value,
2982 and instead call again to find out where new end of memory is.
2983 This avoids problems if first call releases less than we asked,
2984 of if failure somehow altered brk value. (We could still
2985 encounter problems if it altered brk in some very bad way,
2986 but the only thing we can do is adjust anyway, which will cause
2987 some downstream failure.)
2990 MORECORE (-extra);
2991 new_brk = (char *) (MORECORE (0));
2993 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2995 if (new_brk != (char *) MORECORE_FAILURE)
2997 released = (long) (current_brk - new_brk);
2999 if (released != 0)
3001 /* Success. Adjust top. */
3002 av->system_mem -= released;
3003 set_head (av->top, (top_size - released) | PREV_INUSE);
3004 check_malloc_state (av);
3005 return 1;
3009 return 0;
3012 static void
3013 munmap_chunk (mchunkptr p)
3015 size_t pagesize = GLRO (dl_pagesize);
3016 INTERNAL_SIZE_T size = chunksize (p);
3018 assert (chunk_is_mmapped (p));
3020 uintptr_t mem = (uintptr_t) chunk2mem (p);
3021 uintptr_t block = (uintptr_t) p - prev_size (p);
3022 size_t total_size = prev_size (p) + size;
3023 /* Unfortunately we have to do the compilers job by hand here. Normally
3024 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3025 page size. But gcc does not recognize the optimization possibility
3026 (in the moment at least) so we combine the two values into one before
3027 the bit test. */
3028 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3029 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3030 malloc_printerr ("munmap_chunk(): invalid pointer");
3032 atomic_fetch_add_relaxed (&mp_.n_mmaps, -1);
3033 atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size);
3035 /* If munmap failed the process virtual memory address space is in a
3036 bad shape. Just leave the block hanging around, the process will
3037 terminate shortly anyway since not much can be done. */
3038 __munmap ((char *) block, total_size);
3041 #if HAVE_MREMAP
3043 static mchunkptr
3044 mremap_chunk (mchunkptr p, size_t new_size)
3046 size_t pagesize = GLRO (dl_pagesize);
3047 INTERNAL_SIZE_T offset = prev_size (p);
3048 INTERNAL_SIZE_T size = chunksize (p);
3049 char *cp;
3051 assert (chunk_is_mmapped (p));
3053 uintptr_t block = (uintptr_t) p - offset;
3054 uintptr_t mem = (uintptr_t) chunk2mem(p);
3055 size_t total_size = offset + size;
3056 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3057 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3058 malloc_printerr("mremap_chunk(): invalid pointer");
3060 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3061 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3063 /* No need to remap if the number of pages does not change. */
3064 if (total_size == new_size)
3065 return p;
3067 cp = (char *) __mremap ((char *) block, total_size, new_size,
3068 MREMAP_MAYMOVE);
3070 if (cp == MAP_FAILED)
3071 return 0;
3073 madvise_thp (cp, new_size);
3075 p = (mchunkptr) (cp + offset);
3077 assert (aligned_OK (chunk2mem (p)));
3079 assert (prev_size (p) == offset);
3080 set_head (p, (new_size - offset) | IS_MMAPPED);
3082 INTERNAL_SIZE_T new;
3083 new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
3084 + new_size - size - offset;
3085 atomic_max (&mp_.max_mmapped_mem, new);
3086 return p;
3088 #endif /* HAVE_MREMAP */
3090 /*------------------------ Public wrappers. --------------------------------*/
3092 #if USE_TCACHE
3094 /* We overlay this structure on the user-data portion of a chunk when
3095 the chunk is stored in the per-thread cache. */
3096 typedef struct tcache_entry
3098 struct tcache_entry *next;
3099 /* This field exists to detect double frees. */
3100 uintptr_t key;
3101 } tcache_entry;
3103 /* There is one of these for each thread, which contains the
3104 per-thread cache (hence "tcache_perthread_struct"). Keeping
3105 overall size low is mildly important. Note that COUNTS and ENTRIES
3106 are redundant (we could have just counted the linked list each
3107 time), this is for performance reasons. */
3108 typedef struct tcache_perthread_struct
3110 uint16_t counts[TCACHE_MAX_BINS];
3111 tcache_entry *entries[TCACHE_MAX_BINS];
3112 } tcache_perthread_struct;
3114 static __thread bool tcache_shutting_down = false;
3115 static __thread tcache_perthread_struct *tcache = NULL;
3117 /* Process-wide key to try and catch a double-free in the same thread. */
3118 static uintptr_t tcache_key;
3120 /* The value of tcache_key does not really have to be a cryptographically
3121 secure random number. It only needs to be arbitrary enough so that it does
3122 not collide with values present in applications. If a collision does happen
3123 consistently enough, it could cause a degradation in performance since the
3124 entire list is checked to check if the block indeed has been freed the
3125 second time. The odds of this happening are exceedingly low though, about 1
3126 in 2^wordsize. There is probably a higher chance of the performance
3127 degradation being due to a double free where the first free happened in a
3128 different thread; that's a case this check does not cover. */
3129 static void
3130 tcache_key_initialize (void)
3132 if (__getrandom_nocancel (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
3133 != sizeof (tcache_key))
3135 tcache_key = random_bits ();
3136 #if __WORDSIZE == 64
3137 tcache_key = (tcache_key << 32) | random_bits ();
3138 #endif
3142 /* Caller must ensure that we know tc_idx is valid and there's room
3143 for more chunks. */
3144 static __always_inline void
3145 tcache_put (mchunkptr chunk, size_t tc_idx)
3147 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3149 /* Mark this chunk as "in the tcache" so the test in _int_free will
3150 detect a double free. */
3151 e->key = tcache_key;
3153 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3154 tcache->entries[tc_idx] = e;
3155 ++(tcache->counts[tc_idx]);
3158 /* Caller must ensure that we know tc_idx is valid and there's
3159 available chunks to remove. Removes chunk from the middle of the
3160 list. */
3161 static __always_inline void *
3162 tcache_get_n (size_t tc_idx, tcache_entry **ep)
3164 tcache_entry *e;
3165 if (ep == &(tcache->entries[tc_idx]))
3166 e = *ep;
3167 else
3168 e = REVEAL_PTR (*ep);
3170 if (__glibc_unlikely (!aligned_OK (e)))
3171 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3173 if (ep == &(tcache->entries[tc_idx]))
3174 *ep = REVEAL_PTR (e->next);
3175 else
3176 *ep = PROTECT_PTR (ep, REVEAL_PTR (e->next));
3178 --(tcache->counts[tc_idx]);
3179 e->key = 0;
3180 return (void *) e;
3183 /* Like the above, but removes from the head of the list. */
3184 static __always_inline void *
3185 tcache_get (size_t tc_idx)
3187 return tcache_get_n (tc_idx, & tcache->entries[tc_idx]);
3190 /* Iterates through the tcache linked list. */
3191 static __always_inline tcache_entry *
3192 tcache_next (tcache_entry *e)
3194 return (tcache_entry *) REVEAL_PTR (e->next);
3197 static void
3198 tcache_thread_shutdown (void)
3200 int i;
3201 tcache_perthread_struct *tcache_tmp = tcache;
3203 tcache_shutting_down = true;
3205 if (!tcache)
3206 return;
3208 /* Disable the tcache and prevent it from being reinitialized. */
3209 tcache = NULL;
3211 /* Free all of the entries and the tcache itself back to the arena
3212 heap for coalescing. */
3213 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3215 while (tcache_tmp->entries[i])
3217 tcache_entry *e = tcache_tmp->entries[i];
3218 if (__glibc_unlikely (!aligned_OK (e)))
3219 malloc_printerr ("tcache_thread_shutdown(): "
3220 "unaligned tcache chunk detected");
3221 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3222 __libc_free (e);
3226 __libc_free (tcache_tmp);
3229 static void
3230 tcache_init(void)
3232 mstate ar_ptr;
3233 void *victim = 0;
3234 const size_t bytes = sizeof (tcache_perthread_struct);
3236 if (tcache_shutting_down)
3237 return;
3239 arena_get (ar_ptr, bytes);
3240 victim = _int_malloc (ar_ptr, bytes);
3241 if (!victim && ar_ptr != NULL)
3243 ar_ptr = arena_get_retry (ar_ptr, bytes);
3244 victim = _int_malloc (ar_ptr, bytes);
3248 if (ar_ptr != NULL)
3249 __libc_lock_unlock (ar_ptr->mutex);
3251 /* In a low memory situation, we may not be able to allocate memory
3252 - in which case, we just keep trying later. However, we
3253 typically do this very early, so either there is sufficient
3254 memory, or there isn't enough memory to do non-trivial
3255 allocations anyway. */
3256 if (victim)
3258 tcache = (tcache_perthread_struct *) victim;
3259 memset (tcache, 0, sizeof (tcache_perthread_struct));
3264 # define MAYBE_INIT_TCACHE() \
3265 if (__glibc_unlikely (tcache == NULL)) \
3266 tcache_init();
3268 #else /* !USE_TCACHE */
3269 # define MAYBE_INIT_TCACHE()
3271 static void
3272 tcache_thread_shutdown (void)
3274 /* Nothing to do if there is no thread cache. */
3277 #endif /* !USE_TCACHE */
3279 #if IS_IN (libc)
3280 void *
3281 __libc_malloc (size_t bytes)
3283 mstate ar_ptr;
3284 void *victim;
3286 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3287 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3289 if (!__malloc_initialized)
3290 ptmalloc_init ();
3291 #if USE_TCACHE
3292 /* int_free also calls request2size, be careful to not pad twice. */
3293 size_t tbytes = checked_request2size (bytes);
3294 if (tbytes == 0)
3296 __set_errno (ENOMEM);
3297 return NULL;
3299 size_t tc_idx = csize2tidx (tbytes);
3301 MAYBE_INIT_TCACHE ();
3303 DIAG_PUSH_NEEDS_COMMENT;
3304 if (tc_idx < mp_.tcache_bins
3305 && tcache != NULL
3306 && tcache->counts[tc_idx] > 0)
3308 victim = tcache_get (tc_idx);
3309 return tag_new_usable (victim);
3311 DIAG_POP_NEEDS_COMMENT;
3312 #endif
3314 if (SINGLE_THREAD_P)
3316 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3317 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3318 &main_arena == arena_for_chunk (mem2chunk (victim)));
3319 return victim;
3322 arena_get (ar_ptr, bytes);
3324 victim = _int_malloc (ar_ptr, bytes);
3325 /* Retry with another arena only if we were able to find a usable arena
3326 before. */
3327 if (!victim && ar_ptr != NULL)
3329 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3330 ar_ptr = arena_get_retry (ar_ptr, bytes);
3331 victim = _int_malloc (ar_ptr, bytes);
3334 if (ar_ptr != NULL)
3335 __libc_lock_unlock (ar_ptr->mutex);
3337 victim = tag_new_usable (victim);
3339 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3340 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3341 return victim;
3343 libc_hidden_def (__libc_malloc)
3345 void
3346 __libc_free (void *mem)
3348 mstate ar_ptr;
3349 mchunkptr p; /* chunk corresponding to mem */
3351 if (mem == 0) /* free(0) has no effect */
3352 return;
3354 /* Quickly check that the freed pointer matches the tag for the memory.
3355 This gives a useful double-free detection. */
3356 if (__glibc_unlikely (mtag_enabled))
3357 *(volatile char *)mem;
3359 int err = errno;
3361 p = mem2chunk (mem);
3363 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3365 /* See if the dynamic brk/mmap threshold needs adjusting.
3366 Dumped fake mmapped chunks do not affect the threshold. */
3367 if (!mp_.no_dyn_threshold
3368 && chunksize_nomask (p) > mp_.mmap_threshold
3369 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
3371 mp_.mmap_threshold = chunksize (p);
3372 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3373 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3374 mp_.mmap_threshold, mp_.trim_threshold);
3376 munmap_chunk (p);
3378 else
3380 MAYBE_INIT_TCACHE ();
3382 /* Mark the chunk as belonging to the library again. */
3383 (void)tag_region (chunk2mem (p), memsize (p));
3385 ar_ptr = arena_for_chunk (p);
3386 _int_free (ar_ptr, p, 0);
3389 __set_errno (err);
3391 libc_hidden_def (__libc_free)
3393 void *
3394 __libc_realloc (void *oldmem, size_t bytes)
3396 mstate ar_ptr;
3397 INTERNAL_SIZE_T nb; /* padded request size */
3399 void *newp; /* chunk to return */
3401 if (!__malloc_initialized)
3402 ptmalloc_init ();
3404 #if REALLOC_ZERO_BYTES_FREES
3405 if (bytes == 0 && oldmem != NULL)
3407 __libc_free (oldmem); return 0;
3409 #endif
3411 /* realloc of null is supposed to be same as malloc */
3412 if (oldmem == 0)
3413 return __libc_malloc (bytes);
3415 /* Perform a quick check to ensure that the pointer's tag matches the
3416 memory's tag. */
3417 if (__glibc_unlikely (mtag_enabled))
3418 *(volatile char*) oldmem;
3420 /* Return the chunk as is whenever possible, i.e. there's enough usable space
3421 but not so much that we end up fragmenting the block. We use the trim
3422 threshold as the heuristic to decide the latter. */
3423 size_t usable = musable (oldmem);
3424 if (bytes <= usable
3425 && (unsigned long) (usable - bytes) <= mp_.trim_threshold)
3426 return oldmem;
3428 /* chunk corresponding to oldmem */
3429 const mchunkptr oldp = mem2chunk (oldmem);
3430 /* its size */
3431 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3433 if (chunk_is_mmapped (oldp))
3434 ar_ptr = NULL;
3435 else
3437 MAYBE_INIT_TCACHE ();
3438 ar_ptr = arena_for_chunk (oldp);
3441 /* Little security check which won't hurt performance: the allocator
3442 never wrapps around at the end of the address space. Therefore
3443 we can exclude some size values which might appear here by
3444 accident or by "design" from some intruder. */
3445 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3446 || __builtin_expect (misaligned_chunk (oldp), 0)))
3447 malloc_printerr ("realloc(): invalid pointer");
3449 nb = checked_request2size (bytes);
3450 if (nb == 0)
3452 __set_errno (ENOMEM);
3453 return NULL;
3456 if (chunk_is_mmapped (oldp))
3458 void *newmem;
3460 #if HAVE_MREMAP
3461 newp = mremap_chunk (oldp, nb);
3462 if (newp)
3464 void *newmem = chunk2mem_tag (newp);
3465 /* Give the new block a different tag. This helps to ensure
3466 that stale handles to the previous mapping are not
3467 reused. There's a performance hit for both us and the
3468 caller for doing this, so we might want to
3469 reconsider. */
3470 return tag_new_usable (newmem);
3472 #endif
3473 /* Note the extra SIZE_SZ overhead. */
3474 if (oldsize - SIZE_SZ >= nb)
3475 return oldmem; /* do nothing */
3477 /* Must alloc, copy, free. */
3478 newmem = __libc_malloc (bytes);
3479 if (newmem == 0)
3480 return 0; /* propagate failure */
3482 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3483 munmap_chunk (oldp);
3484 return newmem;
3487 if (SINGLE_THREAD_P)
3489 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3490 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3491 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3493 return newp;
3496 __libc_lock_lock (ar_ptr->mutex);
3498 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3500 __libc_lock_unlock (ar_ptr->mutex);
3501 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3502 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3504 if (newp == NULL)
3506 /* Try harder to allocate memory in other arenas. */
3507 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3508 newp = __libc_malloc (bytes);
3509 if (newp != NULL)
3511 size_t sz = memsize (oldp);
3512 memcpy (newp, oldmem, sz);
3513 (void) tag_region (chunk2mem (oldp), sz);
3514 _int_free (ar_ptr, oldp, 0);
3518 return newp;
3520 libc_hidden_def (__libc_realloc)
3522 void *
3523 __libc_memalign (size_t alignment, size_t bytes)
3525 if (!__malloc_initialized)
3526 ptmalloc_init ();
3528 void *address = RETURN_ADDRESS (0);
3529 return _mid_memalign (alignment, bytes, address);
3531 libc_hidden_def (__libc_memalign)
3533 /* For ISO C17. */
3534 void *
3535 weak_function
3536 aligned_alloc (size_t alignment, size_t bytes)
3538 if (!__malloc_initialized)
3539 ptmalloc_init ();
3541 /* Similar to memalign, but starting with ISO C17 the standard
3542 requires an error for alignments that are not supported by the
3543 implementation. Valid alignments for the current implementation
3544 are non-negative powers of two. */
3545 if (!powerof2 (alignment) || alignment == 0)
3547 __set_errno (EINVAL);
3548 return 0;
3551 void *address = RETURN_ADDRESS (0);
3552 return _mid_memalign (alignment, bytes, address);
3555 static void *
3556 _mid_memalign (size_t alignment, size_t bytes, void *address)
3558 mstate ar_ptr;
3559 void *p;
3561 /* If we need less alignment than we give anyway, just relay to malloc. */
3562 if (alignment <= MALLOC_ALIGNMENT)
3563 return __libc_malloc (bytes);
3565 /* Otherwise, ensure that it is at least a minimum chunk size */
3566 if (alignment < MINSIZE)
3567 alignment = MINSIZE;
3569 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3570 power of 2 and will cause overflow in the check below. */
3571 if (alignment > SIZE_MAX / 2 + 1)
3573 __set_errno (EINVAL);
3574 return 0;
3578 /* Make sure alignment is power of 2. */
3579 if (!powerof2 (alignment))
3581 size_t a = MALLOC_ALIGNMENT * 2;
3582 while (a < alignment)
3583 a <<= 1;
3584 alignment = a;
3587 #if USE_TCACHE
3589 size_t tbytes;
3590 tbytes = checked_request2size (bytes);
3591 if (tbytes == 0)
3593 __set_errno (ENOMEM);
3594 return NULL;
3596 size_t tc_idx = csize2tidx (tbytes);
3598 if (tc_idx < mp_.tcache_bins
3599 && tcache != NULL
3600 && tcache->counts[tc_idx] > 0)
3602 /* The tcache itself isn't encoded, but the chain is. */
3603 tcache_entry **tep = & tcache->entries[tc_idx];
3604 tcache_entry *te = *tep;
3605 while (te != NULL && !PTR_IS_ALIGNED (te, alignment))
3607 tep = & (te->next);
3608 te = tcache_next (te);
3610 if (te != NULL)
3612 void *victim = tcache_get_n (tc_idx, tep);
3613 return tag_new_usable (victim);
3617 #endif
3619 if (SINGLE_THREAD_P)
3621 p = _int_memalign (&main_arena, alignment, bytes);
3622 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3623 &main_arena == arena_for_chunk (mem2chunk (p)));
3624 return tag_new_usable (p);
3627 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3629 p = _int_memalign (ar_ptr, alignment, bytes);
3630 if (!p && ar_ptr != NULL)
3632 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3633 ar_ptr = arena_get_retry (ar_ptr, bytes);
3634 p = _int_memalign (ar_ptr, alignment, bytes);
3637 if (ar_ptr != NULL)
3638 __libc_lock_unlock (ar_ptr->mutex);
3640 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3641 ar_ptr == arena_for_chunk (mem2chunk (p)));
3642 return tag_new_usable (p);
3645 void *
3646 __libc_valloc (size_t bytes)
3648 if (!__malloc_initialized)
3649 ptmalloc_init ();
3651 void *address = RETURN_ADDRESS (0);
3652 size_t pagesize = GLRO (dl_pagesize);
3653 return _mid_memalign (pagesize, bytes, address);
3656 void *
3657 __libc_pvalloc (size_t bytes)
3659 if (!__malloc_initialized)
3660 ptmalloc_init ();
3662 void *address = RETURN_ADDRESS (0);
3663 size_t pagesize = GLRO (dl_pagesize);
3664 size_t rounded_bytes;
3665 /* ALIGN_UP with overflow check. */
3666 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3667 pagesize - 1,
3668 &rounded_bytes)))
3670 __set_errno (ENOMEM);
3671 return 0;
3673 rounded_bytes = rounded_bytes & -(pagesize - 1);
3675 return _mid_memalign (pagesize, rounded_bytes, address);
3678 void *
3679 __libc_calloc (size_t n, size_t elem_size)
3681 mstate av;
3682 mchunkptr oldtop;
3683 INTERNAL_SIZE_T sz, oldtopsize;
3684 void *mem;
3685 unsigned long clearsize;
3686 unsigned long nclears;
3687 INTERNAL_SIZE_T *d;
3688 ptrdiff_t bytes;
3690 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3692 __set_errno (ENOMEM);
3693 return NULL;
3696 sz = bytes;
3698 if (!__malloc_initialized)
3699 ptmalloc_init ();
3701 MAYBE_INIT_TCACHE ();
3703 if (SINGLE_THREAD_P)
3704 av = &main_arena;
3705 else
3706 arena_get (av, sz);
3708 if (av)
3710 /* Check if we hand out the top chunk, in which case there may be no
3711 need to clear. */
3712 #if MORECORE_CLEARS
3713 oldtop = top (av);
3714 oldtopsize = chunksize (top (av));
3715 # if MORECORE_CLEARS < 2
3716 /* Only newly allocated memory is guaranteed to be cleared. */
3717 if (av == &main_arena &&
3718 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3719 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3720 # endif
3721 if (av != &main_arena)
3723 heap_info *heap = heap_for_ptr (oldtop);
3724 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3725 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3727 #endif
3729 else
3731 /* No usable arenas. */
3732 oldtop = 0;
3733 oldtopsize = 0;
3735 mem = _int_malloc (av, sz);
3737 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3738 av == arena_for_chunk (mem2chunk (mem)));
3740 if (!SINGLE_THREAD_P)
3742 if (mem == 0 && av != NULL)
3744 LIBC_PROBE (memory_calloc_retry, 1, sz);
3745 av = arena_get_retry (av, sz);
3746 mem = _int_malloc (av, sz);
3749 if (av != NULL)
3750 __libc_lock_unlock (av->mutex);
3753 /* Allocation failed even after a retry. */
3754 if (mem == 0)
3755 return 0;
3757 mchunkptr p = mem2chunk (mem);
3759 /* If we are using memory tagging, then we need to set the tags
3760 regardless of MORECORE_CLEARS, so we zero the whole block while
3761 doing so. */
3762 if (__glibc_unlikely (mtag_enabled))
3763 return tag_new_zero_region (mem, memsize (p));
3765 INTERNAL_SIZE_T csz = chunksize (p);
3767 /* Two optional cases in which clearing not necessary */
3768 if (chunk_is_mmapped (p))
3770 if (__builtin_expect (perturb_byte, 0))
3771 return memset (mem, 0, sz);
3773 return mem;
3776 #if MORECORE_CLEARS
3777 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3779 /* clear only the bytes from non-freshly-sbrked memory */
3780 csz = oldtopsize;
3782 #endif
3784 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3785 contents have an odd number of INTERNAL_SIZE_T-sized words;
3786 minimally 3. */
3787 d = (INTERNAL_SIZE_T *) mem;
3788 clearsize = csz - SIZE_SZ;
3789 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3790 assert (nclears >= 3);
3792 if (nclears > 9)
3793 return memset (d, 0, clearsize);
3795 else
3797 *(d + 0) = 0;
3798 *(d + 1) = 0;
3799 *(d + 2) = 0;
3800 if (nclears > 4)
3802 *(d + 3) = 0;
3803 *(d + 4) = 0;
3804 if (nclears > 6)
3806 *(d + 5) = 0;
3807 *(d + 6) = 0;
3808 if (nclears > 8)
3810 *(d + 7) = 0;
3811 *(d + 8) = 0;
3817 return mem;
3819 #endif /* IS_IN (libc) */
3822 ------------------------------ malloc ------------------------------
3825 static void *
3826 _int_malloc (mstate av, size_t bytes)
3828 INTERNAL_SIZE_T nb; /* normalized request size */
3829 unsigned int idx; /* associated bin index */
3830 mbinptr bin; /* associated bin */
3832 mchunkptr victim; /* inspected/selected chunk */
3833 INTERNAL_SIZE_T size; /* its size */
3834 int victim_index; /* its bin index */
3836 mchunkptr remainder; /* remainder from a split */
3837 unsigned long remainder_size; /* its size */
3839 unsigned int block; /* bit map traverser */
3840 unsigned int bit; /* bit map traverser */
3841 unsigned int map; /* current word of binmap */
3843 mchunkptr fwd; /* misc temp for linking */
3844 mchunkptr bck; /* misc temp for linking */
3846 #if USE_TCACHE
3847 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3848 #endif
3851 Convert request size to internal form by adding SIZE_SZ bytes
3852 overhead plus possibly more to obtain necessary alignment and/or
3853 to obtain a size of at least MINSIZE, the smallest allocatable
3854 size. Also, checked_request2size returns false for request sizes
3855 that are so large that they wrap around zero when padded and
3856 aligned.
3859 nb = checked_request2size (bytes);
3860 if (nb == 0)
3862 __set_errno (ENOMEM);
3863 return NULL;
3866 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3867 mmap. */
3868 if (__glibc_unlikely (av == NULL))
3870 void *p = sysmalloc (nb, av);
3871 if (p != NULL)
3872 alloc_perturb (p, bytes);
3873 return p;
3877 If the size qualifies as a fastbin, first check corresponding bin.
3878 This code is safe to execute even if av is not yet initialized, so we
3879 can try it without checking, which saves some time on this fast path.
3882 #define REMOVE_FB(fb, victim, pp) \
3883 do \
3885 victim = pp; \
3886 if (victim == NULL) \
3887 break; \
3888 pp = REVEAL_PTR (victim->fd); \
3889 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3890 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3892 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3893 != victim); \
3895 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3897 idx = fastbin_index (nb);
3898 mfastbinptr *fb = &fastbin (av, idx);
3899 mchunkptr pp;
3900 victim = *fb;
3902 if (victim != NULL)
3904 if (__glibc_unlikely (misaligned_chunk (victim)))
3905 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3907 if (SINGLE_THREAD_P)
3908 *fb = REVEAL_PTR (victim->fd);
3909 else
3910 REMOVE_FB (fb, pp, victim);
3911 if (__glibc_likely (victim != NULL))
3913 size_t victim_idx = fastbin_index (chunksize (victim));
3914 if (__builtin_expect (victim_idx != idx, 0))
3915 malloc_printerr ("malloc(): memory corruption (fast)");
3916 check_remalloced_chunk (av, victim, nb);
3917 #if USE_TCACHE
3918 /* While we're here, if we see other chunks of the same size,
3919 stash them in the tcache. */
3920 size_t tc_idx = csize2tidx (nb);
3921 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3923 mchunkptr tc_victim;
3925 /* While bin not empty and tcache not full, copy chunks. */
3926 while (tcache->counts[tc_idx] < mp_.tcache_count
3927 && (tc_victim = *fb) != NULL)
3929 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3930 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3931 if (SINGLE_THREAD_P)
3932 *fb = REVEAL_PTR (tc_victim->fd);
3933 else
3935 REMOVE_FB (fb, pp, tc_victim);
3936 if (__glibc_unlikely (tc_victim == NULL))
3937 break;
3939 tcache_put (tc_victim, tc_idx);
3942 #endif
3943 void *p = chunk2mem (victim);
3944 alloc_perturb (p, bytes);
3945 return p;
3951 If a small request, check regular bin. Since these "smallbins"
3952 hold one size each, no searching within bins is necessary.
3953 (For a large request, we need to wait until unsorted chunks are
3954 processed to find best fit. But for small ones, fits are exact
3955 anyway, so we can check now, which is faster.)
3958 if (in_smallbin_range (nb))
3960 idx = smallbin_index (nb);
3961 bin = bin_at (av, idx);
3963 if ((victim = last (bin)) != bin)
3965 bck = victim->bk;
3966 if (__glibc_unlikely (bck->fd != victim))
3967 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3968 set_inuse_bit_at_offset (victim, nb);
3969 bin->bk = bck;
3970 bck->fd = bin;
3972 if (av != &main_arena)
3973 set_non_main_arena (victim);
3974 check_malloced_chunk (av, victim, nb);
3975 #if USE_TCACHE
3976 /* While we're here, if we see other chunks of the same size,
3977 stash them in the tcache. */
3978 size_t tc_idx = csize2tidx (nb);
3979 if (tcache != NULL && tc_idx < mp_.tcache_bins)
3981 mchunkptr tc_victim;
3983 /* While bin not empty and tcache not full, copy chunks over. */
3984 while (tcache->counts[tc_idx] < mp_.tcache_count
3985 && (tc_victim = last (bin)) != bin)
3987 if (tc_victim != 0)
3989 bck = tc_victim->bk;
3990 set_inuse_bit_at_offset (tc_victim, nb);
3991 if (av != &main_arena)
3992 set_non_main_arena (tc_victim);
3993 bin->bk = bck;
3994 bck->fd = bin;
3996 tcache_put (tc_victim, tc_idx);
4000 #endif
4001 void *p = chunk2mem (victim);
4002 alloc_perturb (p, bytes);
4003 return p;
4008 If this is a large request, consolidate fastbins before continuing.
4009 While it might look excessive to kill all fastbins before
4010 even seeing if there is space available, this avoids
4011 fragmentation problems normally associated with fastbins.
4012 Also, in practice, programs tend to have runs of either small or
4013 large requests, but less often mixtures, so consolidation is not
4014 invoked all that often in most programs. And the programs that
4015 it is called frequently in otherwise tend to fragment.
4018 else
4020 idx = largebin_index (nb);
4021 if (atomic_load_relaxed (&av->have_fastchunks))
4022 malloc_consolidate (av);
4026 Process recently freed or remaindered chunks, taking one only if
4027 it is exact fit, or, if this a small request, the chunk is remainder from
4028 the most recent non-exact fit. Place other traversed chunks in
4029 bins. Note that this step is the only place in any routine where
4030 chunks are placed in bins.
4032 The outer loop here is needed because we might not realize until
4033 near the end of malloc that we should have consolidated, so must
4034 do so and retry. This happens at most once, and only when we would
4035 otherwise need to expand memory to service a "small" request.
4038 #if USE_TCACHE
4039 INTERNAL_SIZE_T tcache_nb = 0;
4040 size_t tc_idx = csize2tidx (nb);
4041 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4042 tcache_nb = nb;
4043 int return_cached = 0;
4045 tcache_unsorted_count = 0;
4046 #endif
4048 for (;; )
4050 int iters = 0;
4051 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
4053 bck = victim->bk;
4054 size = chunksize (victim);
4055 mchunkptr next = chunk_at_offset (victim, size);
4057 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
4058 || __glibc_unlikely (size > av->system_mem))
4059 malloc_printerr ("malloc(): invalid size (unsorted)");
4060 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
4061 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
4062 malloc_printerr ("malloc(): invalid next size (unsorted)");
4063 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
4064 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
4065 if (__glibc_unlikely (bck->fd != victim)
4066 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
4067 malloc_printerr ("malloc(): unsorted double linked list corrupted");
4068 if (__glibc_unlikely (prev_inuse (next)))
4069 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
4072 If a small request, try to use last remainder if it is the
4073 only chunk in unsorted bin. This helps promote locality for
4074 runs of consecutive small requests. This is the only
4075 exception to best-fit, and applies only when there is
4076 no exact fit for a small chunk.
4079 if (in_smallbin_range (nb) &&
4080 bck == unsorted_chunks (av) &&
4081 victim == av->last_remainder &&
4082 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4084 /* split and reattach remainder */
4085 remainder_size = size - nb;
4086 remainder = chunk_at_offset (victim, nb);
4087 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
4088 av->last_remainder = remainder;
4089 remainder->bk = remainder->fd = unsorted_chunks (av);
4090 if (!in_smallbin_range (remainder_size))
4092 remainder->fd_nextsize = NULL;
4093 remainder->bk_nextsize = NULL;
4096 set_head (victim, nb | PREV_INUSE |
4097 (av != &main_arena ? NON_MAIN_ARENA : 0));
4098 set_head (remainder, remainder_size | PREV_INUSE);
4099 set_foot (remainder, remainder_size);
4101 check_malloced_chunk (av, victim, nb);
4102 void *p = chunk2mem (victim);
4103 alloc_perturb (p, bytes);
4104 return p;
4107 /* remove from unsorted list */
4108 unsorted_chunks (av)->bk = bck;
4109 bck->fd = unsorted_chunks (av);
4111 /* Take now instead of binning if exact fit */
4113 if (size == nb)
4115 set_inuse_bit_at_offset (victim, size);
4116 if (av != &main_arena)
4117 set_non_main_arena (victim);
4118 #if USE_TCACHE
4119 /* Fill cache first, return to user only if cache fills.
4120 We may return one of these chunks later. */
4121 if (tcache_nb > 0
4122 && tcache->counts[tc_idx] < mp_.tcache_count)
4124 tcache_put (victim, tc_idx);
4125 return_cached = 1;
4126 continue;
4128 else
4130 #endif
4131 check_malloced_chunk (av, victim, nb);
4132 void *p = chunk2mem (victim);
4133 alloc_perturb (p, bytes);
4134 return p;
4135 #if USE_TCACHE
4137 #endif
4140 /* place chunk in bin */
4142 if (in_smallbin_range (size))
4144 victim_index = smallbin_index (size);
4145 bck = bin_at (av, victim_index);
4146 fwd = bck->fd;
4148 else
4150 victim_index = largebin_index (size);
4151 bck = bin_at (av, victim_index);
4152 fwd = bck->fd;
4154 /* maintain large bins in sorted order */
4155 if (fwd != bck)
4157 /* Or with inuse bit to speed comparisons */
4158 size |= PREV_INUSE;
4159 /* if smaller than smallest, bypass loop below */
4160 assert (chunk_main_arena (bck->bk));
4161 if ((unsigned long) (size)
4162 < (unsigned long) chunksize_nomask (bck->bk))
4164 fwd = bck;
4165 bck = bck->bk;
4167 victim->fd_nextsize = fwd->fd;
4168 victim->bk_nextsize = fwd->fd->bk_nextsize;
4169 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4171 else
4173 assert (chunk_main_arena (fwd));
4174 while ((unsigned long) size < chunksize_nomask (fwd))
4176 fwd = fwd->fd_nextsize;
4177 assert (chunk_main_arena (fwd));
4180 if ((unsigned long) size
4181 == (unsigned long) chunksize_nomask (fwd))
4182 /* Always insert in the second position. */
4183 fwd = fwd->fd;
4184 else
4186 victim->fd_nextsize = fwd;
4187 victim->bk_nextsize = fwd->bk_nextsize;
4188 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4189 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4190 fwd->bk_nextsize = victim;
4191 victim->bk_nextsize->fd_nextsize = victim;
4193 bck = fwd->bk;
4194 if (bck->fd != fwd)
4195 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4198 else
4199 victim->fd_nextsize = victim->bk_nextsize = victim;
4202 mark_bin (av, victim_index);
4203 victim->bk = bck;
4204 victim->fd = fwd;
4205 fwd->bk = victim;
4206 bck->fd = victim;
4208 #if USE_TCACHE
4209 /* If we've processed as many chunks as we're allowed while
4210 filling the cache, return one of the cached ones. */
4211 ++tcache_unsorted_count;
4212 if (return_cached
4213 && mp_.tcache_unsorted_limit > 0
4214 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4216 return tcache_get (tc_idx);
4218 #endif
4220 #define MAX_ITERS 10000
4221 if (++iters >= MAX_ITERS)
4222 break;
4225 #if USE_TCACHE
4226 /* If all the small chunks we found ended up cached, return one now. */
4227 if (return_cached)
4229 return tcache_get (tc_idx);
4231 #endif
4234 If a large request, scan through the chunks of current bin in
4235 sorted order to find smallest that fits. Use the skip list for this.
4238 if (!in_smallbin_range (nb))
4240 bin = bin_at (av, idx);
4242 /* skip scan if empty or largest chunk is too small */
4243 if ((victim = first (bin)) != bin
4244 && (unsigned long) chunksize_nomask (victim)
4245 >= (unsigned long) (nb))
4247 victim = victim->bk_nextsize;
4248 while (((unsigned long) (size = chunksize (victim)) <
4249 (unsigned long) (nb)))
4250 victim = victim->bk_nextsize;
4252 /* Avoid removing the first entry for a size so that the skip
4253 list does not have to be rerouted. */
4254 if (victim != last (bin)
4255 && chunksize_nomask (victim)
4256 == chunksize_nomask (victim->fd))
4257 victim = victim->fd;
4259 remainder_size = size - nb;
4260 unlink_chunk (av, victim);
4262 /* Exhaust */
4263 if (remainder_size < MINSIZE)
4265 set_inuse_bit_at_offset (victim, size);
4266 if (av != &main_arena)
4267 set_non_main_arena (victim);
4269 /* Split */
4270 else
4272 remainder = chunk_at_offset (victim, nb);
4273 /* We cannot assume the unsorted list is empty and therefore
4274 have to perform a complete insert here. */
4275 bck = unsorted_chunks (av);
4276 fwd = bck->fd;
4277 if (__glibc_unlikely (fwd->bk != bck))
4278 malloc_printerr ("malloc(): corrupted unsorted chunks");
4279 remainder->bk = bck;
4280 remainder->fd = fwd;
4281 bck->fd = remainder;
4282 fwd->bk = remainder;
4283 if (!in_smallbin_range (remainder_size))
4285 remainder->fd_nextsize = NULL;
4286 remainder->bk_nextsize = NULL;
4288 set_head (victim, nb | PREV_INUSE |
4289 (av != &main_arena ? NON_MAIN_ARENA : 0));
4290 set_head (remainder, remainder_size | PREV_INUSE);
4291 set_foot (remainder, remainder_size);
4293 check_malloced_chunk (av, victim, nb);
4294 void *p = chunk2mem (victim);
4295 alloc_perturb (p, bytes);
4296 return p;
4301 Search for a chunk by scanning bins, starting with next largest
4302 bin. This search is strictly by best-fit; i.e., the smallest
4303 (with ties going to approximately the least recently used) chunk
4304 that fits is selected.
4306 The bitmap avoids needing to check that most blocks are nonempty.
4307 The particular case of skipping all bins during warm-up phases
4308 when no chunks have been returned yet is faster than it might look.
4311 ++idx;
4312 bin = bin_at (av, idx);
4313 block = idx2block (idx);
4314 map = av->binmap[block];
4315 bit = idx2bit (idx);
4317 for (;; )
4319 /* Skip rest of block if there are no more set bits in this block. */
4320 if (bit > map || bit == 0)
4324 if (++block >= BINMAPSIZE) /* out of bins */
4325 goto use_top;
4327 while ((map = av->binmap[block]) == 0);
4329 bin = bin_at (av, (block << BINMAPSHIFT));
4330 bit = 1;
4333 /* Advance to bin with set bit. There must be one. */
4334 while ((bit & map) == 0)
4336 bin = next_bin (bin);
4337 bit <<= 1;
4338 assert (bit != 0);
4341 /* Inspect the bin. It is likely to be non-empty */
4342 victim = last (bin);
4344 /* If a false alarm (empty bin), clear the bit. */
4345 if (victim == bin)
4347 av->binmap[block] = map &= ~bit; /* Write through */
4348 bin = next_bin (bin);
4349 bit <<= 1;
4352 else
4354 size = chunksize (victim);
4356 /* We know the first chunk in this bin is big enough to use. */
4357 assert ((unsigned long) (size) >= (unsigned long) (nb));
4359 remainder_size = size - nb;
4361 /* unlink */
4362 unlink_chunk (av, victim);
4364 /* Exhaust */
4365 if (remainder_size < MINSIZE)
4367 set_inuse_bit_at_offset (victim, size);
4368 if (av != &main_arena)
4369 set_non_main_arena (victim);
4372 /* Split */
4373 else
4375 remainder = chunk_at_offset (victim, nb);
4377 /* We cannot assume the unsorted list is empty and therefore
4378 have to perform a complete insert here. */
4379 bck = unsorted_chunks (av);
4380 fwd = bck->fd;
4381 if (__glibc_unlikely (fwd->bk != bck))
4382 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4383 remainder->bk = bck;
4384 remainder->fd = fwd;
4385 bck->fd = remainder;
4386 fwd->bk = remainder;
4388 /* advertise as last remainder */
4389 if (in_smallbin_range (nb))
4390 av->last_remainder = remainder;
4391 if (!in_smallbin_range (remainder_size))
4393 remainder->fd_nextsize = NULL;
4394 remainder->bk_nextsize = NULL;
4396 set_head (victim, nb | PREV_INUSE |
4397 (av != &main_arena ? NON_MAIN_ARENA : 0));
4398 set_head (remainder, remainder_size | PREV_INUSE);
4399 set_foot (remainder, remainder_size);
4401 check_malloced_chunk (av, victim, nb);
4402 void *p = chunk2mem (victim);
4403 alloc_perturb (p, bytes);
4404 return p;
4408 use_top:
4410 If large enough, split off the chunk bordering the end of memory
4411 (held in av->top). Note that this is in accord with the best-fit
4412 search rule. In effect, av->top is treated as larger (and thus
4413 less well fitting) than any other available chunk since it can
4414 be extended to be as large as necessary (up to system
4415 limitations).
4417 We require that av->top always exists (i.e., has size >=
4418 MINSIZE) after initialization, so if it would otherwise be
4419 exhausted by current request, it is replenished. (The main
4420 reason for ensuring it exists is that we may need MINSIZE space
4421 to put in fenceposts in sysmalloc.)
4424 victim = av->top;
4425 size = chunksize (victim);
4427 if (__glibc_unlikely (size > av->system_mem))
4428 malloc_printerr ("malloc(): corrupted top size");
4430 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4432 remainder_size = size - nb;
4433 remainder = chunk_at_offset (victim, nb);
4434 av->top = remainder;
4435 set_head (victim, nb | PREV_INUSE |
4436 (av != &main_arena ? NON_MAIN_ARENA : 0));
4437 set_head (remainder, remainder_size | PREV_INUSE);
4439 check_malloced_chunk (av, victim, nb);
4440 void *p = chunk2mem (victim);
4441 alloc_perturb (p, bytes);
4442 return p;
4445 /* When we are using atomic ops to free fast chunks we can get
4446 here for all block sizes. */
4447 else if (atomic_load_relaxed (&av->have_fastchunks))
4449 malloc_consolidate (av);
4450 /* restore original bin index */
4451 if (in_smallbin_range (nb))
4452 idx = smallbin_index (nb);
4453 else
4454 idx = largebin_index (nb);
4458 Otherwise, relay to handle system-dependent cases
4460 else
4462 void *p = sysmalloc (nb, av);
4463 if (p != NULL)
4464 alloc_perturb (p, bytes);
4465 return p;
4471 ------------------------------ free ------------------------------
4474 static void
4475 _int_free (mstate av, mchunkptr p, int have_lock)
4477 INTERNAL_SIZE_T size; /* its size */
4478 mfastbinptr *fb; /* associated fastbin */
4479 mchunkptr nextchunk; /* next contiguous chunk */
4480 INTERNAL_SIZE_T nextsize; /* its size */
4481 int nextinuse; /* true if nextchunk is used */
4482 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4483 mchunkptr bck; /* misc temp for linking */
4484 mchunkptr fwd; /* misc temp for linking */
4486 size = chunksize (p);
4488 /* Little security check which won't hurt performance: the
4489 allocator never wrapps around at the end of the address space.
4490 Therefore we can exclude some size values which might appear
4491 here by accident or by "design" from some intruder. */
4492 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4493 || __builtin_expect (misaligned_chunk (p), 0))
4494 malloc_printerr ("free(): invalid pointer");
4495 /* We know that each chunk is at least MINSIZE bytes in size or a
4496 multiple of MALLOC_ALIGNMENT. */
4497 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4498 malloc_printerr ("free(): invalid size");
4500 check_inuse_chunk(av, p);
4502 #if USE_TCACHE
4504 size_t tc_idx = csize2tidx (size);
4505 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4507 /* Check to see if it's already in the tcache. */
4508 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4510 /* This test succeeds on double free. However, we don't 100%
4511 trust it (it also matches random payload data at a 1 in
4512 2^<size_t> chance), so verify it's not an unlikely
4513 coincidence before aborting. */
4514 if (__glibc_unlikely (e->key == tcache_key))
4516 tcache_entry *tmp;
4517 size_t cnt = 0;
4518 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4519 for (tmp = tcache->entries[tc_idx];
4520 tmp;
4521 tmp = REVEAL_PTR (tmp->next), ++cnt)
4523 if (cnt >= mp_.tcache_count)
4524 malloc_printerr ("free(): too many chunks detected in tcache");
4525 if (__glibc_unlikely (!aligned_OK (tmp)))
4526 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4527 if (tmp == e)
4528 malloc_printerr ("free(): double free detected in tcache 2");
4529 /* If we get here, it was a coincidence. We've wasted a
4530 few cycles, but don't abort. */
4534 if (tcache->counts[tc_idx] < mp_.tcache_count)
4536 tcache_put (p, tc_idx);
4537 return;
4541 #endif
4544 If eligible, place chunk on a fastbin so it can be found
4545 and used quickly in malloc.
4548 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4550 #if TRIM_FASTBINS
4552 If TRIM_FASTBINS set, don't place chunks
4553 bordering top into fastbins
4555 && (chunk_at_offset(p, size) != av->top)
4556 #endif
4559 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4560 <= CHUNK_HDR_SZ, 0)
4561 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4562 >= av->system_mem, 0))
4564 bool fail = true;
4565 /* We might not have a lock at this point and concurrent modifications
4566 of system_mem might result in a false positive. Redo the test after
4567 getting the lock. */
4568 if (!have_lock)
4570 __libc_lock_lock (av->mutex);
4571 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4572 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4573 __libc_lock_unlock (av->mutex);
4576 if (fail)
4577 malloc_printerr ("free(): invalid next size (fast)");
4580 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4582 atomic_store_relaxed (&av->have_fastchunks, true);
4583 unsigned int idx = fastbin_index(size);
4584 fb = &fastbin (av, idx);
4586 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4587 mchunkptr old = *fb, old2;
4589 if (SINGLE_THREAD_P)
4591 /* Check that the top of the bin is not the record we are going to
4592 add (i.e., double free). */
4593 if (__builtin_expect (old == p, 0))
4594 malloc_printerr ("double free or corruption (fasttop)");
4595 p->fd = PROTECT_PTR (&p->fd, old);
4596 *fb = p;
4598 else
4601 /* Check that the top of the bin is not the record we are going to
4602 add (i.e., double free). */
4603 if (__builtin_expect (old == p, 0))
4604 malloc_printerr ("double free or corruption (fasttop)");
4605 old2 = old;
4606 p->fd = PROTECT_PTR (&p->fd, old);
4608 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4609 != old2);
4611 /* Check that size of fastbin chunk at the top is the same as
4612 size of the chunk that we are adding. We can dereference OLD
4613 only if we have the lock, otherwise it might have already been
4614 allocated again. */
4615 if (have_lock && old != NULL
4616 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4617 malloc_printerr ("invalid fastbin entry (free)");
4621 Consolidate other non-mmapped chunks as they arrive.
4624 else if (!chunk_is_mmapped(p)) {
4626 /* If we're single-threaded, don't lock the arena. */
4627 if (SINGLE_THREAD_P)
4628 have_lock = true;
4630 if (!have_lock)
4631 __libc_lock_lock (av->mutex);
4633 nextchunk = chunk_at_offset(p, size);
4635 /* Lightweight tests: check whether the block is already the
4636 top block. */
4637 if (__glibc_unlikely (p == av->top))
4638 malloc_printerr ("double free or corruption (top)");
4639 /* Or whether the next chunk is beyond the boundaries of the arena. */
4640 if (__builtin_expect (contiguous (av)
4641 && (char *) nextchunk
4642 >= ((char *) av->top + chunksize(av->top)), 0))
4643 malloc_printerr ("double free or corruption (out)");
4644 /* Or whether the block is actually not marked used. */
4645 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4646 malloc_printerr ("double free or corruption (!prev)");
4648 nextsize = chunksize(nextchunk);
4649 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4650 || __builtin_expect (nextsize >= av->system_mem, 0))
4651 malloc_printerr ("free(): invalid next size (normal)");
4653 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4655 /* consolidate backward */
4656 if (!prev_inuse(p)) {
4657 prevsize = prev_size (p);
4658 size += prevsize;
4659 p = chunk_at_offset(p, -((long) prevsize));
4660 if (__glibc_unlikely (chunksize(p) != prevsize))
4661 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4662 unlink_chunk (av, p);
4665 if (nextchunk != av->top) {
4666 /* get and clear inuse bit */
4667 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4669 /* consolidate forward */
4670 if (!nextinuse) {
4671 unlink_chunk (av, nextchunk);
4672 size += nextsize;
4673 } else
4674 clear_inuse_bit_at_offset(nextchunk, 0);
4677 Place the chunk in unsorted chunk list. Chunks are
4678 not placed into regular bins until after they have
4679 been given one chance to be used in malloc.
4682 bck = unsorted_chunks(av);
4683 fwd = bck->fd;
4684 if (__glibc_unlikely (fwd->bk != bck))
4685 malloc_printerr ("free(): corrupted unsorted chunks");
4686 p->fd = fwd;
4687 p->bk = bck;
4688 if (!in_smallbin_range(size))
4690 p->fd_nextsize = NULL;
4691 p->bk_nextsize = NULL;
4693 bck->fd = p;
4694 fwd->bk = p;
4696 set_head(p, size | PREV_INUSE);
4697 set_foot(p, size);
4699 check_free_chunk(av, p);
4703 If the chunk borders the current high end of memory,
4704 consolidate into top
4707 else {
4708 size += nextsize;
4709 set_head(p, size | PREV_INUSE);
4710 av->top = p;
4711 check_chunk(av, p);
4715 If freeing a large space, consolidate possibly-surrounding
4716 chunks. Then, if the total unused topmost memory exceeds trim
4717 threshold, ask malloc_trim to reduce top.
4719 Unless max_fast is 0, we don't know if there are fastbins
4720 bordering top, so we cannot tell for sure whether threshold
4721 has been reached unless fastbins are consolidated. But we
4722 don't want to consolidate on each free. As a compromise,
4723 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4724 is reached.
4727 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4728 if (atomic_load_relaxed (&av->have_fastchunks))
4729 malloc_consolidate(av);
4731 if (av == &main_arena) {
4732 #ifndef MORECORE_CANNOT_TRIM
4733 if ((unsigned long)(chunksize(av->top)) >=
4734 (unsigned long)(mp_.trim_threshold))
4735 systrim(mp_.top_pad, av);
4736 #endif
4737 } else {
4738 /* Always try heap_trim(), even if the top chunk is not
4739 large, because the corresponding heap might go away. */
4740 heap_info *heap = heap_for_ptr(top(av));
4742 assert(heap->ar_ptr == av);
4743 heap_trim(heap, mp_.top_pad);
4747 if (!have_lock)
4748 __libc_lock_unlock (av->mutex);
4751 If the chunk was allocated via mmap, release via munmap().
4754 else {
4755 munmap_chunk (p);
4760 ------------------------- malloc_consolidate -------------------------
4762 malloc_consolidate is a specialized version of free() that tears
4763 down chunks held in fastbins. Free itself cannot be used for this
4764 purpose since, among other things, it might place chunks back onto
4765 fastbins. So, instead, we need to use a minor variant of the same
4766 code.
4769 static void malloc_consolidate(mstate av)
4771 mfastbinptr* fb; /* current fastbin being consolidated */
4772 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4773 mchunkptr p; /* current chunk being consolidated */
4774 mchunkptr nextp; /* next chunk to consolidate */
4775 mchunkptr unsorted_bin; /* bin header */
4776 mchunkptr first_unsorted; /* chunk to link to */
4778 /* These have same use as in free() */
4779 mchunkptr nextchunk;
4780 INTERNAL_SIZE_T size;
4781 INTERNAL_SIZE_T nextsize;
4782 INTERNAL_SIZE_T prevsize;
4783 int nextinuse;
4785 atomic_store_relaxed (&av->have_fastchunks, false);
4787 unsorted_bin = unsorted_chunks(av);
4790 Remove each chunk from fast bin and consolidate it, placing it
4791 then in unsorted bin. Among other reasons for doing this,
4792 placing in unsorted bin avoids needing to calculate actual bins
4793 until malloc is sure that chunks aren't immediately going to be
4794 reused anyway.
4797 maxfb = &fastbin (av, NFASTBINS - 1);
4798 fb = &fastbin (av, 0);
4799 do {
4800 p = atomic_exchange_acquire (fb, NULL);
4801 if (p != 0) {
4802 do {
4804 if (__glibc_unlikely (misaligned_chunk (p)))
4805 malloc_printerr ("malloc_consolidate(): "
4806 "unaligned fastbin chunk detected");
4808 unsigned int idx = fastbin_index (chunksize (p));
4809 if ((&fastbin (av, idx)) != fb)
4810 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4813 check_inuse_chunk(av, p);
4814 nextp = REVEAL_PTR (p->fd);
4816 /* Slightly streamlined version of consolidation code in free() */
4817 size = chunksize (p);
4818 nextchunk = chunk_at_offset(p, size);
4819 nextsize = chunksize(nextchunk);
4821 if (!prev_inuse(p)) {
4822 prevsize = prev_size (p);
4823 size += prevsize;
4824 p = chunk_at_offset(p, -((long) prevsize));
4825 if (__glibc_unlikely (chunksize(p) != prevsize))
4826 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4827 unlink_chunk (av, p);
4830 if (nextchunk != av->top) {
4831 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4833 if (!nextinuse) {
4834 size += nextsize;
4835 unlink_chunk (av, nextchunk);
4836 } else
4837 clear_inuse_bit_at_offset(nextchunk, 0);
4839 first_unsorted = unsorted_bin->fd;
4840 unsorted_bin->fd = p;
4841 first_unsorted->bk = p;
4843 if (!in_smallbin_range (size)) {
4844 p->fd_nextsize = NULL;
4845 p->bk_nextsize = NULL;
4848 set_head(p, size | PREV_INUSE);
4849 p->bk = unsorted_bin;
4850 p->fd = first_unsorted;
4851 set_foot(p, size);
4854 else {
4855 size += nextsize;
4856 set_head(p, size | PREV_INUSE);
4857 av->top = p;
4860 } while ( (p = nextp) != 0);
4863 } while (fb++ != maxfb);
4867 ------------------------------ realloc ------------------------------
4870 static void *
4871 _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4872 INTERNAL_SIZE_T nb)
4874 mchunkptr newp; /* chunk to return */
4875 INTERNAL_SIZE_T newsize; /* its size */
4876 void* newmem; /* corresponding user mem */
4878 mchunkptr next; /* next contiguous chunk after oldp */
4880 mchunkptr remainder; /* extra space at end of newp */
4881 unsigned long remainder_size; /* its size */
4883 /* oldmem size */
4884 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4885 || __builtin_expect (oldsize >= av->system_mem, 0)
4886 || __builtin_expect (oldsize != chunksize (oldp), 0))
4887 malloc_printerr ("realloc(): invalid old size");
4889 check_inuse_chunk (av, oldp);
4891 /* All callers already filter out mmap'ed chunks. */
4892 assert (!chunk_is_mmapped (oldp));
4894 next = chunk_at_offset (oldp, oldsize);
4895 INTERNAL_SIZE_T nextsize = chunksize (next);
4896 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4897 || __builtin_expect (nextsize >= av->system_mem, 0))
4898 malloc_printerr ("realloc(): invalid next size");
4900 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4902 /* already big enough; split below */
4903 newp = oldp;
4904 newsize = oldsize;
4907 else
4909 /* Try to expand forward into top */
4910 if (next == av->top &&
4911 (unsigned long) (newsize = oldsize + nextsize) >=
4912 (unsigned long) (nb + MINSIZE))
4914 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4915 av->top = chunk_at_offset (oldp, nb);
4916 set_head (av->top, (newsize - nb) | PREV_INUSE);
4917 check_inuse_chunk (av, oldp);
4918 return tag_new_usable (chunk2mem (oldp));
4921 /* Try to expand forward into next chunk; split off remainder below */
4922 else if (next != av->top &&
4923 !inuse (next) &&
4924 (unsigned long) (newsize = oldsize + nextsize) >=
4925 (unsigned long) (nb))
4927 newp = oldp;
4928 unlink_chunk (av, next);
4931 /* allocate, copy, free */
4932 else
4934 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4935 if (newmem == 0)
4936 return 0; /* propagate failure */
4938 newp = mem2chunk (newmem);
4939 newsize = chunksize (newp);
4942 Avoid copy if newp is next chunk after oldp.
4944 if (newp == next)
4946 newsize += oldsize;
4947 newp = oldp;
4949 else
4951 void *oldmem = chunk2mem (oldp);
4952 size_t sz = memsize (oldp);
4953 (void) tag_region (oldmem, sz);
4954 newmem = tag_new_usable (newmem);
4955 memcpy (newmem, oldmem, sz);
4956 _int_free (av, oldp, 1);
4957 check_inuse_chunk (av, newp);
4958 return newmem;
4963 /* If possible, free extra space in old or extended chunk */
4965 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4967 remainder_size = newsize - nb;
4969 if (remainder_size < MINSIZE) /* not enough extra to split off */
4971 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4972 set_inuse_bit_at_offset (newp, newsize);
4974 else /* split remainder */
4976 remainder = chunk_at_offset (newp, nb);
4977 /* Clear any user-space tags before writing the header. */
4978 remainder = tag_region (remainder, remainder_size);
4979 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4980 set_head (remainder, remainder_size | PREV_INUSE |
4981 (av != &main_arena ? NON_MAIN_ARENA : 0));
4982 /* Mark remainder as inuse so free() won't complain */
4983 set_inuse_bit_at_offset (remainder, remainder_size);
4984 _int_free (av, remainder, 1);
4987 check_inuse_chunk (av, newp);
4988 return tag_new_usable (chunk2mem (newp));
4992 ------------------------------ memalign ------------------------------
4995 /* Returns 0 if the chunk is not and does not contain the requested
4996 aligned sub-chunk, else returns the amount of "waste" from
4997 trimming. NB is the *chunk* byte size, not the user byte
4998 size. */
4999 static size_t
5000 chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t nb)
5002 void *m = chunk2mem (p);
5003 INTERNAL_SIZE_T size = chunksize (p);
5004 void *aligned_m = m;
5006 if (__glibc_unlikely (misaligned_chunk (p)))
5007 malloc_printerr ("_int_memalign(): unaligned chunk detected");
5009 aligned_m = PTR_ALIGN_UP (m, alignment);
5011 INTERNAL_SIZE_T front_extra = (intptr_t) aligned_m - (intptr_t) m;
5013 /* We can't trim off the front as it's too small. */
5014 if (front_extra > 0 && front_extra < MINSIZE)
5015 return 0;
5017 /* If it's a perfect fit, it's an exception to the return value rule
5018 (we would return zero waste, which looks like "not usable"), so
5019 handle it here by returning a small non-zero value instead. */
5020 if (size == nb && front_extra == 0)
5021 return 1;
5023 /* If the block we need fits in the chunk, calculate total waste. */
5024 if (size > nb + front_extra)
5025 return size - nb;
5027 /* Can't use this chunk. */
5028 return 0;
5031 /* BYTES is user requested bytes, not requested chunksize bytes. */
5032 static void *
5033 _int_memalign (mstate av, size_t alignment, size_t bytes)
5035 INTERNAL_SIZE_T nb; /* padded request size */
5036 char *m; /* memory returned by malloc call */
5037 mchunkptr p; /* corresponding chunk */
5038 char *brk; /* alignment point within p */
5039 mchunkptr newp; /* chunk to return */
5040 INTERNAL_SIZE_T newsize; /* its size */
5041 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5042 mchunkptr remainder; /* spare room at end to split off */
5043 unsigned long remainder_size; /* its size */
5044 INTERNAL_SIZE_T size;
5045 mchunkptr victim;
5047 nb = checked_request2size (bytes);
5048 if (nb == 0)
5050 __set_errno (ENOMEM);
5051 return NULL;
5054 /* We can't check tcache here because we hold the arena lock, which
5055 tcache doesn't expect. We expect it has been checked
5056 earlier. */
5058 /* Strategy: search the bins looking for an existing block that
5059 meets our needs. We scan a range of bins from "exact size" to
5060 "just under 2x", spanning the small/large barrier if needed. If
5061 we don't find anything in those bins, the common malloc code will
5062 scan starting at 2x. */
5064 /* This will be set if we found a candidate chunk. */
5065 victim = NULL;
5067 /* Fast bins are singly-linked, hard to remove a chunk from the middle
5068 and unlikely to meet our alignment requirements. We have not done
5069 any experimentation with searching for aligned fastbins. */
5071 if (av != NULL)
5073 int first_bin_index;
5074 int first_largebin_index;
5075 int last_bin_index;
5077 if (in_smallbin_range (nb))
5078 first_bin_index = smallbin_index (nb);
5079 else
5080 first_bin_index = largebin_index (nb);
5082 if (in_smallbin_range (nb * 2))
5083 last_bin_index = smallbin_index (nb * 2);
5084 else
5085 last_bin_index = largebin_index (nb * 2);
5087 first_largebin_index = largebin_index (MIN_LARGE_SIZE);
5089 int victim_index; /* its bin index */
5091 for (victim_index = first_bin_index;
5092 victim_index < last_bin_index;
5093 victim_index ++)
5095 victim = NULL;
5097 if (victim_index < first_largebin_index)
5099 /* Check small bins. Small bin chunks are doubly-linked despite
5100 being the same size. */
5102 mchunkptr fwd; /* misc temp for linking */
5103 mchunkptr bck; /* misc temp for linking */
5105 bck = bin_at (av, victim_index);
5106 fwd = bck->fd;
5107 while (fwd != bck)
5109 if (chunk_ok_for_memalign (fwd, alignment, nb) > 0)
5111 victim = fwd;
5113 /* Unlink it */
5114 victim->fd->bk = victim->bk;
5115 victim->bk->fd = victim->fd;
5116 break;
5119 fwd = fwd->fd;
5122 else
5124 /* Check large bins. */
5125 mchunkptr fwd; /* misc temp for linking */
5126 mchunkptr bck; /* misc temp for linking */
5127 mchunkptr best = NULL;
5128 size_t best_size = 0;
5130 bck = bin_at (av, victim_index);
5131 fwd = bck->fd;
5133 while (fwd != bck)
5135 int extra;
5137 if (chunksize (fwd) < nb)
5138 break;
5139 extra = chunk_ok_for_memalign (fwd, alignment, nb);
5140 if (extra > 0
5141 && (extra <= best_size || best == NULL))
5143 best = fwd;
5144 best_size = extra;
5147 fwd = fwd->fd;
5149 victim = best;
5151 if (victim != NULL)
5153 unlink_chunk (av, victim);
5154 break;
5158 if (victim != NULL)
5159 break;
5163 /* Strategy: find a spot within that chunk that meets the alignment
5164 request, and then possibly free the leading and trailing space.
5165 This strategy is incredibly costly and can lead to external
5166 fragmentation if header and footer chunks are unused. */
5168 if (victim != NULL)
5170 p = victim;
5171 m = chunk2mem (p);
5172 set_inuse (p);
5173 if (av != &main_arena)
5174 set_non_main_arena (p);
5176 else
5178 /* Call malloc with worst case padding to hit alignment. */
5180 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
5182 if (m == 0)
5183 return 0; /* propagate failure */
5185 p = mem2chunk (m);
5188 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
5190 /* Find an aligned spot inside chunk. Since we need to give back
5191 leading space in a chunk of at least MINSIZE, if the first
5192 calculation places us at a spot with less than MINSIZE leader,
5193 we can move to the next aligned spot -- we've allocated enough
5194 total room so that this is always possible. */
5195 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
5196 - ((signed long) alignment));
5197 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
5198 brk += alignment;
5200 newp = (mchunkptr) brk;
5201 leadsize = brk - (char *) (p);
5202 newsize = chunksize (p) - leadsize;
5204 /* For mmapped chunks, just adjust offset */
5205 if (chunk_is_mmapped (p))
5207 set_prev_size (newp, prev_size (p) + leadsize);
5208 set_head (newp, newsize | IS_MMAPPED);
5209 return chunk2mem (newp);
5212 /* Otherwise, give back leader, use the rest */
5213 set_head (newp, newsize | PREV_INUSE |
5214 (av != &main_arena ? NON_MAIN_ARENA : 0));
5215 set_inuse_bit_at_offset (newp, newsize);
5216 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5217 _int_free (av, p, 1);
5218 p = newp;
5220 assert (newsize >= nb &&
5221 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
5224 /* Also give back spare room at the end */
5225 if (!chunk_is_mmapped (p))
5227 size = chunksize (p);
5228 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
5230 remainder_size = size - nb;
5231 remainder = chunk_at_offset (p, nb);
5232 set_head (remainder, remainder_size | PREV_INUSE |
5233 (av != &main_arena ? NON_MAIN_ARENA : 0));
5234 set_head_size (p, nb);
5235 _int_free (av, remainder, 1);
5239 check_inuse_chunk (av, p);
5240 return chunk2mem (p);
5245 ------------------------------ malloc_trim ------------------------------
5248 static int
5249 mtrim (mstate av, size_t pad)
5251 /* Ensure all blocks are consolidated. */
5252 malloc_consolidate (av);
5254 const size_t ps = GLRO (dl_pagesize);
5255 int psindex = bin_index (ps);
5256 const size_t psm1 = ps - 1;
5258 int result = 0;
5259 for (int i = 1; i < NBINS; ++i)
5260 if (i == 1 || i >= psindex)
5262 mbinptr bin = bin_at (av, i);
5264 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5266 INTERNAL_SIZE_T size = chunksize (p);
5268 if (size > psm1 + sizeof (struct malloc_chunk))
5270 /* See whether the chunk contains at least one unused page. */
5271 char *paligned_mem = (char *) (((uintptr_t) p
5272 + sizeof (struct malloc_chunk)
5273 + psm1) & ~psm1);
5275 assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ
5276 <= paligned_mem);
5277 assert ((char *) p + size > paligned_mem);
5279 /* This is the size we could potentially free. */
5280 size -= paligned_mem - (char *) p;
5282 if (size > psm1)
5284 #if MALLOC_DEBUG
5285 /* When debugging we simulate destroying the memory
5286 content. */
5287 memset (paligned_mem, 0x89, size & ~psm1);
5288 #endif
5289 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5291 result = 1;
5297 #ifndef MORECORE_CANNOT_TRIM
5298 return result | (av == &main_arena ? systrim (pad, av) : 0);
5300 #else
5301 return result;
5302 #endif
5307 __malloc_trim (size_t s)
5309 int result = 0;
5311 if (!__malloc_initialized)
5312 ptmalloc_init ();
5314 mstate ar_ptr = &main_arena;
5317 __libc_lock_lock (ar_ptr->mutex);
5318 result |= mtrim (ar_ptr, s);
5319 __libc_lock_unlock (ar_ptr->mutex);
5321 ar_ptr = ar_ptr->next;
5323 while (ar_ptr != &main_arena);
5325 return result;
5330 ------------------------- malloc_usable_size -------------------------
5333 static size_t
5334 musable (void *mem)
5336 mchunkptr p = mem2chunk (mem);
5338 if (chunk_is_mmapped (p))
5339 return chunksize (p) - CHUNK_HDR_SZ;
5340 else if (inuse (p))
5341 return memsize (p);
5343 return 0;
5346 #if IS_IN (libc)
5347 size_t
5348 __malloc_usable_size (void *m)
5350 if (m == NULL)
5351 return 0;
5352 return musable (m);
5354 #endif
5357 ------------------------------ mallinfo ------------------------------
5358 Accumulate malloc statistics for arena AV into M.
5360 static void
5361 int_mallinfo (mstate av, struct mallinfo2 *m)
5363 size_t i;
5364 mbinptr b;
5365 mchunkptr p;
5366 INTERNAL_SIZE_T avail;
5367 INTERNAL_SIZE_T fastavail;
5368 int nblocks;
5369 int nfastblocks;
5371 check_malloc_state (av);
5373 /* Account for top */
5374 avail = chunksize (av->top);
5375 nblocks = 1; /* top always exists */
5377 /* traverse fastbins */
5378 nfastblocks = 0;
5379 fastavail = 0;
5381 for (i = 0; i < NFASTBINS; ++i)
5383 for (p = fastbin (av, i);
5384 p != 0;
5385 p = REVEAL_PTR (p->fd))
5387 if (__glibc_unlikely (misaligned_chunk (p)))
5388 malloc_printerr ("int_mallinfo(): "
5389 "unaligned fastbin chunk detected");
5390 ++nfastblocks;
5391 fastavail += chunksize (p);
5395 avail += fastavail;
5397 /* traverse regular bins */
5398 for (i = 1; i < NBINS; ++i)
5400 b = bin_at (av, i);
5401 for (p = last (b); p != b; p = p->bk)
5403 ++nblocks;
5404 avail += chunksize (p);
5408 m->smblks += nfastblocks;
5409 m->ordblks += nblocks;
5410 m->fordblks += avail;
5411 m->uordblks += av->system_mem - avail;
5412 m->arena += av->system_mem;
5413 m->fsmblks += fastavail;
5414 if (av == &main_arena)
5416 m->hblks = mp_.n_mmaps;
5417 m->hblkhd = mp_.mmapped_mem;
5418 m->usmblks = 0;
5419 m->keepcost = chunksize (av->top);
5424 struct mallinfo2
5425 __libc_mallinfo2 (void)
5427 struct mallinfo2 m;
5428 mstate ar_ptr;
5430 if (!__malloc_initialized)
5431 ptmalloc_init ();
5433 memset (&m, 0, sizeof (m));
5434 ar_ptr = &main_arena;
5437 __libc_lock_lock (ar_ptr->mutex);
5438 int_mallinfo (ar_ptr, &m);
5439 __libc_lock_unlock (ar_ptr->mutex);
5441 ar_ptr = ar_ptr->next;
5443 while (ar_ptr != &main_arena);
5445 return m;
5447 libc_hidden_def (__libc_mallinfo2)
5449 struct mallinfo
5450 __libc_mallinfo (void)
5452 struct mallinfo m;
5453 struct mallinfo2 m2 = __libc_mallinfo2 ();
5455 m.arena = m2.arena;
5456 m.ordblks = m2.ordblks;
5457 m.smblks = m2.smblks;
5458 m.hblks = m2.hblks;
5459 m.hblkhd = m2.hblkhd;
5460 m.usmblks = m2.usmblks;
5461 m.fsmblks = m2.fsmblks;
5462 m.uordblks = m2.uordblks;
5463 m.fordblks = m2.fordblks;
5464 m.keepcost = m2.keepcost;
5466 return m;
5471 ------------------------------ malloc_stats ------------------------------
5474 void
5475 __malloc_stats (void)
5477 int i;
5478 mstate ar_ptr;
5479 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5481 if (!__malloc_initialized)
5482 ptmalloc_init ();
5483 _IO_flockfile (stderr);
5484 int old_flags2 = stderr->_flags2;
5485 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5486 for (i = 0, ar_ptr = &main_arena;; i++)
5488 struct mallinfo2 mi;
5490 memset (&mi, 0, sizeof (mi));
5491 __libc_lock_lock (ar_ptr->mutex);
5492 int_mallinfo (ar_ptr, &mi);
5493 fprintf (stderr, "Arena %d:\n", i);
5494 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5495 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5496 #if MALLOC_DEBUG > 1
5497 if (i > 0)
5498 dump_heap (heap_for_ptr (top (ar_ptr)));
5499 #endif
5500 system_b += mi.arena;
5501 in_use_b += mi.uordblks;
5502 __libc_lock_unlock (ar_ptr->mutex);
5503 ar_ptr = ar_ptr->next;
5504 if (ar_ptr == &main_arena)
5505 break;
5507 fprintf (stderr, "Total (incl. mmap):\n");
5508 fprintf (stderr, "system bytes = %10u\n", system_b);
5509 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5510 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5511 fprintf (stderr, "max mmap bytes = %10lu\n",
5512 (unsigned long) mp_.max_mmapped_mem);
5513 stderr->_flags2 = old_flags2;
5514 _IO_funlockfile (stderr);
5519 ------------------------------ mallopt ------------------------------
5521 static __always_inline int
5522 do_set_trim_threshold (size_t value)
5524 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5525 mp_.no_dyn_threshold);
5526 mp_.trim_threshold = value;
5527 mp_.no_dyn_threshold = 1;
5528 return 1;
5531 static __always_inline int
5532 do_set_top_pad (size_t value)
5534 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5535 mp_.no_dyn_threshold);
5536 mp_.top_pad = value;
5537 mp_.no_dyn_threshold = 1;
5538 return 1;
5541 static __always_inline int
5542 do_set_mmap_threshold (size_t value)
5544 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5545 mp_.no_dyn_threshold);
5546 mp_.mmap_threshold = value;
5547 mp_.no_dyn_threshold = 1;
5548 return 1;
5551 static __always_inline int
5552 do_set_mmaps_max (int32_t value)
5554 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5555 mp_.no_dyn_threshold);
5556 mp_.n_mmaps_max = value;
5557 mp_.no_dyn_threshold = 1;
5558 return 1;
5561 static __always_inline int
5562 do_set_mallopt_check (int32_t value)
5564 return 1;
5567 static __always_inline int
5568 do_set_perturb_byte (int32_t value)
5570 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5571 perturb_byte = value;
5572 return 1;
5575 static __always_inline int
5576 do_set_arena_test (size_t value)
5578 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5579 mp_.arena_test = value;
5580 return 1;
5583 static __always_inline int
5584 do_set_arena_max (size_t value)
5586 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5587 mp_.arena_max = value;
5588 return 1;
5591 #if USE_TCACHE
5592 static __always_inline int
5593 do_set_tcache_max (size_t value)
5595 if (value <= MAX_TCACHE_SIZE)
5597 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5598 mp_.tcache_max_bytes = value;
5599 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5600 return 1;
5602 return 0;
5605 static __always_inline int
5606 do_set_tcache_count (size_t value)
5608 if (value <= MAX_TCACHE_COUNT)
5610 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5611 mp_.tcache_count = value;
5612 return 1;
5614 return 0;
5617 static __always_inline int
5618 do_set_tcache_unsorted_limit (size_t value)
5620 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5621 mp_.tcache_unsorted_limit = value;
5622 return 1;
5624 #endif
5626 static __always_inline int
5627 do_set_mxfast (size_t value)
5629 if (value <= MAX_FAST_SIZE)
5631 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5632 set_max_fast (value);
5633 return 1;
5635 return 0;
5638 static __always_inline int
5639 do_set_hugetlb (size_t value)
5641 if (value == 1)
5643 enum malloc_thp_mode_t thp_mode = __malloc_thp_mode ();
5645 Only enable THP madvise usage if system does support it and
5646 has 'madvise' mode. Otherwise the madvise() call is wasteful.
5648 if (thp_mode == malloc_thp_mode_madvise)
5649 mp_.thp_pagesize = __malloc_default_thp_pagesize ();
5651 else if (value >= 2)
5652 __malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize,
5653 &mp_.hp_flags);
5654 return 0;
5658 __libc_mallopt (int param_number, int value)
5660 mstate av = &main_arena;
5661 int res = 1;
5663 if (!__malloc_initialized)
5664 ptmalloc_init ();
5665 __libc_lock_lock (av->mutex);
5667 LIBC_PROBE (memory_mallopt, 2, param_number, value);
5669 /* We must consolidate main arena before changing max_fast
5670 (see definition of set_max_fast). */
5671 malloc_consolidate (av);
5673 /* Many of these helper functions take a size_t. We do not worry
5674 about overflow here, because negative int values will wrap to
5675 very large size_t values and the helpers have sufficient range
5676 checking for such conversions. Many of these helpers are also
5677 used by the tunables macros in arena.c. */
5679 switch (param_number)
5681 case M_MXFAST:
5682 res = do_set_mxfast (value);
5683 break;
5685 case M_TRIM_THRESHOLD:
5686 res = do_set_trim_threshold (value);
5687 break;
5689 case M_TOP_PAD:
5690 res = do_set_top_pad (value);
5691 break;
5693 case M_MMAP_THRESHOLD:
5694 res = do_set_mmap_threshold (value);
5695 break;
5697 case M_MMAP_MAX:
5698 res = do_set_mmaps_max (value);
5699 break;
5701 case M_CHECK_ACTION:
5702 res = do_set_mallopt_check (value);
5703 break;
5705 case M_PERTURB:
5706 res = do_set_perturb_byte (value);
5707 break;
5709 case M_ARENA_TEST:
5710 if (value > 0)
5711 res = do_set_arena_test (value);
5712 break;
5714 case M_ARENA_MAX:
5715 if (value > 0)
5716 res = do_set_arena_max (value);
5717 break;
5719 __libc_lock_unlock (av->mutex);
5720 return res;
5722 libc_hidden_def (__libc_mallopt)
5726 -------------------- Alternative MORECORE functions --------------------
5731 General Requirements for MORECORE.
5733 The MORECORE function must have the following properties:
5735 If MORECORE_CONTIGUOUS is false:
5737 * MORECORE must allocate in multiples of pagesize. It will
5738 only be called with arguments that are multiples of pagesize.
5740 * MORECORE(0) must return an address that is at least
5741 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5743 else (i.e. If MORECORE_CONTIGUOUS is true):
5745 * Consecutive calls to MORECORE with positive arguments
5746 return increasing addresses, indicating that space has been
5747 contiguously extended.
5749 * MORECORE need not allocate in multiples of pagesize.
5750 Calls to MORECORE need not have args of multiples of pagesize.
5752 * MORECORE need not page-align.
5754 In either case:
5756 * MORECORE may allocate more memory than requested. (Or even less,
5757 but this will generally result in a malloc failure.)
5759 * MORECORE must not allocate memory when given argument zero, but
5760 instead return one past the end address of memory from previous
5761 nonzero call. This malloc does NOT call MORECORE(0)
5762 until at least one call with positive arguments is made, so
5763 the initial value returned is not important.
5765 * Even though consecutive calls to MORECORE need not return contiguous
5766 addresses, it must be OK for malloc'ed chunks to span multiple
5767 regions in those cases where they do happen to be contiguous.
5769 * MORECORE need not handle negative arguments -- it may instead
5770 just return MORECORE_FAILURE when given negative arguments.
5771 Negative arguments are always multiples of pagesize. MORECORE
5772 must not misinterpret negative args as large positive unsigned
5773 args. You can suppress all such calls from even occurring by defining
5774 MORECORE_CANNOT_TRIM,
5776 There is some variation across systems about the type of the
5777 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5778 actually be size_t, because sbrk supports negative args, so it is
5779 normally the signed type of the same width as size_t (sometimes
5780 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5781 matter though. Internally, we use "long" as arguments, which should
5782 work across all reasonable possibilities.
5784 Additionally, if MORECORE ever returns failure for a positive
5785 request, then mmap is used as a noncontiguous system allocator. This
5786 is a useful backup strategy for systems with holes in address spaces
5787 -- in this case sbrk cannot contiguously expand the heap, but mmap
5788 may be able to map noncontiguous space.
5790 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5791 a function that always returns MORECORE_FAILURE.
5793 If you are using this malloc with something other than sbrk (or its
5794 emulation) to supply memory regions, you probably want to set
5795 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5796 allocator kindly contributed for pre-OSX macOS. It uses virtually
5797 but not necessarily physically contiguous non-paged memory (locked
5798 in, present and won't get swapped out). You can use it by
5799 uncommenting this section, adding some #includes, and setting up the
5800 appropriate defines above:
5802 *#define MORECORE osMoreCore
5803 *#define MORECORE_CONTIGUOUS 0
5805 There is also a shutdown routine that should somehow be called for
5806 cleanup upon program exit.
5808 *#define MAX_POOL_ENTRIES 100
5809 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
5810 static int next_os_pool;
5811 void *our_os_pools[MAX_POOL_ENTRIES];
5813 void *osMoreCore(int size)
5815 void *ptr = 0;
5816 static void *sbrk_top = 0;
5818 if (size > 0)
5820 if (size < MINIMUM_MORECORE_SIZE)
5821 size = MINIMUM_MORECORE_SIZE;
5822 if (CurrentExecutionLevel() == kTaskLevel)
5823 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5824 if (ptr == 0)
5826 return (void *) MORECORE_FAILURE;
5828 // save ptrs so they can be freed during cleanup
5829 our_os_pools[next_os_pool] = ptr;
5830 next_os_pool++;
5831 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5832 sbrk_top = (char *) ptr + size;
5833 return ptr;
5835 else if (size < 0)
5837 // we don't currently support shrink behavior
5838 return (void *) MORECORE_FAILURE;
5840 else
5842 return sbrk_top;
5846 // cleanup any allocated memory pools
5847 // called as last thing before shutting down driver
5849 void osCleanupMem(void)
5851 void **ptr;
5853 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5854 if (*ptr)
5856 PoolDeallocate(*ptr);
5857 * ptr = 0;
5864 /* Helper code. */
5866 extern char **__libc_argv attribute_hidden;
5868 static void
5869 malloc_printerr (const char *str)
5871 #if IS_IN (libc)
5872 __libc_message ("%s\n", str);
5873 #else
5874 __libc_fatal (str);
5875 #endif
5876 __builtin_unreachable ();
5879 #if IS_IN (libc)
5880 /* We need a wrapper function for one of the additions of POSIX. */
5882 __posix_memalign (void **memptr, size_t alignment, size_t size)
5884 void *mem;
5886 if (!__malloc_initialized)
5887 ptmalloc_init ();
5889 /* Test whether the SIZE argument is valid. It must be a power of
5890 two multiple of sizeof (void *). */
5891 if (alignment % sizeof (void *) != 0
5892 || !powerof2 (alignment / sizeof (void *))
5893 || alignment == 0)
5894 return EINVAL;
5897 void *address = RETURN_ADDRESS (0);
5898 mem = _mid_memalign (alignment, size, address);
5900 if (mem != NULL)
5902 *memptr = mem;
5903 return 0;
5906 return ENOMEM;
5908 weak_alias (__posix_memalign, posix_memalign)
5909 #endif
5913 __malloc_info (int options, FILE *fp)
5915 /* For now, at least. */
5916 if (options != 0)
5917 return EINVAL;
5919 int n = 0;
5920 size_t total_nblocks = 0;
5921 size_t total_nfastblocks = 0;
5922 size_t total_avail = 0;
5923 size_t total_fastavail = 0;
5924 size_t total_system = 0;
5925 size_t total_max_system = 0;
5926 size_t total_aspace = 0;
5927 size_t total_aspace_mprotect = 0;
5931 if (!__malloc_initialized)
5932 ptmalloc_init ();
5934 fputs ("<malloc version=\"1\">\n", fp);
5936 /* Iterate over all arenas currently in use. */
5937 mstate ar_ptr = &main_arena;
5940 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5942 size_t nblocks = 0;
5943 size_t nfastblocks = 0;
5944 size_t avail = 0;
5945 size_t fastavail = 0;
5946 struct
5948 size_t from;
5949 size_t to;
5950 size_t total;
5951 size_t count;
5952 } sizes[NFASTBINS + NBINS - 1];
5953 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5955 __libc_lock_lock (ar_ptr->mutex);
5957 /* Account for top chunk. The top-most available chunk is
5958 treated specially and is never in any bin. See "initial_top"
5959 comments. */
5960 avail = chunksize (ar_ptr->top);
5961 nblocks = 1; /* Top always exists. */
5963 for (size_t i = 0; i < NFASTBINS; ++i)
5965 mchunkptr p = fastbin (ar_ptr, i);
5966 if (p != NULL)
5968 size_t nthissize = 0;
5969 size_t thissize = chunksize (p);
5971 while (p != NULL)
5973 if (__glibc_unlikely (misaligned_chunk (p)))
5974 malloc_printerr ("__malloc_info(): "
5975 "unaligned fastbin chunk detected");
5976 ++nthissize;
5977 p = REVEAL_PTR (p->fd);
5980 fastavail += nthissize * thissize;
5981 nfastblocks += nthissize;
5982 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5983 sizes[i].to = thissize;
5984 sizes[i].count = nthissize;
5986 else
5987 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5989 sizes[i].total = sizes[i].count * sizes[i].to;
5993 mbinptr bin;
5994 struct malloc_chunk *r;
5996 for (size_t i = 1; i < NBINS; ++i)
5998 bin = bin_at (ar_ptr, i);
5999 r = bin->fd;
6000 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
6001 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
6002 = sizes[NFASTBINS - 1 + i].count = 0;
6004 if (r != NULL)
6005 while (r != bin)
6007 size_t r_size = chunksize_nomask (r);
6008 ++sizes[NFASTBINS - 1 + i].count;
6009 sizes[NFASTBINS - 1 + i].total += r_size;
6010 sizes[NFASTBINS - 1 + i].from
6011 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
6012 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
6013 r_size);
6015 r = r->fd;
6018 if (sizes[NFASTBINS - 1 + i].count == 0)
6019 sizes[NFASTBINS - 1 + i].from = 0;
6020 nblocks += sizes[NFASTBINS - 1 + i].count;
6021 avail += sizes[NFASTBINS - 1 + i].total;
6024 size_t heap_size = 0;
6025 size_t heap_mprotect_size = 0;
6026 size_t heap_count = 0;
6027 if (ar_ptr != &main_arena)
6029 /* Iterate over the arena heaps from back to front. */
6030 heap_info *heap = heap_for_ptr (top (ar_ptr));
6033 heap_size += heap->size;
6034 heap_mprotect_size += heap->mprotect_size;
6035 heap = heap->prev;
6036 ++heap_count;
6038 while (heap != NULL);
6041 __libc_lock_unlock (ar_ptr->mutex);
6043 total_nfastblocks += nfastblocks;
6044 total_fastavail += fastavail;
6046 total_nblocks += nblocks;
6047 total_avail += avail;
6049 for (size_t i = 0; i < nsizes; ++i)
6050 if (sizes[i].count != 0 && i != NFASTBINS)
6051 fprintf (fp, "\
6052 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6053 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
6055 if (sizes[NFASTBINS].count != 0)
6056 fprintf (fp, "\
6057 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6058 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
6059 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
6061 total_system += ar_ptr->system_mem;
6062 total_max_system += ar_ptr->max_system_mem;
6064 fprintf (fp,
6065 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6066 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6067 "<system type=\"current\" size=\"%zu\"/>\n"
6068 "<system type=\"max\" size=\"%zu\"/>\n",
6069 nfastblocks, fastavail, nblocks, avail,
6070 ar_ptr->system_mem, ar_ptr->max_system_mem);
6072 if (ar_ptr != &main_arena)
6074 fprintf (fp,
6075 "<aspace type=\"total\" size=\"%zu\"/>\n"
6076 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
6077 "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
6078 heap_size, heap_mprotect_size, heap_count);
6079 total_aspace += heap_size;
6080 total_aspace_mprotect += heap_mprotect_size;
6082 else
6084 fprintf (fp,
6085 "<aspace type=\"total\" size=\"%zu\"/>\n"
6086 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
6087 ar_ptr->system_mem, ar_ptr->system_mem);
6088 total_aspace += ar_ptr->system_mem;
6089 total_aspace_mprotect += ar_ptr->system_mem;
6092 fputs ("</heap>\n", fp);
6093 ar_ptr = ar_ptr->next;
6095 while (ar_ptr != &main_arena);
6097 fprintf (fp,
6098 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6099 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6100 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
6101 "<system type=\"current\" size=\"%zu\"/>\n"
6102 "<system type=\"max\" size=\"%zu\"/>\n"
6103 "<aspace type=\"total\" size=\"%zu\"/>\n"
6104 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
6105 "</malloc>\n",
6106 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
6107 mp_.n_mmaps, mp_.mmapped_mem,
6108 total_system, total_max_system,
6109 total_aspace, total_aspace_mprotect);
6111 return 0;
6113 #if IS_IN (libc)
6114 weak_alias (__malloc_info, malloc_info)
6116 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
6117 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
6118 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
6119 strong_alias (__libc_memalign, __memalign)
6120 weak_alias (__libc_memalign, memalign)
6121 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
6122 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
6123 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
6124 strong_alias (__libc_mallinfo, __mallinfo)
6125 weak_alias (__libc_mallinfo, mallinfo)
6126 strong_alias (__libc_mallinfo2, __mallinfo2)
6127 weak_alias (__libc_mallinfo2, mallinfo2)
6128 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
6130 weak_alias (__malloc_stats, malloc_stats)
6131 weak_alias (__malloc_usable_size, malloc_usable_size)
6132 weak_alias (__malloc_trim, malloc_trim)
6133 #endif
6135 #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
6136 compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
6137 #endif
6139 /* ------------------------------------------------------------
6140 History:
6142 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
6146 * Local variables:
6147 * c-basic-offset: 2
6148 * End: