1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2022 Free Software Foundation, Inc.
3 Copyright The GNU Toolchain Authors.
4 This file is part of the GNU C Library.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public License as
8 published by the Free Software Foundation; either version 2.1 of the
9 License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library; see the file COPYING.LIB. If
18 not, see <https://www.gnu.org/licenses/>. */
21 This is a version (aka ptmalloc2) of malloc/free/realloc written by
22 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24 There have been substantial changes made after the integration into
25 glibc in all parts of the code. Do not look for much commonality
26 with the ptmalloc2 version.
28 * Version ptmalloc2-20011215
29 based on:
30 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32 * Quickstart
34 In order to compile this implementation, a Makefile is provided with
35 the ptmalloc2 distribution, which has pre-defined targets for some
36 popular systems (e.g. "make posix" for Posix threads). All that is
37 typically required with regard to compiler flags is the selection of
38 the thread package via defining one out of USE_PTHREADS, USE_THR or
39 USE_SPROC. Check the thread-m.h file for what effects this has.
40 Many/most systems will additionally require USE_TSD_DATA_HACK to be
41 defined, so this is the default for "make posix".
43 * Why use this malloc?
45 This is not the fastest, most space-conserving, most portable, or
46 most tunable malloc ever written. However it is among the fastest
47 while also being among the most space-conserving, portable and tunable.
48 Consistent balance across these factors results in a good general-purpose
49 allocator for malloc-intensive programs.
51 The main properties of the algorithms are:
52 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
53 with ties normally decided via FIFO (i.e. least recently used).
54 * For small (<= 64 bytes by default) requests, it is a caching
55 allocator, that maintains pools of quickly recycled chunks.
56 * In between, and for combinations of large and small requests, it does
57 the best it can trying to meet both goals at once.
58 * For very large requests (>= 128KB by default), it relies on system
59 memory mapping facilities, if supported.
61 For a longer but slightly out of date high-level description, see
62 http://gee.cs.oswego.edu/dl/html/malloc.html
64 You may already by default be using a C library containing a malloc
65 that is based on some version of this malloc (for example in
66 linux). You might still want to use the one in this file in order to
67 customize settings or to avoid overheads associated with library
68 versions.
70 * Contents, described in more detail in "description of public routines" below.
72 Standard (ANSI/SVID/...) functions:
73 malloc(size_t n);
74 calloc(size_t n_elements, size_t element_size);
75 free(void* p);
76 realloc(void* p, size_t n);
77 memalign(size_t alignment, size_t n);
78 valloc(size_t n);
79 mallinfo()
80 mallopt(int parameter_number, int parameter_value)
82 Additional functions:
83 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
84 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
85 pvalloc(size_t n);
86 malloc_trim(size_t pad);
87 malloc_usable_size(void* p);
88 malloc_stats();
90 * Vital statistics:
92 Supported pointer representation: 4 or 8 bytes
93 Supported size_t representation: 4 or 8 bytes
94 Note that size_t is allowed to be 4 bytes even if pointers are 8.
95 You can adjust this by defining INTERNAL_SIZE_T
97 Alignment: 2 * sizeof(size_t) (default)
98 (i.e., 8 byte alignment with 4byte size_t). This suffices for
99 nearly all current machines and C compilers. However, you can
100 define MALLOC_ALIGNMENT to be wider than this if necessary.
102 Minimum overhead per allocated chunk: 4 or 8 bytes
103 Each malloced chunk has a hidden word of overhead holding size
104 and status information.
106 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
107 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
109 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
110 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
111 needed; 4 (8) for a trailing size field and 8 (16) bytes for
112 free list pointers. Thus, the minimum allocatable size is
113 16/24/32 bytes.
115 Even a request for zero bytes (i.e., malloc(0)) returns a
116 pointer to something of the minimum allocatable size.
118 The maximum overhead wastage (i.e., number of extra bytes
119 allocated than were requested in malloc) is less than or equal
120 to the minimum size, except for requests >= mmap_threshold that
121 are serviced via mmap(), where the worst case wastage is 2 *
122 sizeof(size_t) bytes plus the remainder from a system page (the
123 minimal mmap unit); typically 4096 or 8192 bytes.
125 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
126 8-byte size_t: 2^64 minus about two pages
128 It is assumed that (possibly signed) size_t values suffice to
129 represent chunk sizes. `Possibly signed' is due to the fact
130 that `size_t' may be defined on a system as either a signed or
131 an unsigned type. The ISO C standard says that it must be
132 unsigned, but a few systems are known not to adhere to this.
133 Additionally, even when size_t is unsigned, sbrk (which is by
134 default used to obtain memory from system) accepts signed
135 arguments, and may not be able to handle size_t-wide arguments
136 with negative sign bit. Generally, values that would
137 appear as negative after accounting for overhead and alignment
138 are supported only via mmap(), which does not have this
139 limitation.
141 Requests for sizes outside the allowed range will perform an optional
142 failure action and then return null. (Requests may also
143 fail because a system is out of memory.)
145 Thread-safety: thread-safe
147 Compliance: I believe it is compliant with the 1997 Single Unix Specification
148 Also SVID/XPG, ANSI C, and probably others as well.
150 * Synopsis of compile-time options:
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and Linux.
155 People also report using it in stand-alone embedded systems.
157 The implementation is in straight, hand-tuned ANSI C. It is not
158 at all modular. (Sorry!) It uses a lot of macros. To be at all
159 usable, this code should be compiled using an optimizing compiler
160 (for example gcc -O3) that can simplify expressions and control
161 paths. (FAQ: some macros import variables as arguments rather than
162 declare locals because people reported that some debuggers
163 otherwise get confused.)
165 OPTION DEFAULT VALUE
167 Compilation Environment options:
169 HAVE_MREMAP 0
171 Changing default word sizes:
173 INTERNAL_SIZE_T size_t
175 Configuration and functionality options:
177 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
178 USE_MALLOC_LOCK NOT defined
179 MALLOC_DEBUG NOT defined
180 REALLOC_ZERO_BYTES_FREES 1
181 TRIM_FASTBINS 0
183 Options for customizing MORECORE:
185 MORECORE sbrk
186 MORECORE_FAILURE -1
187 MORECORE_CONTIGUOUS 1
188 MORECORE_CANNOT_TRIM NOT defined
189 MORECORE_CLEARS 1
190 MMAP_AS_MORECORE_SIZE (1024 * 1024)
192 Tuning options that are also dynamically changeable via mallopt:
194 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
195 DEFAULT_TRIM_THRESHOLD 128 * 1024
196 DEFAULT_TOP_PAD 0
197 DEFAULT_MMAP_THRESHOLD 128 * 1024
198 DEFAULT_MMAP_MAX 65536
200 There are several other #defined constants and macros that you
201 probably don't want to touch unless you are extending or adapting malloc. */
204 void* is the pointer type that malloc should say it returns
207 #ifndef void
208 #define void void
209 #endif /*void*/
211 #include <stddef.h> /* for size_t */
212 #include <stdlib.h> /* for getenv(), abort() */
213 #include <unistd.h> /* for __libc_enable_secure */
215 #include <atomic.h>
216 #include <_itoa.h>
217 #include <bits/wordsize.h>
218 #include <sys/sysinfo.h>
220 #include <ldsodefs.h>
222 #include <unistd.h>
223 #include <stdio.h> /* needed for malloc_stats */
224 #include <errno.h>
225 #include <assert.h>
227 #include <shlib-compat.h>
229 /* For uintptr_t. */
230 #include <stdint.h>
232 /* For va_arg, va_start, va_end. */
233 #include <stdarg.h>
235 /* For MIN, MAX, powerof2. */
236 #include <sys/param.h>
238 /* For ALIGN_UP et. al. */
239 #include <libc-pointer-arith.h>
241 /* For DIAG_PUSH/POP_NEEDS_COMMENT et al. */
242 #include <libc-diag.h>
244 /* For memory tagging. */
245 #include <libc-mtag.h>
247 #include <malloc/malloc-internal.h>
249 /* For SINGLE_THREAD_P. */
250 #include <sysdep-cancel.h>
252 #include <libc-internal.h>
254 /* For tcache double-free check. */
255 #include <random-bits.h>
256 #include <sys/random.h>
257 #include <not-cancel.h>
260 Debugging:
262 Because freed chunks may be overwritten with bookkeeping fields, this
263 malloc will often die when freed memory is overwritten by user
264 programs. This can be very effective (albeit in an annoying way)
265 in helping track down dangling pointers.
267 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
268 enabled that will catch more memory errors. You probably won't be
269 able to make much sense of the actual assertion errors, but they
270 should help you locate incorrectly overwritten memory. The checking
271 is fairly extensive, and will slow down execution
272 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
273 will attempt to check every non-mmapped allocated and free chunk in
274 the course of computing the summaries. (By nature, mmapped regions
275 cannot be checked very much automatically.)
277 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
278 this code. The assertions in the check routines spell out in more
279 detail the assumptions and invariants underlying the algorithms.
281 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
282 checking that all accesses to malloced memory stay within their
283 bounds. However, there are several add-ons and adaptations of this
284 or other mallocs available that do this.
287 #ifndef MALLOC_DEBUG
288 #define MALLOC_DEBUG 0
289 #endif
291 #if USE_TCACHE
292 /* We want 64 entries. This is an arbitrary limit, which tunables can reduce. */
293 # define TCACHE_MAX_BINS 64
294 # define MAX_TCACHE_SIZE tidx2usize (TCACHE_MAX_BINS-1)
296 /* Only used to pre-fill the tunables. */
297 # define tidx2usize(idx) (((size_t) idx) * MALLOC_ALIGNMENT + MINSIZE - SIZE_SZ)
299 /* When "x" is from chunksize(). */
300 # define csize2tidx(x) (((x) - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT)
301 /* When "x" is a user-provided size. */
302 # define usize2tidx(x) csize2tidx (request2size (x))
304 /* With rounding and alignment, the bins are...
305 idx 0 bytes 0..24 (64-bit) or 0..12 (32-bit)
306 idx 1 bytes 25..40 or 13..20
307 idx 2 bytes 41..56 or 21..28
308 etc. */
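/* For illustration, a standalone sketch (not code from this file) of how a
   user request maps to a tcache bin index on a typical 64-bit build.  The
   EX_* constants are assumptions for the example (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16, MINSIZE == 32), not the definitions used above:

     #include <stdio.h>

     #define EX_ALIGN   16   // assumed MALLOC_ALIGNMENT
     #define EX_MINSIZE 32   // assumed MINSIZE
     #define EX_SIZE_SZ  8   // assumed SIZE_SZ

     // mirrors request2size() under the assumptions above
     static size_t ex_request2size (size_t req)
     {
       size_t sz = (req + EX_SIZE_SZ + (EX_ALIGN - 1)) & ~(size_t) (EX_ALIGN - 1);
       return sz < EX_MINSIZE ? EX_MINSIZE : sz;
     }

     // mirrors csize2tidx()
     static size_t ex_csize2tidx (size_t csz)
     {
       return (csz - EX_MINSIZE + EX_ALIGN - 1) / EX_ALIGN;
     }

     int main (void)
     {
       for (size_t req = 0; req <= 56; req += 8)
         printf ("request %2zu -> chunk %2zu -> tcache idx %zu\n",
                 req, ex_request2size (req),
                 ex_csize2tidx (ex_request2size (req)));
       return 0;
     }

   A 24-byte request pads to a 32-byte chunk (idx 0); 25..40 byte requests
   pad to 48 bytes (idx 1), matching the table above.  */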
310 /* This is another arbitrary limit, which tunables can change. Each
311 tcache bin will hold at most this number of chunks. */
312 # define TCACHE_FILL_COUNT 7
314 /* Maximum chunks in tcache bins for tunables. This value must fit the range
315 of tcache->counts[] entries, else they may overflow. */
316 # define MAX_TCACHE_COUNT UINT16_MAX
317 #endif
319 /* Safe-Linking:
320 Use randomness from ASLR (mmap_base) to protect single-linked lists
321 of Fast-Bins and TCache. That is, mask the "next" pointers of the
322 lists' chunks, and also perform allocation alignment checks on them.
323 This mechanism reduces the risk of pointer hijacking, as was done with
324 Safe-Unlinking in the double-linked lists of Small-Bins.
325 It assumes a minimum page size of 4096 bytes (12 bits). Systems with
326 larger pages provide less entropy, although the pointer mangling
327 still works. */
328 #define PROTECT_PTR(pos, ptr) \
329 ((__typeof (ptr)) ((((size_t) pos) >> 12) ^ ((size_t) ptr)))
330 #define REVEAL_PTR(ptr) PROTECT_PTR (&ptr, ptr)
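/* For illustration, a standalone round trip of the same mangling (the ex_*
   names exist only in this sketch; the real macros above are applied to the
   "next" fields of fastbin and tcache chunks):

     #include <assert.h>
     #include <stdint.h>
     #include <stdio.h>

     // Same arithmetic as PROTECT_PTR: XOR the pointer with the address of
     // the slot it is stored in, shifted right by the page bits (12).
     static void *ex_protect (void **slot, void *p)
     {
       return (void *) (((uintptr_t) slot >> 12) ^ (uintptr_t) p);
     }

     int main (void)
     {
       int obj;
       void *slot;                              // where the "next" pointer lives
       slot = ex_protect (&slot, &obj);         // store the mangled value
       void *back = ex_protect (&slot, slot);   // demangling is the same XOR
       assert (back == (void *) &obj);
       printf ("stored %p for %p\n", slot, (void *) &obj);
       return 0;
     }

   Since the key is derived from the (ASLR-influenced) address of the slot
   itself, overwriting a freed chunk's next pointer without knowing the heap
   addresses cannot produce a chosen target.  */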
333 The REALLOC_ZERO_BYTES_FREES macro controls the behavior of realloc (p, 0)
334 when p is nonnull. If the macro is nonzero, the realloc call returns NULL;
335 otherwise, the call returns what malloc (0) would. In either case,
336 p is freed. Glibc uses a nonzero REALLOC_ZERO_BYTES_FREES, which
337 implements common historical practice.
339 ISO C17 says the realloc call has implementation-defined behavior,
340 and it might not even free p.
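/* A minimal demonstration of the glibc behavior described above: the call
   frees p and returns NULL, so the NULL must not be treated as an
   allocation failure (illustrative only):

     #include <stdio.h>
     #include <stdlib.h>

     int main (void)
     {
       void *p = malloc (16);
       void *q = realloc (p, 0);   // with REALLOC_ZERO_BYTES_FREES: p is freed
       printf ("realloc (p, 0) returned %p\n", q);
       // p must not be used or freed again at this point.
       return 0;
     }
*/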
343 #ifndef REALLOC_ZERO_BYTES_FREES
344 #define REALLOC_ZERO_BYTES_FREES 1
345 #endif
348 TRIM_FASTBINS controls whether free() of a very small chunk can
349 immediately lead to trimming. Setting to true (1) can reduce memory
350 footprint, but will almost always slow down programs that use a lot
351 of small chunks.
353 Define this only if you are willing to give up some speed to more
354 aggressively reduce system-level memory footprint when releasing
355 memory in programs that use many small chunks. You can get
356 essentially the same effect by setting MXFAST to 0, but this can
357 lead to even greater slowdowns in programs using many small chunks.
358 TRIM_FASTBINS is an in-between compile-time option, that disables
359 only those chunks bordering topmost memory from being placed in
360 fastbins.
363 #ifndef TRIM_FASTBINS
364 #define TRIM_FASTBINS 0
365 #endif
367 /* Definition for getting more memory from the OS. */
368 #include "morecore.c"
370 #define MORECORE (*__glibc_morecore)
371 #define MORECORE_FAILURE 0
373 /* Memory tagging. */
375 /* Some systems support the concept of tagging (sometimes known as
376 coloring) memory locations on a fine grained basis. Each memory
377 location is given a color (normally allocated randomly) and
378 pointers are also colored. When the pointer is dereferenced, the
379 pointer's color is checked against the memory's color and if they
380 differ the access is faulted (sometimes lazily).
382 We use this in glibc by maintaining a single color for the malloc
383 data structures that are interleaved with the user data and then
384 assigning separate colors for each block allocation handed out. In
385 this way simple buffer overruns will be rapidly detected. When
386 memory is freed, the memory is recolored back to the glibc default
387 so that simple use-after-free errors can also be detected.
389 If memory is reallocated the buffer is recolored even if the
390 address remains the same. This has a performance impact, but
391 guarantees that the old pointer cannot mistakenly be reused (code
392 that compares old against new will see a mismatch and will then
393 need to behave as though realloc moved the data to a new location).
395 Internal API for memory tagging support.
397 The aim is to keep the code for memory tagging support as close to
398 the normal APIs in glibc as possible, so that if tagging is not
399 enabled in the library, or is disabled at runtime then standard
400 operations can continue to be used. Support macros are used to do
401 this:
403 void *tag_new_zero_region (void *ptr, size_t size)
405 Allocates a new tag, colors the memory with that tag, zeros the
406 memory and returns a pointer that is correctly colored for that
407 location. The non-tagging version will simply call memset with 0.
409 void *tag_region (void *ptr, size_t size)
411 Color the region of memory pointed to by PTR and size SIZE with
412 the color of PTR. Returns the original pointer.
414 void *tag_new_usable (void *ptr)
416 Allocate a new random color and use it to color the user region of
417 a chunk; this may include data from the subsequent chunk's header
418 if tagging is sufficiently fine grained. Returns PTR suitably
419 recolored for accessing the memory there.
421 void *tag_at (void *ptr)
423 Read the current color of the memory at the address pointed to by
424 PTR (ignoring its current color) and return PTR recolored to that
425 color. PTR must be a valid address in all other respects. When
426 tagging is not enabled, it simply returns the original pointer.
429 #ifdef USE_MTAG
430 static bool mtag_enabled = false;
431 static int mtag_mmap_flags = 0;
432 #else
433 # define mtag_enabled false
434 # define mtag_mmap_flags 0
435 #endif
437 static __always_inline void *
438 tag_region (void *ptr, size_t size)
440 if (__glibc_unlikely (mtag_enabled))
441 return __libc_mtag_tag_region (ptr, size);
442 return ptr;
445 static __always_inline void *
446 tag_new_zero_region (void *ptr, size_t size)
448 if (__glibc_unlikely (mtag_enabled))
449 return __libc_mtag_tag_zero_region (__libc_mtag_new_tag (ptr), size);
450 return memset (ptr, 0, size);
453 /* Defined later. */
454 static void *
455 tag_new_usable (void *ptr);
457 static __always_inline void *
458 tag_at (void *ptr)
460 if (__glibc_unlikely (mtag_enabled))
461 return __libc_mtag_address_get_tag (ptr);
462 return ptr;
465 #include <string.h>
468 MORECORE-related declarations. By default, rely on sbrk
473 MORECORE is the name of the routine to call to obtain more memory
474 from the system. See below for general guidance on writing
475 alternative MORECORE functions, as well as a version for WIN32 and a
476 sample version for pre-OSX macos.
479 #ifndef MORECORE
480 #define MORECORE sbrk
481 #endif
484 MORECORE_FAILURE is the value returned upon failure of MORECORE
485 as well as mmap. Since it cannot be an otherwise valid memory address,
486 and must reflect values of standard sys calls, you probably ought not
487 try to redefine it.
490 #ifndef MORECORE_FAILURE
491 #define MORECORE_FAILURE (-1)
492 #endif
495 If MORECORE_CONTIGUOUS is true, take advantage of fact that
496 consecutive calls to MORECORE with positive arguments always return
497 contiguous increasing addresses. This is true of unix sbrk. Even
498 if not defined, when regions happen to be contiguous, malloc will
499 permit allocations spanning regions obtained from different
500 calls. But defining this when applicable enables some stronger
501 consistency checks and space efficiencies.
504 #ifndef MORECORE_CONTIGUOUS
505 #define MORECORE_CONTIGUOUS 1
506 #endif
509 Define MORECORE_CANNOT_TRIM if your version of MORECORE
510 cannot release space back to the system when given negative
511 arguments. This is generally necessary only if you are using
512 a hand-crafted MORECORE function that cannot handle negative arguments.
515 /* #define MORECORE_CANNOT_TRIM */
517 /* MORECORE_CLEARS (default 1)
518 The degree to which the routine mapped to MORECORE zeroes out
519 memory: never (0), only for newly allocated space (1) or always
520 (2). The distinction between (1) and (2) is necessary because on
521 some systems, if the application first decrements and then
522 increments the break value, the contents of the reallocated space
523 are unspecified.
526 #ifndef MORECORE_CLEARS
527 # define MORECORE_CLEARS 1
528 #endif
532 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
533 sbrk fails, and mmap is used as a backup. The value must be a
534 multiple of page size. This backup strategy generally applies only
535 when systems have "holes" in address space, so sbrk cannot perform
536 contiguous expansion, but there is still space available on system.
537 On systems for which this is known to be useful (i.e. most linux
538 kernels), this occurs only when programs allocate huge amounts of
539 memory. Between this, and the fact that mmap regions tend to be
540 limited, the size should be large, to avoid too many mmap calls and
541 thus avoid running out of kernel resources. */
543 #ifndef MMAP_AS_MORECORE_SIZE
544 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
545 #endif
548 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
549 large blocks.
552 #ifndef HAVE_MREMAP
553 #define HAVE_MREMAP 0
554 #endif
557 This version of malloc supports the standard SVID/XPG mallinfo
558 routine that returns a struct containing usage properties and
559 statistics. It should work on any SVID/XPG compliant system that has
560 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
561 install such a thing yourself, cut out the preliminary declarations
562 as described above and below and save them in a malloc.h file. But
563 there's no compelling reason to bother to do this.)
565 The main declaration needed is the mallinfo struct that is returned
566 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
567 bunch of fields that are not even meaningful in this version of
568 malloc. These fields are instead filled by mallinfo() with
569 other numbers that might be of interest.
573 /* ---------- description of public routines ------------ */
575 #if IS_IN (libc)
577 malloc(size_t n)
578 Returns a pointer to a newly allocated chunk of at least n bytes, or null
579 if no space is available. Additionally, on failure, errno is
580 set to ENOMEM on ANSI C systems.
582 If n is zero, malloc returns a minimum-sized chunk. (The minimum
583 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
584 systems.) On most systems, size_t is an unsigned type, so calls
585 with negative arguments are interpreted as requests for huge amounts
586 of space, which will often fail. The maximum supported value of n
587 differs across systems, but is in all cases less than the maximum
588 representable value of a size_t.
590 void* __libc_malloc(size_t);
591 libc_hidden_proto (__libc_malloc)
594 free(void* p)
595 Releases the chunk of memory pointed to by p, that had been previously
596 allocated using malloc or a related routine such as realloc.
597 It has no effect if p is null. It can have arbitrary (i.e., bad!)
598 effects if p has already been freed.
600 Unless disabled (using mallopt), freeing very large spaces will
601 when possible, automatically trigger operations that give
602 back unused memory to the system, thus reducing program footprint.
604 void __libc_free(void*);
605 libc_hidden_proto (__libc_free)
608 calloc(size_t n_elements, size_t element_size);
609 Returns a pointer to n_elements * element_size bytes, with all locations
610 set to zero.
612 void* __libc_calloc(size_t, size_t);
615 realloc(void* p, size_t n)
616 Returns a pointer to a chunk of size n that contains the same data
617 as does chunk p up to the minimum of (n, p's size) bytes, or null
618 if no space is available.
620 The returned pointer may or may not be the same as p. The algorithm
621 prefers extending p when possible, otherwise it employs the
622 equivalent of a malloc-copy-free sequence.
624 If p is null, realloc is equivalent to malloc.
626 If space is not available, realloc returns null, errno is set (if on
627 ANSI) and p is NOT freed.
629 if n is for fewer bytes than already held by p, the newly unused
630 space is lopped off and freed if possible. Unless the #define
631 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
632 zero (re)allocates a minimum-sized chunk.
634 Large chunks that were internally obtained via mmap will always be
635 grown using malloc-copy-free sequences unless the system supports
636 MREMAP (currently only linux).
638 The old unix realloc convention of allowing the last-free'd chunk
639 to be used as an argument to realloc is not supported.
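/* Because p is NOT freed on failure, the usual growth idiom keeps the old
   pointer until the call succeeds (illustrative sketch, not part of this
   file's API):

     #include <stdlib.h>

     // Grow *bufp to newsize.  Returns 0 on success; on failure returns -1
     // and leaves *bufp untouched (still valid, still owned by the caller).
     static int ex_grow (void **bufp, size_t newsize)
     {
       void *tmp = realloc (*bufp, newsize);
       if (tmp == NULL)
         return -1;        // nothing was freed; *bufp is still usable
       *bufp = tmp;
       return 0;
     }
*/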
641 void* __libc_realloc(void*, size_t);
642 libc_hidden_proto (__libc_realloc)
645 memalign(size_t alignment, size_t n);
646 Returns a pointer to a newly allocated chunk of n bytes, aligned
647 in accord with the alignment argument.
649 The alignment argument should be a power of two. If the argument is
650 not a power of two, the nearest greater power is used.
651 8-byte alignment is guaranteed by normal malloc calls, so don't
652 bother calling memalign with an argument of 8 or less.
654 Overreliance on memalign is a sure way to fragment space.
656 void* __libc_memalign(size_t, size_t);
657 libc_hidden_proto (__libc_memalign)
660 valloc(size_t n);
661 Equivalent to memalign(pagesize, n), where pagesize is the page
662 size of the system. If the pagesize is unknown, 4096 is used.
664 void* __libc_valloc(size_t);
669 mallinfo()
670 Returns (by copy) a struct containing various summary statistics:
672 arena: current total non-mmapped bytes allocated from system
673 ordblks: the number of free chunks
674 smblks: the number of fastbin blocks (i.e., small chunks that
675 have been freed but not yet reused or consolidated)
676 hblks: current number of mmapped regions
677 hblkhd: total bytes held in mmapped regions
678 usmblks: always 0
679 fsmblks: total bytes held in fastbin blocks
680 uordblks: current total allocated space (normal or mmapped)
681 fordblks: total free space
682 keepcost: the maximum number of bytes that could ideally be released
683 back to system via malloc_trim. ("ideally" means that
684 it ignores page restrictions etc.)
686 Because these fields are ints, but internal bookkeeping may
687 be kept as longs, the reported values may wrap around zero and
688 thus be inaccurate.
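/* For example, a program can snapshot some of these fields through the
   public mallinfo2 interface declared in <malloc.h> (available in recent
   glibc releases; mallinfo2 reports the fields above as size_t, avoiding
   the int truncation problem):

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int main (void)
     {
       void *p = malloc (1000);
       struct mallinfo2 mi = mallinfo2 ();
       printf ("arena=%zu uordblks=%zu fordblks=%zu keepcost=%zu\n",
               mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
       free (p);
       return 0;
     }
*/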
690 struct mallinfo2 __libc_mallinfo2(void);
691 libc_hidden_proto (__libc_mallinfo2)
693 struct mallinfo __libc_mallinfo(void);
697 pvalloc(size_t n);
698 Equivalent to valloc(minimum-page-that-holds(n)), that is,
699 round up n to nearest pagesize.
701 void* __libc_pvalloc(size_t);
704 malloc_trim(size_t pad);
706 If possible, gives memory back to the system (via negative
707 arguments to sbrk) if there is unused memory at the `high' end of
708 the malloc pool. You can call this after freeing large blocks of
709 memory to potentially reduce the system-level memory requirements
710 of a program. However, it cannot guarantee to reduce memory. Under
711 some allocation patterns, some large free blocks of memory will be
712 locked between two used chunks, so they cannot be given back to
713 the system.
715 The `pad' argument to malloc_trim represents the amount of free
716 trailing space to leave untrimmed. If this argument is zero,
717 only the minimum amount of memory to maintain internal data
718 structures will be left (one page or less). Non-zero arguments
719 can be supplied to maintain enough trailing space to service
720 future expected allocations without having to re-obtain memory
721 from the system.
723 Malloc_trim returns 1 if it actually released any memory, else 0.
724 On systems that do not support "negative sbrks", it will always
725 return 0.
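/* Typical use after a program has freed a large amount of memory
   (illustrative only):

     #include <malloc.h>
     #include <stdlib.h>

     int main (void)
     {
       void *blocks[256];
       for (int i = 0; i < 256; i++)
         blocks[i] = malloc (4096);
       for (int i = 0; i < 256; i++)
         free (blocks[i]);
       // Leave no extra pad; the return value only reports whether any
       // memory was actually released back to the system.
       int released = malloc_trim (0);
       return released ? 0 : 1;
     }
*/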
727 int __malloc_trim(size_t);
730 malloc_usable_size(void* p);
732 Returns the number of bytes you can actually use in
733 an allocated chunk, which may be more than you requested (although
734 often not) due to alignment and minimum size constraints.
735 You can use this many bytes without worrying about
736 overwriting other allocated objects. This is not a particularly great
737 programming practice. malloc_usable_size can be more useful in
738 debugging and assertions, for example:
740 p = malloc(n);
741 assert(malloc_usable_size(p) >= 256);
744 size_t __malloc_usable_size(void*);
747 malloc_stats();
748 Prints on stderr the amount of space obtained from the system (both
749 via sbrk and mmap), the maximum amount (which may be more than
750 current if malloc_trim and/or munmap got called), and the current
751 number of bytes allocated via malloc (or realloc, etc) but not yet
752 freed. Note that this is the number of bytes allocated, not the
753 number requested. It will be larger than the number requested
754 because of alignment and bookkeeping overhead. Because it includes
755 alignment wastage as being in use, this figure may be greater than
756 zero even when no user-level chunks are allocated.
758 The reported current and maximum system memory can be inaccurate if
759 a program makes other calls to system memory allocation functions
760 (normally sbrk) outside of malloc.
762 malloc_stats prints only the most commonly interesting statistics.
763 More information can be obtained by calling mallinfo.
766 void __malloc_stats(void);
769 posix_memalign(void **memptr, size_t alignment, size_t size);
771 POSIX wrapper like memalign(), checking for validity of size.
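/* Unlike memalign, the result comes back through an out parameter and the
   error is the return value (errno is not required to be set).  Illustrative
   use:

     #include <stdio.h>
     #include <stdlib.h>

     int main (void)
     {
       void *p = NULL;
       // The alignment must be a power of two multiple of sizeof (void *).
       int err = posix_memalign (&p, 64, 1000);
       if (err != 0)
         {
           fprintf (stderr, "posix_memalign failed: %d\n", err);
           return 1;
         }
       free (p);
       return 0;
     }
*/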
773 int __posix_memalign(void **, size_t, size_t);
774 #endif /* IS_IN (libc) */
777 mallopt(int parameter_number, int parameter_value)
778 Sets tunable parameters. The format is to provide a
779 (parameter-number, parameter-value) pair. mallopt then sets the
780 corresponding parameter to the argument value if it can (i.e., so
781 long as the value is meaningful), and returns 1 if successful else
782 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
783 normally defined in malloc.h. Only one of these (M_MXFAST) is used
784 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
785 so setting them has no effect. But this malloc also supports four
786 other options in mallopt. See below for details. Briefly, supported
787 parameters are as follows (listed defaults are for "typical"
788 configurations).
790 Symbol param # default allowed param values
791 M_MXFAST 1 64 0-80 (0 disables fastbins)
792 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
793 M_TOP_PAD -2 0 any
794 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
795 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
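/* For example, a program that makes many large transient allocations might
   pin the mmap threshold instead of letting it slide, and add some top
   padding (M_MMAP_THRESHOLD and M_TOP_PAD are declared in <malloc.h>; the
   values here are only for illustration):

     #include <malloc.h>

     int main (void)
     {
       // mallopt returns 1 on success, 0 if the value was rejected.
       if (mallopt (M_MMAP_THRESHOLD, 1024 * 1024) == 0)
         return 1;
       mallopt (M_TOP_PAD, 128 * 1024);
       return 0;
     }
*/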
797 int __libc_mallopt(int, int);
798 #if IS_IN (libc)
799 libc_hidden_proto (__libc_mallopt)
800 #endif
802 /* mallopt tuning options */
805 M_MXFAST is the maximum request size used for "fastbins", special bins
806 that hold returned chunks without consolidating their spaces. This
807 enables future requests for chunks of the same size to be handled
808 very quickly, but can increase fragmentation, and thus increase the
809 overall memory footprint of a program.
811 This malloc manages fastbins very conservatively yet still
812 efficiently, so fragmentation is rarely a problem for values less
813 than or equal to the default. The maximum supported value of MXFAST
814 is 80. You wouldn't want it any higher than this anyway. Fastbins
815 are designed especially for use with many small structs, objects or
816 strings -- the default handles structs/objects/arrays with sizes up
817 to 8 4-byte fields, or small strings representing words, tokens,
818 etc. Using fastbins for larger objects normally worsens
819 fragmentation without improving speed.
821 M_MXFAST is set in REQUEST size units. It is internally used in
822 chunksize units, which adds padding and alignment. You can reduce
823 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
824 algorithm to be a closer approximation of fifo-best-fit in all cases,
825 not just for larger requests, but will generally cause it to be
826 slower.
830 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
831 #ifndef M_MXFAST
832 #define M_MXFAST 1
833 #endif
835 #ifndef DEFAULT_MXFAST
836 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
837 #endif
841 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
842 to keep before releasing via malloc_trim in free().
844 Automatic trimming is mainly useful in long-lived programs.
845 Because trimming via sbrk can be slow on some systems, and can
846 sometimes be wasteful (in cases where programs immediately
847 afterward allocate more large chunks) the value should be high
848 enough so that your overall system performance would improve by
849 releasing this much memory.
851 The trim threshold and the mmap control parameters (see below)
852 can be traded off with one another. Trimming and mmapping are
853 two different ways of releasing unused memory back to the
854 system. Between these two, it is often possible to keep
855 system-level demands of a long-lived program down to a bare
856 minimum. For example, in one test suite of sessions measuring
857 the XF86 X server on Linux, using a trim threshold of 128K and a
858 mmap threshold of 192K led to near-minimal long term resource
859 consumption.
861 If you are using this malloc in a long-lived program, it should
862 pay to experiment with these values. As a rough guide, you
863 might set to a value close to the average size of a process
864 (program) running on your system. Releasing this much memory
865 would allow such a process to run in memory. Generally, it's
866 worth it to tune for trimming rather than memory mapping when a
867 program undergoes phases where several large chunks are
868 allocated and released in ways that can reuse each other's
869 storage, perhaps mixed with phases where there are no such
870 chunks at all. And in well-behaved long-lived programs,
871 controlling release of large blocks via trimming versus mapping
872 is usually faster.
874 However, in most programs, these parameters serve mainly as
875 protection against the system-level effects of carrying around
876 massive amounts of unneeded memory. Since frequent calls to
877 sbrk, mmap, and munmap otherwise degrade performance, the default
878 parameters are set to relatively high values that serve only as
879 safeguards.
881 The trim value must be greater than the page size to have any useful
882 effect. To disable trimming completely, you can set to
883 (unsigned long)(-1)
885 Trim settings interact with fastbin (MXFAST) settings: Unless
886 TRIM_FASTBINS is defined, automatic trimming never takes place upon
887 freeing a chunk with size less than or equal to MXFAST. Trimming is
888 instead delayed until subsequent freeing of larger chunks. However,
889 you can still force an attempted trim by calling malloc_trim.
891 Also, trimming is not generally possible in cases where
892 the main arena is obtained via mmap.
894 Note that the trick some people use of mallocing a huge space and
895 then freeing it at program startup, in an attempt to reserve system
896 memory, doesn't have the intended effect under automatic trimming,
897 since that memory will immediately be returned to the system.
900 #define M_TRIM_THRESHOLD -1
902 #ifndef DEFAULT_TRIM_THRESHOLD
903 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
904 #endif
907 M_TOP_PAD is the amount of extra `padding' space to allocate or
908 retain whenever sbrk is called. It is used in two ways internally:
910 * When sbrk is called to extend the top of the arena to satisfy
911 a new malloc request, this much padding is added to the sbrk
912 request.
914 * When malloc_trim is called automatically from free(),
915 it is used as the `pad' argument.
917 In both cases, the actual amount of padding is rounded
918 so that the end of the arena is always a system page boundary.
920 The main reason for using padding is to avoid calling sbrk so
921 often. Having even a small pad greatly reduces the likelihood
922 that nearly every malloc request during program start-up (or
923 after trimming) will invoke sbrk, which needlessly wastes
924 time.
926 Automatic rounding-up to page-size units is normally sufficient
927 to avoid measurable overhead, so the default is 0. However, in
928 systems where sbrk is relatively slow, it can pay to increase
929 this value, at the expense of carrying around more memory than
930 the program needs.
933 #define M_TOP_PAD -2
935 #ifndef DEFAULT_TOP_PAD
936 #define DEFAULT_TOP_PAD (0)
937 #endif
940 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
941 adjusted MMAP_THRESHOLD.
944 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
945 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
946 #endif
948 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
949 /* For 32-bit platforms we cannot increase the maximum mmap
950 threshold much because it is also the minimum value for the
951 maximum heap size and its alignment. Going above 512k (i.e., 1M
952 for new heaps) wastes too much address space. */
953 # if __WORDSIZE == 32
954 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
955 # else
956 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
957 # endif
958 #endif
961 M_MMAP_THRESHOLD is the request size threshold for using mmap()
962 to service a request. Requests of at least this size that cannot
963 be allocated using already-existing space will be serviced via mmap.
964 (If enough normal freed space already exists it is used instead.)
966 Using mmap segregates relatively large chunks of memory so that
967 they can be individually obtained and released from the host
968 system. A request serviced through mmap is never reused by any
969 other request (at least not directly; the system may just so
970 happen to remap successive requests to the same locations).
972 Segregating space in this way has the benefits that:
974 1. Mmapped space can ALWAYS be individually released back
975 to the system, which helps keep the system level memory
976 demands of a long-lived program low.
977 2. Mapped memory can never become `locked' between
978 other chunks, as can happen with normally allocated chunks, which
979 means that even trimming via malloc_trim would not release them.
980 3. On some systems with "holes" in address spaces, mmap can obtain
981 memory that sbrk cannot.
983 However, it has the disadvantages that:
985 1. The space cannot be reclaimed, consolidated, and then
986 used to service later requests, as happens with normal chunks.
987 2. It can lead to more wastage because of mmap page alignment
988 requirements
989 3. It causes malloc performance to be more dependent on host
990 system memory management support routines which may vary in
991 implementation quality and may impose arbitrary
992 limitations. Generally, servicing a request via normal
993 malloc steps is faster than going through a system's mmap.
995 The advantages of mmap nearly always outweigh disadvantages for
996 "large" chunks, but the value of "large" varies across systems. The
997 default is an empirically derived value that works well in most
998 systems.
1001 Update in 2006:
1002 The above was written in 2001. Since then the world has changed a lot.
1003 Memory got bigger. Applications got bigger. The virtual address space
1004 layout in 32 bit linux changed.
1006 In the new situation, brk() and mmap space is shared and there are no
1007 artificial limits on brk size imposed by the kernel. What is more,
1008 applications have started using transient allocations larger than the
1009 128Kb that was imagined in 2001.
1011 The price for mmap is also high now; each time glibc mmaps from the
1012 kernel, the kernel is forced to zero out the memory it gives to the
1013 application. Zeroing memory is expensive and eats a lot of cache and
1014 memory bandwidth. This has nothing to do with the efficiency of the
1015 virtual memory system; by doing mmap the kernel just has no choice but
1016 to zero.
1018 In 2001, the kernel had a maximum size for brk() which was about 800
1019 megabytes on 32 bit x86, at that point brk() would hit the first
1020 mmapped shared libraries and couldn't expand anymore. With current 2.6
1021 kernels, the VA space layout is different and brk() and mmap
1022 both can span the entire heap at will.
1024 Rather than using a static threshold for the brk/mmap tradeoff,
1025 we are now using a simple dynamic one. The goal is still to avoid
1026 fragmentation. The old goals we kept are
1027 1) try to get the long lived large allocations to use mmap()
1028 2) really large allocations should always use mmap()
1029 and we're adding now:
1030 3) transient allocations should use brk() to avoid forcing the kernel
1031 to zero memory over and over again
1033 The implementation works with a sliding threshold, which is by default
1034 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1035 out at 128Kb as per the 2001 default.
1037 This allows us to satisfy requirement 1) under the assumption that long
1038 lived allocations are made early in the process' lifespan, before it has
1039 started doing dynamic allocations of the same size (which will
1040 increase the threshold).
1042 The upper bound on the threshold satisfies requirement 2).
1044 The threshold goes up in value when the application frees memory that was
1045 allocated with the mmap allocator. The idea is that once the application
1046 starts freeing memory of a certain size, it's highly probable that this is
1047 a size the application uses for transient allocations. This estimator
1048 is there to satisfy the new third requirement.
1052 #define M_MMAP_THRESHOLD -3
1054 #ifndef DEFAULT_MMAP_THRESHOLD
1055 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1056 #endif
1059 M_MMAP_MAX is the maximum number of requests to simultaneously
1060 service using mmap. This parameter exists because
1061 some systems have a limited number of internal tables for
1062 use by mmap, and using more than a few of them may degrade
1063 performance.
1065 The default is set to a value that serves only as a safeguard.
1066 Setting to 0 disables use of mmap for servicing large requests.
1069 #define M_MMAP_MAX -4
1071 #ifndef DEFAULT_MMAP_MAX
1072 #define DEFAULT_MMAP_MAX (65536)
1073 #endif
1075 #include <malloc.h>
1077 #ifndef RETURN_ADDRESS
1078 #define RETURN_ADDRESS(X_) (NULL)
1079 #endif
1081 /* Forward declarations. */
1082 struct malloc_chunk;
1083 typedef struct malloc_chunk* mchunkptr;
1085 /* Internal routines. */
1087 static void* _int_malloc(mstate, size_t);
1088 static void _int_free(mstate, mchunkptr, int);
1089 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1090 INTERNAL_SIZE_T);
1091 static void* _int_memalign(mstate, size_t, size_t);
1092 #if IS_IN (libc)
1093 static void* _mid_memalign(size_t, size_t, void *);
1094 #endif
1096 static void malloc_printerr(const char *str) __attribute__ ((noreturn));
1098 static void munmap_chunk(mchunkptr p);
1099 #if HAVE_MREMAP
1100 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size);
1101 #endif
1103 /* ------------------ MMAP support ------------------ */
1106 #include <fcntl.h>
1107 #include <sys/mman.h>
1109 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1110 # define MAP_ANONYMOUS MAP_ANON
1111 #endif
1113 #define MMAP(addr, size, prot, flags) \
1114 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1118 ----------------------- Chunk representations -----------------------
1123 This struct declaration is misleading (but accurate and necessary).
1124 It declares a "view" into memory allowing access to necessary
1125 fields at known offsets from a given base. See explanation below.
1128 struct malloc_chunk {
1130 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1131 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1133 struct malloc_chunk* fd; /* double links -- used only if free. */
1134 struct malloc_chunk* bk;
1136 /* Only used for large blocks: pointer to next larger size. */
1137 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1138 struct malloc_chunk* bk_nextsize;
1143 malloc_chunk details:
1145 (The following includes lightly edited explanations by Colin Plumb.)
1147 Chunks of memory are maintained using a `boundary tag' method as
1148 described in e.g., Knuth or Standish. (See the paper by Paul
1149 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1150 survey of such techniques.) Sizes of free chunks are stored both
1151 in the front of each chunk and at the end. This makes
1152 consolidating fragmented chunks into bigger chunks very fast. The
1153 size fields also hold bits representing whether chunks are free or
1154 in use.
1156 An allocated chunk looks like this:
1159 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1160 | Size of previous chunk, if unallocated (P clear) |
1161 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1162 | Size of chunk, in bytes |A|M|P|
1163 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1164 | User data starts here... .
1166 . (malloc_usable_size() bytes) .
1168 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1169 | (size of chunk, but used for application data) |
1170 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1171 | Size of next chunk, in bytes |A|0|1|
1172 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1174 Where "chunk" is the front of the chunk for the purpose of most of
1175 the malloc code, but "mem" is the pointer that is returned to the
1176 user. "Nextchunk" is the beginning of the next contiguous chunk.
1178 Chunks always begin on even word boundaries, so the mem portion
1179 (which is returned to the user) is also on an even word boundary, and
1180 thus at least double-word aligned.
1182 Free chunks are stored in circular doubly-linked lists, and look like this:
1184 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1185 | Size of previous chunk, if unallocated (P clear) |
1186 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1187 `head:' | Size of chunk, in bytes |A|0|P|
1188 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1189 | Forward pointer to next chunk in list |
1190 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1191 | Back pointer to previous chunk in list |
1192 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1193 | Unused space (may be 0 bytes long) .
1196 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1197 `foot:' | Size of chunk, in bytes |
1198 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1199 | Size of next chunk, in bytes |A|0|0|
1200 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1202 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1203 chunk size (which is always a multiple of two words), is an in-use
1204 bit for the *previous* chunk. If that bit is *clear*, then the
1205 word before the current chunk size contains the previous chunk
1206 size, and can be used to find the front of the previous chunk.
1207 The very first chunk allocated always has this bit set,
1208 preventing access to non-existent (or non-owned) memory. If
1209 prev_inuse is set for any given chunk, then you CANNOT determine
1210 the size of the previous chunk, and might even get a memory
1211 addressing fault when trying to do so.
1213 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1214 main arena, described by the main_arena variable. When additional
1215 threads are spawned, each thread receives its own arena (up to a
1216 configurable limit, after which arenas are reused for multiple
1217 threads), and the chunks in these arenas have the A bit set. To
1218 find the arena for a chunk on such a non-main arena, heap_for_ptr
1219 performs a bit mask operation and indirection through the ar_ptr
1220 member of the per-heap header heap_info (see arena.c).
1222 Note that the `foot' of the current chunk is actually represented
1223 as the prev_size of the NEXT chunk. This makes it easier to
1224 deal with alignments etc but can be very confusing when trying
1225 to extend or adapt this code.
1227 The three exceptions to all this are:
1229 1. The special chunk `top' doesn't bother using the
1230 trailing size field since there is no next contiguous chunk
1231 that would have to index off it. After initialization, `top'
1232 is forced to always exist. If it would become less than
1233 MINSIZE bytes long, it is replenished.
1235 2. Chunks allocated via mmap, which have the second-lowest-order
1236 bit M (IS_MMAPPED) set in their size fields. Because they are
1237 allocated one-by-one, each must contain its own trailing size
1238 field. If the M bit is set, the other bits are ignored
1239 (because mmapped chunks are neither in an arena, nor adjacent
1240 to a freed chunk). The M bit is also used for chunks which
1241 originally came from a dumped heap via malloc_set_state in
1242 hooks.c.
1244 3. Chunks in fastbins are treated as allocated chunks from the
1245 point of view of the chunk allocator. They are consolidated
1246 with their neighbors only in bulk, in malloc_consolidate.
1250 ---------- Size and alignment checks and conversions ----------
1253 /* Conversion from malloc headers to user pointers, and back. When
1254 using memory tagging the user data and the malloc data structure
1255 headers have distinct tags. Converting fully from one to the other
1256 involves extracting the tag at the other address and creating a
1257 suitable pointer using it. That can be quite expensive. There are
1258 cases when the pointers are not dereferenced (for example only used
1259 for alignment check) so the tags are not relevant, and there are
1260 cases when user data is not tagged distinctly from malloc headers
1261 (user data is untagged because tagging is done late in malloc and
1262 early in free). User memory tagging across internal interfaces:
1264 sysmalloc: Returns untagged memory.
1265 _int_malloc: Returns untagged memory.
1266 _int_free: Takes untagged memory.
1267 _int_memalign: Returns untagged memory.
1269 _mid_memalign: Returns tagged memory.
1270 _int_realloc: Takes and returns tagged memory.
1273 /* The chunk header is two SIZE_SZ elements, but this is used widely, so
1274 we define it here for clarity later. */
1275 #define CHUNK_HDR_SZ (2 * SIZE_SZ)
1277 /* Convert a chunk address to a user mem pointer without correcting
1278 the tag. */
1279 #define chunk2mem(p) ((void*)((char*)(p) + CHUNK_HDR_SZ))
1281 /* Convert a chunk address to a user mem pointer and extract the right tag. */
1282 #define chunk2mem_tag(p) ((void*)tag_at ((char*)(p) + CHUNK_HDR_SZ))
1284 /* Convert a user mem pointer to a chunk address and extract the right tag. */
1285 #define mem2chunk(mem) ((mchunkptr)tag_at (((char*)(mem) - CHUNK_HDR_SZ)))
1287 /* The smallest possible chunk */
1288 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1290 /* The smallest size we can malloc is an aligned minimal chunk */
1292 #define MINSIZE \
1293 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1295 /* Check if m has acceptable alignment */
1297 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1299 #define misaligned_chunk(p) \
1300 ((uintptr_t)(MALLOC_ALIGNMENT == CHUNK_HDR_SZ ? (p) : chunk2mem (p)) \
1301 & MALLOC_ALIGN_MASK)
1303 /* pad request bytes into a usable size -- internal version */
1304 /* Note: This must be a macro that evaluates to a compile time constant
1305 if passed a literal constant. */
1306 #define request2size(req) \
1307 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1308 MINSIZE : \
1309 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
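/* Worked example on a typical 64-bit build, where SIZE_SZ is 8,
   MALLOC_ALIGN_MASK is 15 and MINSIZE is 32 (assumed values, not the
   definitions above):
     request2size (0)   : 0 + 8 + 15 = 23  < 32, so MINSIZE  -> 32
     request2size (24)  : (24 + 8 + 15) & ~15                -> 32
     request2size (25)  : (25 + 8 + 15) & ~15                -> 48
     request2size (100) : (100 + 8 + 15) & ~15               -> 112
   i.e. one size word of overhead is added and the result is rounded up to a
   multiple of MALLOC_ALIGNMENT, but never below MINSIZE.  */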
1311 /* Check if REQ overflows when padded and aligned and if the resulting
1312 value is less than PTRDIFF_T. Returns the requested size or
1313 MINSIZE in case the value is less than MINSIZE, or 0 if any of the
1314 previous checks fail. */
1315 static inline size_t
1316 checked_request2size (size_t req) __nonnull (1)
1318 if (__glibc_unlikely (req > PTRDIFF_MAX))
1319 return 0;
1321 /* When using tagged memory, we cannot share the end of the user
1322 block with the header for the next chunk, so ensure that we
1323 allocate blocks that are rounded up to the granule size. Take
1324 care not to overflow from close to MAX_SIZE_T to a small
1325 number. Ideally, this would be part of request2size(), but that
1326 must be a macro that produces a compile time constant if passed
1327 a constant literal. */
1328 if (__glibc_unlikely (mtag_enabled))
1330 /* Ensure this is not evaluated if !mtag_enabled, see gcc PR 99551. */
1331 asm ("");
1333 req = (req + (__MTAG_GRANULE_SIZE - 1)) &
1334 ~(size_t)(__MTAG_GRANULE_SIZE - 1);
1337 return request2size (req);
1341 --------------- Physical chunk operations ---------------
1345 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1346 #define PREV_INUSE 0x1
1348 /* extract inuse bit of previous chunk */
1349 #define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1352 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1353 #define IS_MMAPPED 0x2
1355 /* check for mmap()'ed chunk */
1356 #define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1359 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1360 from a non-main arena. This is only set immediately before handing
1361 the chunk to the user, if necessary. */
1362 #define NON_MAIN_ARENA 0x4
1364 /* Check for chunk from main arena. */
1365 #define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1367 /* Mark a chunk as not being on the main arena. */
1368 #define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1372 Bits to mask off when extracting size
1374 Note: IS_MMAPPED is intentionally not masked off from size field in
1375 macros for which mmapped chunks should never be seen. This should
1376 cause helpful core dumps to occur if it is tried by accident by
1377 people extending or adapting this malloc.
1379 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1381 /* Get size, ignoring use bits */
1382 #define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1384 /* Like chunksize, but do not mask SIZE_BITS. */
1385 #define chunksize_nomask(p) ((p)->mchunk_size)
1387 /* Ptr to next physical malloc_chunk. */
1388 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1390 /* Size of the chunk below P. Only valid if !prev_inuse (P). */
1391 #define prev_size(p) ((p)->mchunk_prev_size)
1393 /* Set the size of the chunk below P. Only valid if !prev_inuse (P). */
1394 #define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1396 /* Ptr to previous physical malloc_chunk. Only valid if !prev_inuse (P). */
1397 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1399 /* Treat space at ptr + offset as a chunk */
1400 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1402 /* extract p's inuse bit */
1403 #define inuse(p) \
1404 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1406 /* set/clear chunk as being inuse without otherwise disturbing */
1407 #define set_inuse(p) \
1408 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1410 #define clear_inuse(p) \
1411 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1414 /* check/set/clear inuse bits in known places */
1415 #define inuse_bit_at_offset(p, s) \
1416 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1418 #define set_inuse_bit_at_offset(p, s) \
1419 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1421 #define clear_inuse_bit_at_offset(p, s) \
1422 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1425 /* Set size at head, without disturbing its use bit */
1426 #define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1428 /* Set size/use field */
1429 #define set_head(p, s) ((p)->mchunk_size = (s))
1431 /* Set size at footer (only when chunk is not in use) */
1432 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
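/* Putting the macros above together, a sketch (not a function that exists
   in this file) of how physically adjacent chunks are reached; it relies on
   the macros defined above, so it is not self-contained:

     static void ex_walk_neighbors (mchunkptr p)
     {
       size_t sz = chunksize (p);        // size with the flag bits masked off
       mchunkptr nxt = next_chunk (p);   // boundary tag: next chunk is p + sz
       int p_used = inuse (p);           // read back from the NEXT chunk's header
       if (!prev_inuse (p))
         {
           // Only when the previous chunk is free is its trailing size
           // (stored in our prev_size field) valid.
           mchunkptr prv = prev_chunk (p);
           set_foot (prv, chunksize (prv));   // rewrite its boundary tag
         }
       (void) nxt;
       (void) p_used;
     }
*/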
1434 #pragma GCC poison mchunk_size
1435 #pragma GCC poison mchunk_prev_size
1437 /* This is the size of the real usable data in the chunk. Not valid for
1438 dumped heap chunks. */
1439 #define memsize(p) \
1440 (__MTAG_GRANULE_SIZE > SIZE_SZ && __glibc_unlikely (mtag_enabled) ? \
1441 chunksize (p) - CHUNK_HDR_SZ : \
1442 chunksize (p) - CHUNK_HDR_SZ + (chunk_is_mmapped (p) ? 0 : SIZE_SZ))
1444 /* If memory tagging is enabled the layout changes to accommodate the granule
1445 size; this is wasteful for small allocations, so it is not done by default.
1446 Both the chunk header and the user data have to be granule aligned. */
1447 _Static_assert (__MTAG_GRANULE_SIZE <= CHUNK_HDR_SZ,
1448 "memory tagging is not supported with large granule.");
1450 static __always_inline void *
1451 tag_new_usable (void *ptr)
1453 if (__glibc_unlikely (mtag_enabled) && ptr)
1455 mchunkptr cp = mem2chunk(ptr);
1456 ptr = __libc_mtag_tag_region (__libc_mtag_new_tag (ptr), memsize (cp));
1458 return ptr;
1462 -------------------- Internal data structures --------------------
1464 All internal state is held in an instance of malloc_state defined
1465 below. There are no other static variables, except in two optional
1466 cases:
1467 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1468 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1469 for mmap.
1471 Beware of lots of tricks that minimize the total bookkeeping space
1472 requirements. The result is a little over 1K bytes (for 4-byte
1473 pointers and size_t).
1477 Bins
1479 An array of bin headers for free chunks. Each bin is doubly
1480 linked. The bins are approximately proportionally (log) spaced.
1481 There are a lot of these bins (128). This may look excessive, but
1482 works very well in practice. Most bins hold sizes that are
1483 unusual as malloc request sizes, but are more usual for fragments
1484 and consolidated sets of chunks, which is what these bins hold, so
1485 they can be found quickly. All procedures maintain the invariant
1486 that no consolidated chunk physically borders another one, so each
1487 chunk in a list is known to be preceded and followed by either
1488 inuse chunks or the ends of memory.
1490 Chunks in bins are kept in size order, with ties going to the
1491 approximately least recently used chunk. Ordering isn't needed
1492 for the small bins, which all contain the same-sized chunks, but
1493 facilitates best-fit allocation for larger chunks. These lists
1494 are just sequential. Keeping them in order almost never requires
1495 enough traversal to warrant using fancier ordered data
1496 structures.
1498 Chunks of the same size are linked with the most
1499 recently freed at the front, and allocations are taken from the
1500 back. This results in LRU (FIFO) allocation order, which tends
1501 to give each chunk an equal opportunity to be consolidated with
1502 adjacent freed chunks, resulting in larger free chunks and less
1503 fragmentation.
1505 To simplify use in double-linked lists, each bin header acts
1506 as a malloc_chunk. This avoids special-casing for headers.
1507 But to conserve space and improve locality, we allocate
1508 only the fd/bk pointers of bins, and then use repositioning tricks
1509 to treat these as the fields of a malloc_chunk*.
1512 typedef struct malloc_chunk *mbinptr;
1514 /* addressing -- note that bin_at(0) does not exist */
1515 #define bin_at(m, i) \
1516 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1517 - offsetof (struct malloc_chunk, fd))
1519 /* analog of ++bin */
1520 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1522 /* Reminders about list directionality within bins */
1523 #define first(b) ((b)->fd)
1524 #define last(b) ((b)->bk)
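/* Editor's sketch, not part of the original source: the bin header is a
   truncated malloc_chunk, so the usual list tests apply to it.  A bin is
   empty when it links to itself; first (b) is the most recently inserted
   chunk and last (b) the oldest, which is the end best-fit allocation
   removes from.  The helper is hypothetical.  */
static __always_inline int
example_bin_is_empty (mbinptr b)
{
  /* Circular doubly-linked list: an empty bin points at itself.  */
  return first (b) == b;
}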
1527 Indexing
1529 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1530 8 bytes apart. Larger bins are approximately logarithmically spaced:
1532 64 bins of size 8
1533 32 bins of size 64
1534 16 bins of size 512
1535 8 bins of size 4096
1536 4 bins of size 32768
1537 2 bins of size 262144
1538 1 bin of size what's left
1540 There is actually a little bit of slop in the numbers in bin_index
1541 for the sake of speed. This makes no difference elsewhere.
1543 The bins top out around 1MB because we expect to service large
1544 requests via mmap.
1546 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1547 a valid chunk size the small bins are bumped up one.
1550 #define NBINS 128
1551 #define NSMALLBINS 64
1552 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1553 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > CHUNK_HDR_SZ)
1554 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1556 #define in_smallbin_range(sz) \
1557 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1559 #define smallbin_index(sz) \
1560 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1561 + SMALLBIN_CORRECTION)
1563 #define largebin_index_32(sz) \
1564 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1565 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1566 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1567 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1568 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1569 126)
1571 #define largebin_index_32_big(sz) \
1572 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1573 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1574 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1575 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1576 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1577 126)
1579 // XXX It remains to be seen whether it is good to keep the widths of
1580 // XXX the buckets the same or whether it should be scaled by a factor
1581 // XXX of two as well.
1582 #define largebin_index_64(sz) \
1583 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1584 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1585 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1586 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1587 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1588 126)
1590 #define largebin_index(sz) \
1591 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1592 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1593 : largebin_index_32 (sz))
1595 #define bin_index(sz) \
1596 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
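/* Editor's sketch, not part of the original source: a worked example of
   the indexing above.  Assuming SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16,
   MIN_LARGE_SIZE is 1024, so a 512-byte chunk is a small-bin chunk
   (smallbin_index (512) == 32) while a 1024-byte chunk lands in the first
   large bin (largebin_index_64 (1024) == 64).  The helper below merely
   mirrors bin_index for readability; it is hypothetical.  */
static __always_inline unsigned int
example_bin_for (size_t chunk_size)
{
  if (in_smallbin_range (chunk_size))
    /* Small bins hold exactly one chunk size each.  */
    return smallbin_index (chunk_size);
  /* Large bins cover ranges whose widths grow roughly geometrically.  */
  return largebin_index (chunk_size);
}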
1598 /* Take a chunk off a bin list. */
1599 static void
1600 unlink_chunk (mstate av, mchunkptr p)
1602 if (chunksize (p) != prev_size (next_chunk (p)))
1603 malloc_printerr ("corrupted size vs. prev_size");
1605 mchunkptr fd = p->fd;
1606 mchunkptr bk = p->bk;
1608 if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
1609 malloc_printerr ("corrupted double-linked list");
1611 fd->bk = bk;
1612 bk->fd = fd;
1613 if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
1615 if (p->fd_nextsize->bk_nextsize != p
1616 || p->bk_nextsize->fd_nextsize != p)
1617 malloc_printerr ("corrupted double-linked list (not small)");
1619 if (fd->fd_nextsize == NULL)
1621 if (p->fd_nextsize == p)
1622 fd->fd_nextsize = fd->bk_nextsize = fd;
1623 else
1625 fd->fd_nextsize = p->fd_nextsize;
1626 fd->bk_nextsize = p->bk_nextsize;
1627 p->fd_nextsize->bk_nextsize = fd;
1628 p->bk_nextsize->fd_nextsize = fd;
1631 else
1633 p->fd_nextsize->bk_nextsize = p->bk_nextsize;
1634 p->bk_nextsize->fd_nextsize = p->fd_nextsize;
1640 Unsorted chunks
1642 All remainders from chunk splits, as well as all returned chunks,
1643 are first placed in the "unsorted" bin. They are then placed
1644 in regular bins after malloc gives them ONE chance to be used before
1645 binning. So, basically, the unsorted_chunks list acts as a queue,
1646 with chunks being placed on it in free (and malloc_consolidate),
1647 and taken off (to be either used or placed in bins) in malloc.
1649 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1650 does not have to be taken into account in size comparisons.
1653 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1654 #define unsorted_chunks(M) (bin_at (M, 1))
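/* Editor's sketch, not part of the original source: roughly how a freed
   chunk is queued on the unsorted list (see _int_free later in this
   file).  The real code also validates the list head and clears the
   nextsize links of large chunks; only the core insertion is shown.  */
static __always_inline void
example_queue_unsorted (mbinptr unsorted, mchunkptr p)
{
  mchunkptr fwd = unsorted->fd;   /* current front of the queue */
  p->fd = fwd;
  p->bk = unsorted;
  unsorted->fd = p;
  fwd->bk = p;
  /* Newly freed chunks sit at the front; malloc later drains the queue
     from the back (bk) end, giving the FIFO behaviour described above.  */
}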
1659 The top-most available chunk (i.e., the one bordering the end of
1660 available memory) is treated specially. It is never included in
1661 any bin, is used only if no other chunk is available, and is
1662 released back to the system if it is very large (see
1663 M_TRIM_THRESHOLD). Because top initially
1664 points to its own bin with initial zero size, thus forcing
1665 extension on the first malloc request, we avoid having any special
1666 code in malloc to check whether it even exists yet. But we still
1667 need to do so when getting memory from system, so we make
1668 initial_top treat the bin as a legal but unusable chunk during the
1669 interval between initialization and the first call to
1670 sysmalloc. (This is somewhat delicate, since it relies on
1671 the 2 preceding words to be zero during this interval as well.)
1674 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1675 #define initial_top(M) (unsorted_chunks (M))
1678 Binmap
1680 To help compensate for the large number of bins, a one-level index
1681 structure is used for bin-by-bin searching. `binmap' is a
1682 bitvector recording whether bins are definitely empty so they can
1683 be skipped over during traversals. The bits are NOT always
1684 cleared as soon as bins are empty, but instead only
1685 when they are noticed to be empty during traversal in malloc.
1688 /* Conservatively use 32 bits per map word, even if on a 64-bit system */
1689 #define BINMAPSHIFT 5
1690 #define BITSPERMAP (1U << BINMAPSHIFT)
1691 #define BINMAPSIZE (NBINS / BITSPERMAP)
1693 #define idx2block(i) ((i) >> BINMAPSHIFT)
1694 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1696 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1697 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1698 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
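/* Editor's sketch, not part of the original source: a bin index splits
   into a map word and a bit inside it, e.g. bin 70 maps to word
   idx2block (70) == 2 and bit idx2bit (70) == 1 << 6.  A clear bit proves
   the bin is empty; a set bit is only a hint, since bits are cleared
   lazily.  The helper takes the raw map array and is hypothetical.  */
static __always_inline int
example_bin_maybe_nonempty (const unsigned int *binmap_words, int i)
{
  return (binmap_words[idx2block (i)] & idx2bit (i)) != 0;
}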
1701 Fastbins
1703 An array of lists holding recently freed small chunks. Fastbins
1704 are not doubly linked. It is faster to single-link them, and
1705 since chunks are never removed from the middles of these lists,
1706 double linking is not necessary. Also, unlike regular bins, they
1707 are not even processed in FIFO order (they use faster LIFO) since
1708 ordering doesn't much matter in the transient contexts in which
1709 fastbins are normally used.
1711 Chunks in fastbins keep their inuse bit set, so they cannot
1712 be consolidated with other free chunks. malloc_consolidate
1713 releases all chunks in fastbins and consolidates them with
1714 other free chunks.
1717 typedef struct malloc_chunk *mfastbinptr;
1718 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1720 /* offset 2 to use otherwise unindexable first 2 bins */
1721 #define fastbin_index(sz) \
1722 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1725 /* The maximum fastbin request size we support */
1726 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1728 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
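/* Editor's sketch, not part of the original source: the single-linked
   LIFO discipline of one fastbin.  The real code in _int_free and
   _int_malloc additionally mangles the fd pointer (PROTECT_PTR /
   REVEAL_PTR), may use an atomic exchange, and leaves the chunk's inuse
   bit set; these hypothetical helpers show only the list mechanics.  */
static __always_inline void
example_fastbin_push (mfastbinptr *fb, mchunkptr p)
{
  p->fd = *fb;          /* new head points at the old head */
  *fb = p;
}

static __always_inline mchunkptr
example_fastbin_pop (mfastbinptr *fb)
{
  mchunkptr victim = *fb;
  if (victim != NULL)
    *fb = victim->fd;   /* most recently freed chunk is reused first */
  return victim;
}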
1731 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1732 that triggers automatic consolidation of possibly-surrounding
1733 fastbin chunks. This is a heuristic, so the exact value should not
1734 matter too much. It is defined at half the default trim threshold as a
1735 compromise heuristic to only attempt consolidation if it is likely
1736 to lead to trimming. However, it is not dynamically tunable, since
1737 consolidation reduces fragmentation surrounding large chunks even
1738 if trimming is not used.
1741 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
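/* Editor's sketch, not part of the original source: the shape of the
   check made in _int_free once a freed chunk has been coalesced with its
   free neighbours.  Only a sufficiently large result makes a full
   fastbin consolidation look worthwhile; the helper is hypothetical.  */
static __always_inline int
example_should_consolidate (unsigned long coalesced_size, int have_fastchunks)
{
  return coalesced_size >= FASTBIN_CONSOLIDATION_THRESHOLD
         && have_fastchunks;
}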
1744 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1745 regions. Otherwise, contiguity is exploited in merging together,
1746 when possible, results from consecutive MORECORE calls.
1748 The initial value comes from MORECORE_CONTIGUOUS, but is
1749 changed dynamically if mmap is ever used as an sbrk substitute.
1752 #define NONCONTIGUOUS_BIT (2U)
1754 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1755 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1756 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1757 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1759 /* Maximum size of memory handled in fastbins. */
1760 static INTERNAL_SIZE_T global_max_fast;
1763 Set value of max_fast.
1764 Use impossibly small value if 0.
1765 Precondition: there are no existing fastbin chunks in the main arena.
1766 Since do_check_malloc_state () checks this, we call malloc_consolidate ()
1767 before changing max_fast. Note other arenas will leak their fast bin
1768 entries if max_fast is reduced.
1771 #define set_max_fast(s) \
1772 global_max_fast = (((size_t) (s) <= MALLOC_ALIGN_MASK - SIZE_SZ) \
1773 ? MIN_CHUNK_SIZE / 2 : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1775 static inline INTERNAL_SIZE_T
1776 get_max_fast (void)
1778 /* Tell the GCC optimizers that global_max_fast is never larger
1779 than MAX_FAST_SIZE. This avoids out-of-bounds array accesses in
1780 _int_malloc after constant propagation of the size parameter.
1781 (The code never executes because malloc preserves the
1782 global_max_fast invariant, but the optimizers may not recognize
1783 this.) */
1784 if (global_max_fast > MAX_FAST_SIZE)
1785 __builtin_unreachable ();
1786 return global_max_fast;
1790 ----------- Internal state representation and initialization -----------
1794 have_fastchunks indicates that there are probably some fastbin chunks.
1795 It is set true on entering a chunk into any fastbin, and cleared early in
1796 malloc_consolidate. The value is approximate since it may be set when there
1797 are no fastbin chunks, or it may be clear even if there are fastbin chunks
1798 available. Given its sole purpose is to reduce the number of redundant calls to
1799 malloc_consolidate, it does not affect correctness. As a result we can safely
1800 use relaxed atomic accesses.
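/* Editor's sketch, not part of the original source: because the flag is
   only a hint, relaxed C11-style atomics (as used throughout this file)
   are enough.  The real code stores into av->have_fastchunks; a plain
   int pointer is used here only because malloc_state is defined below.  */
static __always_inline void
example_note_fastchunks (int *have_fastchunks, bool have)
{
  atomic_store_relaxed (have_fastchunks, have);
}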
1804 struct malloc_state
1806 /* Serialize access. */
1807 __libc_lock_define (, mutex);
1809 /* Flags (formerly in max_fast). */
1810 int flags;
1812 /* Set if the fastbin chunks contain recently inserted free blocks. */
1813 /* Note this is a bool but not all targets support atomics on booleans. */
1814 int have_fastchunks;
1816 /* Fastbins */
1817 mfastbinptr fastbinsY[NFASTBINS];
1819 /* Base of the topmost chunk -- not otherwise kept in a bin */
1820 mchunkptr top;
1822 /* The remainder from the most recent split of a small request */
1823 mchunkptr last_remainder;
1825 /* Normal bins packed as described above */
1826 mchunkptr bins[NBINS * 2 - 2];
1828 /* Bitmap of bins */
1829 unsigned int binmap[BINMAPSIZE];
1831 /* Linked list */
1832 struct malloc_state *next;
1834 /* Linked list for free arenas. Access to this field is serialized
1835 by free_list_lock in arena.c. */
1836 struct malloc_state *next_free;
1838 /* Number of threads attached to this arena. 0 if the arena is on
1839 the free list. Access to this field is serialized by
1840 free_list_lock in arena.c. */
1841 INTERNAL_SIZE_T attached_threads;
1843 /* Memory allocated from the system in this arena. */
1844 INTERNAL_SIZE_T system_mem;
1845 INTERNAL_SIZE_T max_system_mem;
1848 struct malloc_par
1850 /* Tunable parameters */
1851 unsigned long trim_threshold;
1852 INTERNAL_SIZE_T top_pad;
1853 INTERNAL_SIZE_T mmap_threshold;
1854 INTERNAL_SIZE_T arena_test;
1855 INTERNAL_SIZE_T arena_max;
1857 #if HAVE_TUNABLES
1858 /* Transparent Large Page support. */
1859 INTERNAL_SIZE_T thp_pagesize;
1860 /* A value other than 0 means to align mmap allocations to hp_pagesize
1861 and to add hp_flags to the mmap flags. */
1862 INTERNAL_SIZE_T hp_pagesize;
1863 int hp_flags;
1864 #endif
1866 /* Memory map support */
1867 int n_mmaps;
1868 int n_mmaps_max;
1869 int max_n_mmaps;
1870 /* the mmap_threshold is dynamic, until the user sets
1871 it manually, at which point we need to disable any
1872 dynamic behavior. */
1873 int no_dyn_threshold;
1875 /* Statistics */
1876 INTERNAL_SIZE_T mmapped_mem;
1877 INTERNAL_SIZE_T max_mmapped_mem;
1879 /* First address handed out by MORECORE/sbrk. */
1880 char *sbrk_base;
1882 #if USE_TCACHE
1883 /* Maximum number of buckets to use. */
1884 size_t tcache_bins;
1885 size_t tcache_max_bytes;
1886 /* Maximum number of chunks in each bucket. */
1887 size_t tcache_count;
1888 /* Maximum number of chunks to remove from the unsorted list, which
1889 aren't used to prefill the cache. */
1890 size_t tcache_unsorted_limit;
1891 #endif
1894 /* There are several instances of this struct ("arenas") in this
1895 malloc. If you are adapting this malloc in a way that does NOT use
1896 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1897 before using. This malloc relies on the property that malloc_state
1898 is initialized to all zeroes (as is true of C statics). */
1900 static struct malloc_state main_arena =
1902 .mutex = _LIBC_LOCK_INITIALIZER,
1903 .next = &main_arena,
1904 .attached_threads = 1
1907 /* There is only one instance of the malloc parameters. */
1909 static struct malloc_par mp_ =
1911 .top_pad = DEFAULT_TOP_PAD,
1912 .n_mmaps_max = DEFAULT_MMAP_MAX,
1913 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1914 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1915 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1916 .arena_test = NARENAS_FROM_NCORES (1)
1917 #if USE_TCACHE
1919 .tcache_count = TCACHE_FILL_COUNT,
1920 .tcache_bins = TCACHE_MAX_BINS,
1921 .tcache_max_bytes = tidx2usize (TCACHE_MAX_BINS-1),
1922 .tcache_unsorted_limit = 0 /* No limit. */
1923 #endif
1927 Initialize a malloc_state struct.
1929 This is called from ptmalloc_init () or from _int_new_arena ()
1930 when creating a new arena.
1933 static void
1934 malloc_init_state (mstate av)
1936 int i;
1937 mbinptr bin;
1939 /* Establish circular links for normal bins */
1940 for (i = 1; i < NBINS; ++i)
1942 bin = bin_at (av, i);
1943 bin->fd = bin->bk = bin;
1946 #if MORECORE_CONTIGUOUS
1947 if (av != &main_arena)
1948 #endif
1949 set_noncontiguous (av);
1950 if (av == &main_arena)
1951 set_max_fast (DEFAULT_MXFAST);
1952 atomic_store_relaxed (&av->have_fastchunks, false);
1954 av->top = initial_top (av);
1958 Other internal utilities operating on mstates
1961 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1962 static int systrim (size_t, mstate);
1963 static void malloc_consolidate (mstate);
1966 /* -------------- Early definitions for debugging hooks ---------------- */
1968 /* This function is called from the arena shutdown hook, to free the
1969 thread cache (if it exists). */
1970 static void tcache_thread_shutdown (void);
1972 /* ------------------ Testing support ----------------------------------*/
1974 static int perturb_byte;
1976 static void
1977 alloc_perturb (char *p, size_t n)
1979 if (__glibc_unlikely (perturb_byte))
1980 memset (p, perturb_byte ^ 0xff, n);
1983 static void
1984 free_perturb (char *p, size_t n)
1986 if (__glibc_unlikely (perturb_byte))
1987 memset (p, perturb_byte, n);
1992 #include <stap-probe.h>
1994 /* ----------- Routines dealing with transparent huge pages ----------- */
1996 static inline void
1997 madvise_thp (void *p, INTERNAL_SIZE_T size)
1999 #if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
2000 /* Do not consider areas smaller than a huge page or if the tunable is
2001 not active. */
2002 if (mp_.thp_pagesize == 0 || size < mp_.thp_pagesize)
2003 return;
2005 /* Linux requires the input address to be page-aligned, and unaligned
2006 inputs happen only for the initial data segment. */
2007 if (__glibc_unlikely (!PTR_IS_ALIGNED (p, GLRO (dl_pagesize))))
2009 void *q = PTR_ALIGN_DOWN (p, GLRO (dl_pagesize));
2010 size += PTR_DIFF (p, q);
2011 p = q;
2014 __madvise (p, size, MADV_HUGEPAGE);
2015 #endif
2018 /* ------------------- Support for multiple arenas -------------------- */
2019 #include "arena.c"
2022 Debugging support
2024 These routines make a number of assertions about the states
2025 of data structures that should be true at all times. If any
2026 are not true, it's very likely that a user program has somehow
2027 trashed memory. (It's also possible that there is a coding error
2028 in malloc. In which case, please report it!)
2031 #if !MALLOC_DEBUG
2033 # define check_chunk(A, P)
2034 # define check_free_chunk(A, P)
2035 # define check_inuse_chunk(A, P)
2036 # define check_remalloced_chunk(A, P, N)
2037 # define check_malloced_chunk(A, P, N)
2038 # define check_malloc_state(A)
2040 #else
2042 # define check_chunk(A, P) do_check_chunk (A, P)
2043 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
2044 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
2045 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
2046 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
2047 # define check_malloc_state(A) do_check_malloc_state (A)
2050 Properties of all chunks
2053 static void
2054 do_check_chunk (mstate av, mchunkptr p)
2056 unsigned long sz = chunksize (p);
2057 /* min and max possible addresses assuming contiguous allocation */
2058 char *max_address = (char *) (av->top) + chunksize (av->top);
2059 char *min_address = max_address - av->system_mem;
2061 if (!chunk_is_mmapped (p))
2063 /* Has legal address ... */
2064 if (p != av->top)
2066 if (contiguous (av))
2068 assert (((char *) p) >= min_address);
2069 assert (((char *) p + sz) <= ((char *) (av->top)));
2072 else
2074 /* top size is always at least MINSIZE */
2075 assert ((unsigned long) (sz) >= MINSIZE);
2076 /* top predecessor always marked inuse */
2077 assert (prev_inuse (p));
2080 else
2082 /* address is outside main heap */
2083 if (contiguous (av) && av->top != initial_top (av))
2085 assert (((char *) p) < min_address || ((char *) p) >= max_address);
2087 /* chunk is page-aligned */
2088 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
2089 /* mem is aligned */
2090 assert (aligned_OK (chunk2mem (p)));
2095 Properties of free chunks
2098 static void
2099 do_check_free_chunk (mstate av, mchunkptr p)
2101 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2102 mchunkptr next = chunk_at_offset (p, sz);
2104 do_check_chunk (av, p);
2106 /* Chunk must claim to be free ... */
2107 assert (!inuse (p));
2108 assert (!chunk_is_mmapped (p));
2110 /* Unless a special marker, must have OK fields */
2111 if ((unsigned long) (sz) >= MINSIZE)
2113 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2114 assert (aligned_OK (chunk2mem (p)));
2115 /* ... matching footer field */
2116 assert (prev_size (next_chunk (p)) == sz);
2117 /* ... and is fully consolidated */
2118 assert (prev_inuse (p));
2119 assert (next == av->top || inuse (next));
2121 /* ... and has minimally sane links */
2122 assert (p->fd->bk == p);
2123 assert (p->bk->fd == p);
2125 else /* markers are always of size SIZE_SZ */
2126 assert (sz == SIZE_SZ);
2130 Properties of inuse chunks
2133 static void
2134 do_check_inuse_chunk (mstate av, mchunkptr p)
2136 mchunkptr next;
2138 do_check_chunk (av, p);
2140 if (chunk_is_mmapped (p))
2141 return; /* mmapped chunks have no next/prev */
2143 /* Check whether it claims to be in use ... */
2144 assert (inuse (p));
2146 next = next_chunk (p);
2148 /* ... and is surrounded by OK chunks.
2149 Since more things can be checked with free chunks than inuse ones,
2150 if an inuse chunk borders them and debug is on, it's worth doing them.
2152 if (!prev_inuse (p))
2154 /* Note that we cannot even look at prev unless it is not inuse */
2155 mchunkptr prv = prev_chunk (p);
2156 assert (next_chunk (prv) == p);
2157 do_check_free_chunk (av, prv);
2160 if (next == av->top)
2162 assert (prev_inuse (next));
2163 assert (chunksize (next) >= MINSIZE);
2165 else if (!inuse (next))
2166 do_check_free_chunk (av, next);
2170 Properties of chunks recycled from fastbins
2173 static void
2174 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2176 INTERNAL_SIZE_T sz = chunksize_nomask (p) & ~(PREV_INUSE | NON_MAIN_ARENA);
2178 if (!chunk_is_mmapped (p))
2180 assert (av == arena_for_chunk (p));
2181 if (chunk_main_arena (p))
2182 assert (av == &main_arena);
2183 else
2184 assert (av != &main_arena);
2187 do_check_inuse_chunk (av, p);
2189 /* Legal size ... */
2190 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2191 assert ((unsigned long) (sz) >= MINSIZE);
2192 /* ... and alignment */
2193 assert (aligned_OK (chunk2mem (p)));
2194 /* chunk is less than MINSIZE more than request */
2195 assert ((long) (sz) - (long) (s) >= 0);
2196 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2200 Properties of nonrecycled chunks at the point they are malloced
2203 static void
2204 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2206 /* same as recycled case ... */
2207 do_check_remalloced_chunk (av, p, s);
2210 ... plus, must obey implementation invariant that prev_inuse is
2211 always true of any allocated chunk; i.e., that each allocated
2212 chunk borders either a previously allocated and still in-use
2213 chunk, or the base of its memory arena. This is ensured
2214 by making all allocations from the `lowest' part of any found
2215 chunk. This does not necessarily hold however for chunks
2216 recycled via fastbins.
2219 assert (prev_inuse (p));
2224 Properties of malloc_state.
2226 This may be useful for debugging malloc, as well as detecting user
2227 programmer errors that somehow write into malloc_state.
2229 If you are extending or experimenting with this malloc, you can
2230 probably figure out how to hack this routine to print out or
2231 display chunk addresses, sizes, bins, and other instrumentation.
2234 static void
2235 do_check_malloc_state (mstate av)
2237 int i;
2238 mchunkptr p;
2239 mchunkptr q;
2240 mbinptr b;
2241 unsigned int idx;
2242 INTERNAL_SIZE_T size;
2243 unsigned long total = 0;
2244 int max_fast_bin;
2246 /* internal size_t must be no wider than pointer type */
2247 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2249 /* alignment is a power of 2 */
2250 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2252 /* Check the arena is initialized. */
2253 assert (av->top != 0);
2255 /* No memory has been allocated yet, so doing more tests is not possible. */
2256 if (av->top == initial_top (av))
2257 return;
2259 /* pagesize is a power of 2 */
2260 assert (powerof2(GLRO (dl_pagesize)));
2262 /* A contiguous main_arena is consistent with sbrk_base. */
2263 if (av == &main_arena && contiguous (av))
2264 assert ((char *) mp_.sbrk_base + av->system_mem ==
2265 (char *) av->top + chunksize (av->top));
2267 /* properties of fastbins */
2269 /* max_fast is in allowed range */
2270 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2272 max_fast_bin = fastbin_index (get_max_fast ());
2274 for (i = 0; i < NFASTBINS; ++i)
2276 p = fastbin (av, i);
2278 /* The following test can only be performed for the main arena.
2279 While mallopt calls malloc_consolidate to get rid of all fast
2280 bins (especially those larger than the new maximum) this does
2281 only happen for the main arena. Trying to do this for any
2282 other arena would mean those arenas have to be locked and
2283 malloc_consolidate be called for them. This is excessive. And
2284 even if this is acceptable to somebody it still cannot solve
2285 the problem completely since if the arena is locked a
2286 concurrent malloc call might create a new arena which then
2287 could use the newly invalid fast bins. */
2289 /* all bins past max_fast are empty */
2290 if (av == &main_arena && i > max_fast_bin)
2291 assert (p == 0);
2293 while (p != 0)
2295 if (__glibc_unlikely (misaligned_chunk (p)))
2296 malloc_printerr ("do_check_malloc_state(): "
2297 "unaligned fastbin chunk detected");
2298 /* each chunk claims to be inuse */
2299 do_check_inuse_chunk (av, p);
2300 total += chunksize (p);
2301 /* chunk belongs in this bin */
2302 assert (fastbin_index (chunksize (p)) == i);
2303 p = REVEAL_PTR (p->fd);
2307 /* check normal bins */
2308 for (i = 1; i < NBINS; ++i)
2310 b = bin_at (av, i);
2312 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2313 if (i >= 2)
2315 unsigned int binbit = get_binmap (av, i);
2316 int empty = last (b) == b;
2317 if (!binbit)
2318 assert (empty);
2319 else if (!empty)
2320 assert (binbit);
2323 for (p = last (b); p != b; p = p->bk)
2325 /* each chunk claims to be free */
2326 do_check_free_chunk (av, p);
2327 size = chunksize (p);
2328 total += size;
2329 if (i >= 2)
2331 /* chunk belongs in bin */
2332 idx = bin_index (size);
2333 assert (idx == i);
2334 /* lists are sorted */
2335 assert (p->bk == b ||
2336 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2338 if (!in_smallbin_range (size))
2340 if (p->fd_nextsize != NULL)
2342 if (p->fd_nextsize == p)
2343 assert (p->bk_nextsize == p);
2344 else
2346 if (p->fd_nextsize == first (b))
2347 assert (chunksize (p) < chunksize (p->fd_nextsize));
2348 else
2349 assert (chunksize (p) > chunksize (p->fd_nextsize));
2351 if (p == first (b))
2352 assert (chunksize (p) > chunksize (p->bk_nextsize));
2353 else
2354 assert (chunksize (p) < chunksize (p->bk_nextsize));
2357 else
2358 assert (p->bk_nextsize == NULL);
2361 else if (!in_smallbin_range (size))
2362 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2363 /* chunk is followed by a legal chain of inuse chunks */
2364 for (q = next_chunk (p);
2365 (q != av->top && inuse (q) &&
2366 (unsigned long) (chunksize (q)) >= MINSIZE);
2367 q = next_chunk (q))
2368 do_check_inuse_chunk (av, q);
2372 /* top chunk is OK */
2373 check_chunk (av, av->top);
2375 #endif
2378 /* ----------------- Support for debugging hooks -------------------- */
2379 #if IS_IN (libc)
2380 #include "hooks.c"
2381 #endif
2384 /* ----------- Routines dealing with system allocation -------------- */
2387 sysmalloc handles malloc cases requiring more memory from the system.
2388 On entry, it is assumed that av->top does not have enough
2389 space to service request for nb bytes, thus requiring that av->top
2390 be extended or replaced.
2393 static void *
2394 sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
2396 long int size;
2399 Round up size to nearest page. For mmapped chunks, the overhead is one
2400 SIZE_SZ unit larger than for normal chunks, because there is no
2401 following chunk whose prev_size field could be used.
2403 See the front_misalign handling below; for glibc there is no need for
2404 further alignment unless we have high alignment.
2406 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2407 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2408 else
2409 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2411 /* Don't try if size wraps around 0. */
2412 if ((unsigned long) (size) <= (unsigned long) (nb))
2413 return MAP_FAILED;
2415 char *mm = (char *) MMAP (0, size,
2416 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2417 extra_flags);
2418 if (mm == MAP_FAILED)
2419 return mm;
2421 #ifdef MAP_HUGETLB
2422 if (!(extra_flags & MAP_HUGETLB))
2423 madvise_thp (mm, size);
2424 #endif
2427 The offset to the start of the mmapped region is stored in the prev_size
2428 field of the chunk. This allows us to adjust returned start address to
2429 meet alignment requirements here and in memalign(), and still be able to
2430 compute proper address argument for later munmap in free() and realloc().
2433 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2435 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2437 /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
2438 MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
2439 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2440 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2441 front_misalign = 0;
2443 else
2444 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2446 mchunkptr p; /* the allocated/returned chunk */
2448 if (front_misalign > 0)
2450 ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
2451 p = (mchunkptr) (mm + correction);
2452 set_prev_size (p, correction);
2453 set_head (p, (size - correction) | IS_MMAPPED);
2455 else
2457 p = (mchunkptr) mm;
2458 set_prev_size (p, 0);
2459 set_head (p, size | IS_MMAPPED);
2462 /* update statistics */
2463 int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
2464 atomic_max (&mp_.max_n_mmaps, new);
2466 unsigned long sum;
2467 sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
2468 atomic_max (&mp_.max_mmapped_mem, sum);
2470 check_chunk (av, p);
2472 return chunk2mem (p);
2476 Allocate memory using mmap() based on S and NB requested size, aligning to
2477 PAGESIZE if required. EXTRA_FLAGS is passed to the mmap() call. If the call
2478 succeeds, S is updated with the allocated size. This is used as a fallback
2479 if MORECORE fails.
2481 static void *
2482 sysmalloc_mmap_fallback (long int *s, INTERNAL_SIZE_T nb,
2483 INTERNAL_SIZE_T old_size, size_t minsize,
2484 size_t pagesize, int extra_flags, mstate av)
2486 long int size = *s;
2488 /* Cannot merge with old top, so add its size back in */
2489 if (contiguous (av))
2490 size = ALIGN_UP (size + old_size, pagesize);
2492 /* If we are relying on mmap as backup, then use larger units */
2493 if ((unsigned long) (size) < minsize)
2494 size = minsize;
2496 /* Don't try if size wraps around 0 */
2497 if ((unsigned long) (size) <= (unsigned long) (nb))
2498 return MORECORE_FAILURE;
2500 char *mbrk = (char *) (MMAP (0, size,
2501 mtag_mmap_flags | PROT_READ | PROT_WRITE,
2502 extra_flags));
2503 if (mbrk == MAP_FAILED)
2504 return MAP_FAILED;
2506 #ifdef MAP_HUGETLB
2507 if (!(extra_flags & MAP_HUGETLB))
2508 madvise_thp (mbrk, size);
2509 #endif
2511 /* Record that we no longer have a contiguous sbrk region. After the first
2512 time mmap is used as backup, we do not ever rely on contiguous space
2513 since this could incorrectly bridge regions. */
2514 set_noncontiguous (av);
2516 *s = size;
2517 return mbrk;
2520 static void *
2521 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2523 mchunkptr old_top; /* incoming value of av->top */
2524 INTERNAL_SIZE_T old_size; /* its size */
2525 char *old_end; /* its end address */
2527 long size; /* arg to first MORECORE or mmap call */
2528 char *brk; /* return value from MORECORE */
2530 long correction; /* arg to 2nd MORECORE call */
2531 char *snd_brk; /* 2nd return val */
2533 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2534 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2535 char *aligned_brk; /* aligned offset into brk */
2537 mchunkptr p; /* the allocated/returned chunk */
2538 mchunkptr remainder; /* remainder from allocation */
2539 unsigned long remainder_size; /* its size */
2542 size_t pagesize = GLRO (dl_pagesize);
2543 bool tried_mmap = false;
2547 If have mmap, and the request size meets the mmap threshold, and
2548 the system supports mmap, and there are few enough currently
2549 allocated mmapped regions, try to directly map this request
2550 rather than expanding top.
2553 if (av == NULL
2554 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2555 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2557 char *mm;
2558 #if HAVE_TUNABLES
2559 if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
2561 /* There is no need to issue the THP madvise call if Huge Pages are
2562 used directly. */
2563 mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
2564 if (mm != MAP_FAILED)
2565 return mm;
2567 #endif
2568 mm = sysmalloc_mmap (nb, pagesize, 0, av);
2569 if (mm != MAP_FAILED)
2570 return mm;
2571 tried_mmap = true;
2574 /* There are no usable arenas and mmap also failed. */
2575 if (av == NULL)
2576 return 0;
2578 /* Record incoming configuration of top */
2580 old_top = av->top;
2581 old_size = chunksize (old_top);
2582 old_end = (char *) (chunk_at_offset (old_top, old_size));
2584 brk = snd_brk = (char *) (MORECORE_FAILURE);
2587 If not the first time through, we require old_size to be
2588 at least MINSIZE and to have prev_inuse set.
2591 assert ((old_top == initial_top (av) && old_size == 0) ||
2592 ((unsigned long) (old_size) >= MINSIZE &&
2593 prev_inuse (old_top) &&
2594 ((unsigned long) old_end & (pagesize - 1)) == 0));
2596 /* Precondition: not enough current space to satisfy nb request */
2597 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2600 if (av != &main_arena)
2602 heap_info *old_heap, *heap;
2603 size_t old_heap_size;
2605 /* First try to extend the current heap. */
2606 old_heap = heap_for_ptr (old_top);
2607 old_heap_size = old_heap->size;
2608 if ((long) (MINSIZE + nb - old_size) > 0
2609 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2611 av->system_mem += old_heap->size - old_heap_size;
2612 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2613 | PREV_INUSE);
2615 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2617 /* Use a newly allocated heap. */
2618 heap->ar_ptr = av;
2619 heap->prev = old_heap;
2620 av->system_mem += heap->size;
2621 /* Set up the new top. */
2622 top (av) = chunk_at_offset (heap, sizeof (*heap));
2623 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2625 /* Set up the fencepost and free the old top chunk with a multiple of
2626 MALLOC_ALIGNMENT in size. */
2627 /* The fencepost takes at least MINSIZE bytes, because it might
2628 become the top chunk again later. Note that a footer is set
2629 up, too, although the chunk is marked in use. */
2630 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2631 set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
2632 0 | PREV_INUSE);
2633 if (old_size >= MINSIZE)
2635 set_head (chunk_at_offset (old_top, old_size),
2636 CHUNK_HDR_SZ | PREV_INUSE);
2637 set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
2638 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2639 _int_free (av, old_top, 1);
2641 else
2643 set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
2644 set_foot (old_top, (old_size + CHUNK_HDR_SZ));
2647 else if (!tried_mmap)
2649 /* We can at least try to use mmap memory. If new_heap fails
2650 it is unlikely that trying to allocate huge pages will
2651 succeed. */
2652 char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
2653 if (mm != MAP_FAILED)
2654 return mm;
2657 else /* av == main_arena */
2660 { /* Request enough space for nb + pad + overhead */
2661 size = nb + mp_.top_pad + MINSIZE;
2664 If contiguous, we can subtract out existing space that we hope to
2665 combine with new space. We add it back later only if
2666 we don't actually get contiguous space.
2669 if (contiguous (av))
2670 size -= old_size;
2673 Round to a multiple of page size or huge page size.
2674 If MORECORE is not contiguous, this ensures that we only call it
2675 with whole-page arguments. And if MORECORE is contiguous and
2676 this is not first time through, this preserves page-alignment of
2677 previous calls. Otherwise, we correct to page-align below.
2680 #if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
2681 /* Defined in brk.c. */
2682 extern void *__curbrk;
2683 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2685 uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
2686 mp_.thp_pagesize);
2687 size = top - (uintptr_t) __curbrk;
2689 else
2690 #endif
2691 size = ALIGN_UP (size, GLRO(dl_pagesize));
2694 Don't try to call MORECORE if argument is so big as to appear
2695 negative. Note that since mmap takes size_t arg, it may succeed
2696 below even if we cannot call MORECORE.
2699 if (size > 0)
2701 brk = (char *) (MORECORE (size));
2702 if (brk != (char *) (MORECORE_FAILURE))
2703 madvise_thp (brk, size);
2704 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2707 if (brk == (char *) (MORECORE_FAILURE))
2710 If have mmap, try using it as a backup when MORECORE fails or
2711 cannot be used. This is worth doing on systems that have "holes" in
2712 address space, so sbrk cannot extend to give contiguous space, but
2713 space is available elsewhere. Note that we ignore mmap max count
2714 and threshold limits, since the space will not be used as a
2715 segregated mmap region.
2718 char *mbrk = MAP_FAILED;
2719 #if HAVE_TUNABLES
2720 if (mp_.hp_pagesize > 0)
2721 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
2722 mp_.hp_pagesize, mp_.hp_pagesize,
2723 mp_.hp_flags, av);
2724 #endif
2725 if (mbrk == MAP_FAILED)
2726 mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
2727 MMAP_AS_MORECORE_SIZE, 0, av);
2728 if (mbrk != MAP_FAILED)
2730 /* We do not need, and cannot use, another sbrk call to find end */
2731 brk = mbrk;
2732 snd_brk = brk + size;
2736 if (brk != (char *) (MORECORE_FAILURE))
2738 if (mp_.sbrk_base == 0)
2739 mp_.sbrk_base = brk;
2740 av->system_mem += size;
2743 If MORECORE extends previous space, we can likewise extend top size.
2746 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2747 set_head (old_top, (size + old_size) | PREV_INUSE);
2749 else if (contiguous (av) && old_size && brk < old_end)
2750 /* Oops! Someone else killed our space. Can't touch anything. */
2751 malloc_printerr ("break adjusted to free malloc space");
2754 Otherwise, make adjustments:
2756 * If the first time through or noncontiguous, we need to call sbrk
2757 just to find out where the end of memory lies.
2759 * We need to ensure that all returned chunks from malloc will meet
2760 MALLOC_ALIGNMENT
2762 * If there was an intervening foreign sbrk, we need to adjust sbrk
2763 request size to account for fact that we will not be able to
2764 combine new space with existing space in old_top.
2766 * Almost all systems internally allocate whole pages at a time, in
2767 which case we might as well use the whole last page of request.
2768 So we allocate enough more memory to hit a page boundary now,
2769 which in turn causes future contiguous calls to page-align.
2772 else
2774 front_misalign = 0;
2775 end_misalign = 0;
2776 correction = 0;
2777 aligned_brk = brk;
2779 /* handle contiguous cases */
2780 if (contiguous (av))
2782 /* Count foreign sbrk as system_mem. */
2783 if (old_size)
2784 av->system_mem += brk - old_end;
2786 /* Guarantee alignment of first new chunk made from this space */
2788 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2789 if (front_misalign > 0)
2792 Skip over some bytes to arrive at an aligned position.
2793 We don't need to specially mark these wasted front bytes.
2794 They will never be accessed anyway because
2795 prev_inuse of av->top (and any chunk created from its start)
2796 is always true after initialization.
2799 correction = MALLOC_ALIGNMENT - front_misalign;
2800 aligned_brk += correction;
2804 If this isn't adjacent to existing space, then we will not
2805 be able to merge with old_top space, so must add to 2nd request.
2808 correction += old_size;
2810 /* Extend the end address to hit a page boundary */
2811 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2812 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2814 assert (correction >= 0);
2815 snd_brk = (char *) (MORECORE (correction));
2818 If can't allocate correction, try to at least find out current
2819 brk. It might be enough to proceed without failing.
2821 Note that if second sbrk did NOT fail, we assume that space
2822 is contiguous with first sbrk. This is a safe assumption unless
2823 program is multithreaded but doesn't use locks and a foreign sbrk
2824 occurred between our first and second calls.
2827 if (snd_brk == (char *) (MORECORE_FAILURE))
2829 correction = 0;
2830 snd_brk = (char *) (MORECORE (0));
2832 else
2833 madvise_thp (snd_brk, correction);
2836 /* handle non-contiguous cases */
2837 else
2839 if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
2840 /* MORECORE/mmap must correctly align */
2841 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2842 else
2844 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2845 if (front_misalign > 0)
2848 Skip over some bytes to arrive at an aligned position.
2849 We don't need to specially mark these wasted front bytes.
2850 They will never be accessed anyway because
2851 prev_inuse of av->top (and any chunk created from its start)
2852 is always true after initialization.
2855 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2859 /* Find out current end of memory */
2860 if (snd_brk == (char *) (MORECORE_FAILURE))
2862 snd_brk = (char *) (MORECORE (0));
2866 /* Adjust top based on results of second sbrk */
2867 if (snd_brk != (char *) (MORECORE_FAILURE))
2869 av->top = (mchunkptr) aligned_brk;
2870 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2871 av->system_mem += correction;
2874 If not the first time through, we either have a
2875 gap due to foreign sbrk or a non-contiguous region. Insert a
2876 double fencepost at old_top to prevent consolidation with space
2877 we don't own. These fenceposts are artificial chunks that are
2878 marked as inuse and are in any case too small to use. We need
2879 two to make sizes and alignments work out.
2882 if (old_size != 0)
2885 Shrink old_top to insert fenceposts, keeping size a
2886 multiple of MALLOC_ALIGNMENT. We know there is at least
2887 enough space in old_top to do this.
2889 old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
2890 set_head (old_top, old_size | PREV_INUSE);
2893 Note that the following assignments completely overwrite
2894 old_top when old_size was previously MINSIZE. This is
2895 intentional. We need the fencepost, even if old_top otherwise gets
2896 lost.
2898 set_head (chunk_at_offset (old_top, old_size),
2899 CHUNK_HDR_SZ | PREV_INUSE);
2900 set_head (chunk_at_offset (old_top,
2901 old_size + CHUNK_HDR_SZ),
2902 CHUNK_HDR_SZ | PREV_INUSE);
2904 /* If possible, release the rest. */
2905 if (old_size >= MINSIZE)
2907 _int_free (av, old_top, 1);
2913 } /* if (av != &main_arena) */
2915 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2916 av->max_system_mem = av->system_mem;
2917 check_malloc_state (av);
2919 /* finally, do the allocation */
2920 p = av->top;
2921 size = chunksize (p);
2923 /* check that one of the above allocation paths succeeded */
2924 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2926 remainder_size = size - nb;
2927 remainder = chunk_at_offset (p, nb);
2928 av->top = remainder;
2929 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2930 set_head (remainder, remainder_size | PREV_INUSE);
2931 check_malloced_chunk (av, p, nb);
2932 return chunk2mem (p);
2935 /* catch all failure paths */
2936 __set_errno (ENOMEM);
2937 return 0;
2942 systrim is an inverse of sorts to sysmalloc. It gives memory back
2943 to the system (via negative arguments to sbrk) if there is unused
2944 memory at the `high' end of the malloc pool. It is called
2945 automatically by free() when top space exceeds the trim
2946 threshold. It is also called by the public malloc_trim routine. It
2947 returns 1 if it actually released any memory, else 0.
2950 static int
2951 systrim (size_t pad, mstate av)
2953 long top_size; /* Amount of top-most memory */
2954 long extra; /* Amount to release */
2955 long released; /* Amount actually released */
2956 char *current_brk; /* address returned by pre-check sbrk call */
2957 char *new_brk; /* address returned by post-check sbrk call */
2958 long top_area;
2960 top_size = chunksize (av->top);
2962 top_area = top_size - MINSIZE - 1;
2963 if (top_area <= pad)
2964 return 0;
2966 /* Release in pagesize units and round down to the nearest page. */
2967 #if HAVE_TUNABLES && defined (MADV_HUGEPAGE)
2968 if (__glibc_unlikely (mp_.thp_pagesize != 0))
2969 extra = ALIGN_DOWN (top_area - pad, mp_.thp_pagesize);
2970 else
2971 #endif
2972 extra = ALIGN_DOWN (top_area - pad, GLRO(dl_pagesize));
2974 if (extra == 0)
2975 return 0;
2978 Only proceed if end of memory is where we last set it.
2979 This avoids problems if there were foreign sbrk calls.
2981 current_brk = (char *) (MORECORE (0));
2982 if (current_brk == (char *) (av->top) + top_size)
2985 Attempt to release memory. We ignore MORECORE return value,
2986 and instead call again to find out where new end of memory is.
2987 This avoids problems if first call releases less than we asked,
2988 or if failure somehow altered the brk value. (We could still
2989 encounter problems if it altered brk in some very bad way,
2990 but the only thing we can do is adjust anyway, which will cause
2991 some downstream failure.)
2994 MORECORE (-extra);
2995 new_brk = (char *) (MORECORE (0));
2997 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2999 if (new_brk != (char *) MORECORE_FAILURE)
3001 released = (long) (current_brk - new_brk);
3003 if (released != 0)
3005 /* Success. Adjust top. */
3006 av->system_mem -= released;
3007 set_head (av->top, (top_size - released) | PREV_INUSE);
3008 check_malloc_state (av);
3009 return 1;
3013 return 0;
3016 static void
3017 munmap_chunk (mchunkptr p)
3019 size_t pagesize = GLRO (dl_pagesize);
3020 INTERNAL_SIZE_T size = chunksize (p);
3022 assert (chunk_is_mmapped (p));
3024 uintptr_t mem = (uintptr_t) chunk2mem (p);
3025 uintptr_t block = (uintptr_t) p - prev_size (p);
3026 size_t total_size = prev_size (p) + size;
3027 /* Unfortunately we have to do the compiler's job by hand here. Normally
3028 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3029 page size. But gcc does not recognize the optimization possibility
3030 (at the moment, at least) so we combine the two values into one before
3031 the bit test. */
3032 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3033 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3034 malloc_printerr ("munmap_chunk(): invalid pointer");
3036 atomic_decrement (&mp_.n_mmaps);
3037 atomic_fetch_add_relaxed (&mp_.mmapped_mem, -total_size);
3039 /* If munmap failed the process virtual memory address space is in a
3040 bad shape. Just leave the block hanging around, the process will
3041 terminate shortly anyway since not much can be done. */
3042 __munmap ((char *) block, total_size);
3045 #if HAVE_MREMAP
3047 static mchunkptr
3048 mremap_chunk (mchunkptr p, size_t new_size)
3050 size_t pagesize = GLRO (dl_pagesize);
3051 INTERNAL_SIZE_T offset = prev_size (p);
3052 INTERNAL_SIZE_T size = chunksize (p);
3053 char *cp;
3055 assert (chunk_is_mmapped (p));
3057 uintptr_t block = (uintptr_t) p - offset;
3058 uintptr_t mem = (uintptr_t) chunk2mem(p);
3059 size_t total_size = offset + size;
3060 if (__glibc_unlikely ((block | total_size) & (pagesize - 1)) != 0
3061 || __glibc_unlikely (!powerof2 (mem & (pagesize - 1))))
3062 malloc_printerr("mremap_chunk(): invalid pointer");
3064 /* Note the extra SIZE_SZ overhead as in sysmalloc_mmap (). */
3065 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
3067 /* No need to remap if the number of pages does not change. */
3068 if (total_size == new_size)
3069 return p;
3071 cp = (char *) __mremap ((char *) block, total_size, new_size,
3072 MREMAP_MAYMOVE);
3074 if (cp == MAP_FAILED)
3075 return 0;
3077 madvise_thp (cp, new_size);
3079 p = (mchunkptr) (cp + offset);
3081 assert (aligned_OK (chunk2mem (p)));
3083 assert (prev_size (p) == offset);
3084 set_head (p, (new_size - offset) | IS_MMAPPED);
3086 INTERNAL_SIZE_T new;
3087 new = atomic_fetch_add_relaxed (&mp_.mmapped_mem, new_size - size - offset)
3088 + new_size - size - offset;
3089 atomic_max (&mp_.max_mmapped_mem, new);
3090 return p;
3092 #endif /* HAVE_MREMAP */
3094 /*------------------------ Public wrappers. --------------------------------*/
3096 #if USE_TCACHE
3098 /* We overlay this structure on the user-data portion of a chunk when
3099 the chunk is stored in the per-thread cache. */
3100 typedef struct tcache_entry
3102 struct tcache_entry *next;
3103 /* This field exists to detect double frees. */
3104 uintptr_t key;
3105 } tcache_entry;
3107 /* There is one of these for each thread, which contains the
3108 per-thread cache (hence "tcache_perthread_struct"). Keeping
3109 overall size low is mildly important. Note that COUNTS and ENTRIES
3110 are redundant (we could have just counted the linked list each
3111 time); this is done for performance reasons. */
3112 typedef struct tcache_perthread_struct
3114 uint16_t counts[TCACHE_MAX_BINS];
3115 tcache_entry *entries[TCACHE_MAX_BINS];
3116 } tcache_perthread_struct;
3118 static __thread bool tcache_shutting_down = false;
3119 static __thread tcache_perthread_struct *tcache = NULL;
3121 /* Process-wide key to try and catch a double-free in the same thread. */
3122 static uintptr_t tcache_key;
3124 /* The value of tcache_key does not really have to be a cryptographically
3125 secure random number. It only needs to be arbitrary enough so that it does
3126 not collide with values present in applications. If a collision does happen
3127 consistently enough, it could cause a degradation in performance since the
3128 entire list is checked to see whether the block has indeed been freed a
3129 second time. The odds of this happening are exceedingly low though, about 1
3130 in 2^wordsize. There is probably a higher chance of the performance
3131 degradation being due to a double free where the first free happened in a
3132 different thread; that's a case this check does not cover. */
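/* Editor's sketch, not part of the original source: the shape of the
   check that consumes this key.  When a block handed to free () already
   carries tcache_key, the matching bin is walked to see whether it is a
   genuine double free.  The real code in _int_free also bounds the walk
   by mp_.tcache_count and aborts via malloc_printerr; this hypothetical
   helper assumes the tcache is initialized.  */
static __always_inline bool
example_probably_double_free (tcache_entry *e, size_t tc_idx)
{
  if (e->key != tcache_key)
    return false;
  for (tcache_entry *t = tcache->entries[tc_idx]; t != NULL;
       t = REVEAL_PTR (t->next))
    if (t == e)
      return true;
  return false;
}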
3133 static void
3134 tcache_key_initialize (void)
3136 if (__getrandom_nocancel (&tcache_key, sizeof(tcache_key), GRND_NONBLOCK)
3137 != sizeof (tcache_key))
3139 tcache_key = random_bits ();
3140 #if __WORDSIZE == 64
3141 tcache_key = (tcache_key << 32) | random_bits ();
3142 #endif
3146 /* Caller must ensure that we know tc_idx is valid and there's room
3147 for more chunks. */
3148 static __always_inline void
3149 tcache_put (mchunkptr chunk, size_t tc_idx)
3151 tcache_entry *e = (tcache_entry *) chunk2mem (chunk);
3153 /* Mark this chunk as "in the tcache" so the test in _int_free will
3154 detect a double free. */
3155 e->key = tcache_key;
3157 e->next = PROTECT_PTR (&e->next, tcache->entries[tc_idx]);
3158 tcache->entries[tc_idx] = e;
3159 ++(tcache->counts[tc_idx]);
3162 /* Caller must ensure that we know tc_idx is valid and there's
3163 available chunks to remove. */
3164 static __always_inline void *
3165 tcache_get (size_t tc_idx)
3167 tcache_entry *e = tcache->entries[tc_idx];
3168 if (__glibc_unlikely (!aligned_OK (e)))
3169 malloc_printerr ("malloc(): unaligned tcache chunk detected");
3170 tcache->entries[tc_idx] = REVEAL_PTR (e->next);
3171 --(tcache->counts[tc_idx]);
3172 e->key = 0;
3173 return (void *) e;
3176 static void
3177 tcache_thread_shutdown (void)
3179 int i;
3180 tcache_perthread_struct *tcache_tmp = tcache;
3182 tcache_shutting_down = true;
3184 if (!tcache)
3185 return;
3187 /* Disable the tcache and prevent it from being reinitialized. */
3188 tcache = NULL;
3190 /* Free all of the entries and the tcache itself back to the arena
3191 heap for coalescing. */
3192 for (i = 0; i < TCACHE_MAX_BINS; ++i)
3194 while (tcache_tmp->entries[i])
3196 tcache_entry *e = tcache_tmp->entries[i];
3197 if (__glibc_unlikely (!aligned_OK (e)))
3198 malloc_printerr ("tcache_thread_shutdown(): "
3199 "unaligned tcache chunk detected");
3200 tcache_tmp->entries[i] = REVEAL_PTR (e->next);
3201 __libc_free (e);
3205 __libc_free (tcache_tmp);
3208 static void
3209 tcache_init(void)
3211 mstate ar_ptr;
3212 void *victim = 0;
3213 const size_t bytes = sizeof (tcache_perthread_struct);
3215 if (tcache_shutting_down)
3216 return;
3218 arena_get (ar_ptr, bytes);
3219 victim = _int_malloc (ar_ptr, bytes);
3220 if (!victim && ar_ptr != NULL)
3222 ar_ptr = arena_get_retry (ar_ptr, bytes);
3223 victim = _int_malloc (ar_ptr, bytes);
3227 if (ar_ptr != NULL)
3228 __libc_lock_unlock (ar_ptr->mutex);
3230 /* In a low memory situation, we may not be able to allocate memory
3231 - in which case, we just keep trying later. However, we
3232 typically do this very early, so either there is sufficient
3233 memory, or there isn't enough memory to do non-trivial
3234 allocations anyway. */
3235 if (victim)
3237 tcache = (tcache_perthread_struct *) victim;
3238 memset (tcache, 0, sizeof (tcache_perthread_struct));
3243 # define MAYBE_INIT_TCACHE() \
3244 if (__glibc_unlikely (tcache == NULL)) \
3245 tcache_init();
3247 #else /* !USE_TCACHE */
3248 # define MAYBE_INIT_TCACHE()
3250 static void
3251 tcache_thread_shutdown (void)
3253 /* Nothing to do if there is no thread cache. */
3256 #endif /* !USE_TCACHE */
3258 #if IS_IN (libc)
3259 void *
3260 __libc_malloc (size_t bytes)
3262 mstate ar_ptr;
3263 void *victim;
3265 _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
3266 "PTRDIFF_MAX is not more than half of SIZE_MAX");
3268 if (!__malloc_initialized)
3269 ptmalloc_init ();
3270 #if USE_TCACHE
3271 /* _int_free also calls request2size; be careful not to pad twice. */
3272 size_t tbytes = checked_request2size (bytes);
3273 if (tbytes == 0)
3275 __set_errno (ENOMEM);
3276 return NULL;
3278 size_t tc_idx = csize2tidx (tbytes);
3280 MAYBE_INIT_TCACHE ();
3282 DIAG_PUSH_NEEDS_COMMENT;
3283 if (tc_idx < mp_.tcache_bins
3284 && tcache
3285 && tcache->counts[tc_idx] > 0)
3287 victim = tcache_get (tc_idx);
3288 return tag_new_usable (victim);
3290 DIAG_POP_NEEDS_COMMENT;
3291 #endif
3293 if (SINGLE_THREAD_P)
3295 victim = tag_new_usable (_int_malloc (&main_arena, bytes));
3296 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3297 &main_arena == arena_for_chunk (mem2chunk (victim)));
3298 return victim;
3301 arena_get (ar_ptr, bytes);
3303 victim = _int_malloc (ar_ptr, bytes);
3304 /* Retry with another arena only if we were able to find a usable arena
3305 before. */
3306 if (!victim && ar_ptr != NULL)
3308 LIBC_PROBE (memory_malloc_retry, 1, bytes);
3309 ar_ptr = arena_get_retry (ar_ptr, bytes);
3310 victim = _int_malloc (ar_ptr, bytes);
3313 if (ar_ptr != NULL)
3314 __libc_lock_unlock (ar_ptr->mutex);
3316 victim = tag_new_usable (victim);
3318 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
3319 ar_ptr == arena_for_chunk (mem2chunk (victim)));
3320 return victim;
3322 libc_hidden_def (__libc_malloc)
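/* Caller-visible sketch (illustration only): checked_request2size
   rejects any request larger than PTRDIFF_MAX, so such a call fails
   with ENOMEM instead of ever reaching an arena:

     #include <errno.h>
     #include <stdint.h>
     #include <stdlib.h>

     int
     main (void)
     {
       errno = 0;
       void *p = malloc ((size_t) PTRDIFF_MAX + 1);   // always rejected
       return (p == NULL && errno == ENOMEM) ? 0 : 1;
     }
*/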
3324 void
3325 __libc_free (void *mem)
3327 mstate ar_ptr;
3328 mchunkptr p; /* chunk corresponding to mem */
3330 if (mem == 0) /* free(0) has no effect */
3331 return;
3333 /* Quickly check that the freed pointer matches the tag for the memory.
3334 This gives a useful double-free detection. */
3335 if (__glibc_unlikely (mtag_enabled))
3336 *(volatile char *)mem;
3338 int err = errno;
3340 p = mem2chunk (mem);
3342 if (chunk_is_mmapped (p)) /* release mmapped memory. */
3344 /* See if the dynamic brk/mmap threshold needs adjusting.
3345 Dumped fake mmapped chunks do not affect the threshold. */
3346 if (!mp_.no_dyn_threshold
3347 && chunksize_nomask (p) > mp_.mmap_threshold
3348 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
3350 mp_.mmap_threshold = chunksize (p);
3351 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3352 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
3353 mp_.mmap_threshold, mp_.trim_threshold);
3355 munmap_chunk (p);
3357 else
3359 MAYBE_INIT_TCACHE ();
3361 /* Mark the chunk as belonging to the library again. */
3362 (void)tag_region (chunk2mem (p), memsize (p));
3364 ar_ptr = arena_for_chunk (p);
3365 _int_free (ar_ptr, p, 0);
3368 __set_errno (err);
3370 libc_hidden_def (__libc_free)
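/* Tuning sketch (illustration only): the dynamic threshold adjustment in
   __libc_free above is skipped once mp_.no_dyn_threshold is set, which
   happens whenever the threshold is fixed explicitly (see
   do_set_mmap_threshold further below):

     #include <malloc.h>

     int
     main (void)
     {
       // Pin the mmap threshold; freeing large mmapped blocks will no
       // longer grow it or the trim threshold.
       mallopt (M_MMAP_THRESHOLD, 256 * 1024);
       return 0;
     }
*/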
3372 void *
3373 __libc_realloc (void *oldmem, size_t bytes)
3375 mstate ar_ptr;
3376 INTERNAL_SIZE_T nb; /* padded request size */
3378 void *newp; /* chunk to return */
3380 if (!__malloc_initialized)
3381 ptmalloc_init ();
3383 #if REALLOC_ZERO_BYTES_FREES
3384 if (bytes == 0 && oldmem != NULL)
3386 __libc_free (oldmem); return 0;
3388 #endif
3390 /* realloc of null is supposed to be the same as malloc */
3391 if (oldmem == 0)
3392 return __libc_malloc (bytes);
3394 /* Perform a quick check to ensure that the pointer's tag matches the
3395 memory's tag. */
3396 if (__glibc_unlikely (mtag_enabled))
3397 *(volatile char*) oldmem;
3399 /* chunk corresponding to oldmem */
3400 const mchunkptr oldp = mem2chunk (oldmem);
3401 /* its size */
3402 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
3404 if (chunk_is_mmapped (oldp))
3405 ar_ptr = NULL;
3406 else
3408 MAYBE_INIT_TCACHE ();
3409 ar_ptr = arena_for_chunk (oldp);
3412 /* Little security check which won't hurt performance: the allocator
3413 never wraps around at the end of the address space. Therefore
3414 we can exclude some size values which might appear here by
3415 accident or by "design" from some intruder. */
3416 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3417 || __builtin_expect (misaligned_chunk (oldp), 0)))
3418 malloc_printerr ("realloc(): invalid pointer");
3420 nb = checked_request2size (bytes);
3421 if (nb == 0)
3423 __set_errno (ENOMEM);
3424 return NULL;
3427 if (chunk_is_mmapped (oldp))
3429 void *newmem;
3431 #if HAVE_MREMAP
3432 newp = mremap_chunk (oldp, nb);
3433 if (newp)
3435 void *newmem = chunk2mem_tag (newp);
3436 /* Give the new block a different tag. This helps to ensure
3437 that stale handles to the previous mapping are not
3438 reused. There's a performance hit for both us and the
3439 caller for doing this, so we might want to
3440 reconsider. */
3441 return tag_new_usable (newmem);
3443 #endif
3444 /* Note the extra SIZE_SZ overhead. */
3445 if (oldsize - SIZE_SZ >= nb)
3446 return oldmem; /* do nothing */
3448 /* Must alloc, copy, free. */
3449 newmem = __libc_malloc (bytes);
3450 if (newmem == 0)
3451 return 0; /* propagate failure */
3453 memcpy (newmem, oldmem, oldsize - CHUNK_HDR_SZ);
3454 munmap_chunk (oldp);
3455 return newmem;
3458 if (SINGLE_THREAD_P)
3460 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3461 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3462 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3464 return newp;
3467 __libc_lock_lock (ar_ptr->mutex);
3469 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3471 __libc_lock_unlock (ar_ptr->mutex);
3472 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3473 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3475 if (newp == NULL)
3477 /* Try harder to allocate memory in other arenas. */
3478 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3479 newp = __libc_malloc (bytes);
3480 if (newp != NULL)
3482 size_t sz = memsize (oldp);
3483 memcpy (newp, oldmem, sz);
3484 (void) tag_region (chunk2mem (oldp), sz);
3485 _int_free (ar_ptr, oldp, 0);
3489 return newp;
3491 libc_hidden_def (__libc_realloc)
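/* Usage sketch (illustration only) of the paths above: realloc (NULL, n)
   behaves like malloc (n), and when the block has to move, the old
   contents are copied and the old block is freed; on failure the old
   block is left untouched:

     #include <stdlib.h>
     #include <string.h>

     int
     main (void)
     {
       char *p = realloc (NULL, 8);       // same as malloc (8)
       if (p == NULL)
         return 1;
       strcpy (p, "abc");
       char *q = realloc (p, 4096);       // may move; contents preserved
       if (q == NULL)
         {
           free (p);                      // p is still valid on failure
           return 1;
         }
       free (q);
       return 0;
     }
*/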
3493 void *
3494 __libc_memalign (size_t alignment, size_t bytes)
3496 if (!__malloc_initialized)
3497 ptmalloc_init ();
3499 void *address = RETURN_ADDRESS (0);
3500 return _mid_memalign (alignment, bytes, address);
3503 static void *
3504 _mid_memalign (size_t alignment, size_t bytes, void *address)
3506 mstate ar_ptr;
3507 void *p;
3509 /* If we need less alignment than we give anyway, just relay to malloc. */
3510 if (alignment <= MALLOC_ALIGNMENT)
3511 return __libc_malloc (bytes);
3513 /* Otherwise, ensure that it is at least a minimum chunk size */
3514 if (alignment < MINSIZE)
3515 alignment = MINSIZE;
3517 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3518 power of 2 and will cause overflow in the check below. */
3519 if (alignment > SIZE_MAX / 2 + 1)
3521 __set_errno (EINVAL);
3522 return 0;
3526 /* Make sure alignment is power of 2. */
3527 if (!powerof2 (alignment))
3529 size_t a = MALLOC_ALIGNMENT * 2;
3530 while (a < alignment)
3531 a <<= 1;
3532 alignment = a;
3535 if (SINGLE_THREAD_P)
3537 p = _int_memalign (&main_arena, alignment, bytes);
3538 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3539 &main_arena == arena_for_chunk (mem2chunk (p)));
3540 return tag_new_usable (p);
3543 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3545 p = _int_memalign (ar_ptr, alignment, bytes);
3546 if (!p && ar_ptr != NULL)
3548 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3549 ar_ptr = arena_get_retry (ar_ptr, bytes);
3550 p = _int_memalign (ar_ptr, alignment, bytes);
3553 if (ar_ptr != NULL)
3554 __libc_lock_unlock (ar_ptr->mutex);
3556 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3557 ar_ptr == arena_for_chunk (mem2chunk (p)));
3558 return tag_new_usable (p);
3560 /* For ISO C11. */
3561 weak_alias (__libc_memalign, aligned_alloc)
3562 libc_hidden_def (__libc_memalign)
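/* Usage sketch (illustration only): _mid_memalign rounds a
   non-power-of-two alignment up to the next power of two instead of
   failing, and aligned_alloc is a plain alias of __libc_memalign here,
   so ISO C's requirement that the size be a multiple of the alignment
   is not enforced by this version:

     #include <malloc.h>
     #include <stdlib.h>

     int
     main (void)
     {
       void *a = aligned_alloc (64, 100);  // 100 % 64 != 0, still works
       void *b = memalign (48, 100);       // alignment rounded up to 64
       free (a);
       free (b);
       return 0;
     }
*/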
3564 void *
3565 __libc_valloc (size_t bytes)
3567 if (!__malloc_initialized)
3568 ptmalloc_init ();
3570 void *address = RETURN_ADDRESS (0);
3571 size_t pagesize = GLRO (dl_pagesize);
3572 return _mid_memalign (pagesize, bytes, address);
3575 void *
3576 __libc_pvalloc (size_t bytes)
3578 if (!__malloc_initialized)
3579 ptmalloc_init ();
3581 void *address = RETURN_ADDRESS (0);
3582 size_t pagesize = GLRO (dl_pagesize);
3583 size_t rounded_bytes;
3584 /* ALIGN_UP with overflow check. */
3585 if (__glibc_unlikely (__builtin_add_overflow (bytes,
3586 pagesize - 1,
3587 &rounded_bytes)))
3589 __set_errno (ENOMEM);
3590 return 0;
3592 rounded_bytes = rounded_bytes & ~(pagesize - 1);
3594 return _mid_memalign (pagesize, rounded_bytes, address);
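/* Worked example (illustration only) of the page rounding above,
   assuming a 4096-byte page: a request of 5000 bytes becomes
   (5000 + 4095) & ~4095 == 8192, i.e. two full pages, before being
   handed to _mid_memalign with page alignment:

     #include <stdio.h>

     int
     main (void)
     {
       size_t pagesize = 4096;            // assumed page size
       size_t bytes = 5000;
       size_t rounded = (bytes + pagesize - 1) & ~(pagesize - 1);
       printf ("%zu -> %zu\n", bytes, rounded);    // prints 5000 -> 8192
       return 0;
     }
*/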
3597 void *
3598 __libc_calloc (size_t n, size_t elem_size)
3600 mstate av;
3601 mchunkptr oldtop;
3602 INTERNAL_SIZE_T sz, oldtopsize;
3603 void *mem;
3604 unsigned long clearsize;
3605 unsigned long nclears;
3606 INTERNAL_SIZE_T *d;
3607 ptrdiff_t bytes;
3609 if (__glibc_unlikely (__builtin_mul_overflow (n, elem_size, &bytes)))
3611 __set_errno (ENOMEM);
3612 return NULL;
3615 sz = bytes;
3617 if (!__malloc_initialized)
3618 ptmalloc_init ();
3620 MAYBE_INIT_TCACHE ();
3622 if (SINGLE_THREAD_P)
3623 av = &main_arena;
3624 else
3625 arena_get (av, sz);
3627 if (av)
3629 /* Check if we hand out the top chunk, in which case there may be no
3630 need to clear. */
3631 #if MORECORE_CLEARS
3632 oldtop = top (av);
3633 oldtopsize = chunksize (top (av));
3634 # if MORECORE_CLEARS < 2
3635 /* Only newly allocated memory is guaranteed to be cleared. */
3636 if (av == &main_arena &&
3637 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3638 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3639 # endif
3640 if (av != &main_arena)
3642 heap_info *heap = heap_for_ptr (oldtop);
3643 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3644 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3646 #endif
3648 else
3650 /* No usable arenas. */
3651 oldtop = 0;
3652 oldtopsize = 0;
3654 mem = _int_malloc (av, sz);
3656 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3657 av == arena_for_chunk (mem2chunk (mem)));
3659 if (!SINGLE_THREAD_P)
3661 if (mem == 0 && av != NULL)
3663 LIBC_PROBE (memory_calloc_retry, 1, sz);
3664 av = arena_get_retry (av, sz);
3665 mem = _int_malloc (av, sz);
3668 if (av != NULL)
3669 __libc_lock_unlock (av->mutex);
3672 /* Allocation failed even after a retry. */
3673 if (mem == 0)
3674 return 0;
3676 mchunkptr p = mem2chunk (mem);
3678 /* If we are using memory tagging, then we need to set the tags
3679 regardless of MORECORE_CLEARS, so we zero the whole block while
3680 doing so. */
3681 if (__glibc_unlikely (mtag_enabled))
3682 return tag_new_zero_region (mem, memsize (p));
3684 INTERNAL_SIZE_T csz = chunksize (p);
3686 /* Two optional cases in which clearing is not necessary */
3687 if (chunk_is_mmapped (p))
3689 if (__builtin_expect (perturb_byte, 0))
3690 return memset (mem, 0, sz);
3692 return mem;
3695 #if MORECORE_CLEARS
3696 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3698 /* clear only the bytes from non-freshly-sbrked memory */
3699 csz = oldtopsize;
3701 #endif
3703 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3704 contents have an odd number of INTERNAL_SIZE_T-sized words;
3705 minimally 3. */
3706 d = (INTERNAL_SIZE_T *) mem;
3707 clearsize = csz - SIZE_SZ;
3708 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3709 assert (nclears >= 3);
3711 if (nclears > 9)
3712 return memset (d, 0, clearsize);
3714 else
3716 *(d + 0) = 0;
3717 *(d + 1) = 0;
3718 *(d + 2) = 0;
3719 if (nclears > 4)
3721 *(d + 3) = 0;
3722 *(d + 4) = 0;
3723 if (nclears > 6)
3725 *(d + 5) = 0;
3726 *(d + 6) = 0;
3727 if (nclears > 8)
3729 *(d + 7) = 0;
3730 *(d + 8) = 0;
3736 return mem;
3738 #endif /* IS_IN (libc) */
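/* Worked example (illustration only) of the multiplication check at the
   top of __libc_calloc: the product below does not fit in ptrdiff_t, so
   __builtin_mul_overflow reports overflow and calloc fails with ENOMEM
   rather than returning a too-small buffer:

     #include <errno.h>
     #include <stdint.h>
     #include <stdlib.h>

     int
     main (void)
     {
       errno = 0;
       void *p = calloc (SIZE_MAX / 2, 4);   // product is about 2 * SIZE_MAX
       return (p == NULL && errno == ENOMEM) ? 0 : 1;
     }
*/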
3741 ------------------------------ malloc ------------------------------
3744 static void *
3745 _int_malloc (mstate av, size_t bytes)
3747 INTERNAL_SIZE_T nb; /* normalized request size */
3748 unsigned int idx; /* associated bin index */
3749 mbinptr bin; /* associated bin */
3751 mchunkptr victim; /* inspected/selected chunk */
3752 INTERNAL_SIZE_T size; /* its size */
3753 int victim_index; /* its bin index */
3755 mchunkptr remainder; /* remainder from a split */
3756 unsigned long remainder_size; /* its size */
3758 unsigned int block; /* bit map traverser */
3759 unsigned int bit; /* bit map traverser */
3760 unsigned int map; /* current word of binmap */
3762 mchunkptr fwd; /* misc temp for linking */
3763 mchunkptr bck; /* misc temp for linking */
3765 #if USE_TCACHE
3766 size_t tcache_unsorted_count; /* count of unsorted chunks processed */
3767 #endif
3770 Convert request size to internal form by adding SIZE_SZ bytes
3771 overhead plus possibly more to obtain necessary alignment and/or
3772 to obtain a size of at least MINSIZE, the smallest allocatable
3773 size. Also, checked_request2size returns 0 for request sizes
3774 that are so large that they wrap around zero when padded and
3775 aligned.
3778 nb = checked_request2size (bytes);
3779 if (nb == 0)
3781 __set_errno (ENOMEM);
3782 return NULL;
3785 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3786 mmap. */
3787 if (__glibc_unlikely (av == NULL))
3789 void *p = sysmalloc (nb, av);
3790 if (p != NULL)
3791 alloc_perturb (p, bytes);
3792 return p;
3796 If the size qualifies as a fastbin, first check corresponding bin.
3797 This code is safe to execute even if av is not yet initialized, so we
3798 can try it without checking, which saves some time on this fast path.
3801 #define REMOVE_FB(fb, victim, pp) \
3802 do \
3804 victim = pp; \
3805 if (victim == NULL) \
3806 break; \
3807 pp = REVEAL_PTR (victim->fd); \
3808 if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
3809 malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
3811 while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
3812 != victim); \
3814 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3816 idx = fastbin_index (nb);
3817 mfastbinptr *fb = &fastbin (av, idx);
3818 mchunkptr pp;
3819 victim = *fb;
3821 if (victim != NULL)
3823 if (__glibc_unlikely (misaligned_chunk (victim)))
3824 malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");
3826 if (SINGLE_THREAD_P)
3827 *fb = REVEAL_PTR (victim->fd);
3828 else
3829 REMOVE_FB (fb, pp, victim);
3830 if (__glibc_likely (victim != NULL))
3832 size_t victim_idx = fastbin_index (chunksize (victim));
3833 if (__builtin_expect (victim_idx != idx, 0))
3834 malloc_printerr ("malloc(): memory corruption (fast)");
3835 check_remalloced_chunk (av, victim, nb);
3836 #if USE_TCACHE
3837 /* While we're here, if we see other chunks of the same size,
3838 stash them in the tcache. */
3839 size_t tc_idx = csize2tidx (nb);
3840 if (tcache && tc_idx < mp_.tcache_bins)
3842 mchunkptr tc_victim;
3844 /* While bin not empty and tcache not full, copy chunks. */
3845 while (tcache->counts[tc_idx] < mp_.tcache_count
3846 && (tc_victim = *fb) != NULL)
3848 if (__glibc_unlikely (misaligned_chunk (tc_victim)))
3849 malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
3850 if (SINGLE_THREAD_P)
3851 *fb = REVEAL_PTR (tc_victim->fd);
3852 else
3854 REMOVE_FB (fb, pp, tc_victim);
3855 if (__glibc_unlikely (tc_victim == NULL))
3856 break;
3858 tcache_put (tc_victim, tc_idx);
3861 #endif
3862 void *p = chunk2mem (victim);
3863 alloc_perturb (p, bytes);
3864 return p;
3870 If a small request, check regular bin. Since these "smallbins"
3871 hold one size each, no searching within bins is necessary.
3872 (For a large request, we need to wait until unsorted chunks are
3873 processed to find best fit. But for small ones, fits are exact
3874 anyway, so we can check now, which is faster.)
3877 if (in_smallbin_range (nb))
3879 idx = smallbin_index (nb);
3880 bin = bin_at (av, idx);
3882 if ((victim = last (bin)) != bin)
3884 bck = victim->bk;
3885 if (__glibc_unlikely (bck->fd != victim))
3886 malloc_printerr ("malloc(): smallbin double linked list corrupted");
3887 set_inuse_bit_at_offset (victim, nb);
3888 bin->bk = bck;
3889 bck->fd = bin;
3891 if (av != &main_arena)
3892 set_non_main_arena (victim);
3893 check_malloced_chunk (av, victim, nb);
3894 #if USE_TCACHE
3895 /* While we're here, if we see other chunks of the same size,
3896 stash them in the tcache. */
3897 size_t tc_idx = csize2tidx (nb);
3898 if (tcache && tc_idx < mp_.tcache_bins)
3900 mchunkptr tc_victim;
3902 /* While bin not empty and tcache not full, copy chunks over. */
3903 while (tcache->counts[tc_idx] < mp_.tcache_count
3904 && (tc_victim = last (bin)) != bin)
3906 if (tc_victim != 0)
3908 bck = tc_victim->bk;
3909 set_inuse_bit_at_offset (tc_victim, nb);
3910 if (av != &main_arena)
3911 set_non_main_arena (tc_victim);
3912 bin->bk = bck;
3913 bck->fd = bin;
3915 tcache_put (tc_victim, tc_idx);
3919 #endif
3920 void *p = chunk2mem (victim);
3921 alloc_perturb (p, bytes);
3922 return p;
3927 If this is a large request, consolidate fastbins before continuing.
3928 While it might look excessive to kill all fastbins before
3929 even seeing if there is space available, this avoids
3930 fragmentation problems normally associated with fastbins.
3931 Also, in practice, programs tend to have runs of either small or
3932 large requests, but less often mixtures, so consolidation is not
3933 invoked all that often in most programs. And the programs in which
3934 it is called frequently would otherwise tend to fragment.
3937 else
3939 idx = largebin_index (nb);
3940 if (atomic_load_relaxed (&av->have_fastchunks))
3941 malloc_consolidate (av);
3945 Process recently freed or remaindered chunks, taking one only if
3946 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3947 the most recent non-exact fit. Place other traversed chunks in
3948 bins. Note that this step is the only place in any routine where
3949 chunks are placed in bins.
3951 The outer loop here is needed because we might not realize until
3952 near the end of malloc that we should have consolidated, so must
3953 do so and retry. This happens at most once, and only when we would
3954 otherwise need to expand memory to service a "small" request.
3957 #if USE_TCACHE
3958 INTERNAL_SIZE_T tcache_nb = 0;
3959 size_t tc_idx = csize2tidx (nb);
3960 if (tcache && tc_idx < mp_.tcache_bins)
3961 tcache_nb = nb;
3962 int return_cached = 0;
3964 tcache_unsorted_count = 0;
3965 #endif
3967 for (;; )
3969 int iters = 0;
3970 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3972 bck = victim->bk;
3973 size = chunksize (victim);
3974 mchunkptr next = chunk_at_offset (victim, size);
3976 if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
3977 || __glibc_unlikely (size > av->system_mem))
3978 malloc_printerr ("malloc(): invalid size (unsorted)");
3979 if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
3980 || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
3981 malloc_printerr ("malloc(): invalid next size (unsorted)");
3982 if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
3983 malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
3984 if (__glibc_unlikely (bck->fd != victim)
3985 || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
3986 malloc_printerr ("malloc(): unsorted double linked list corrupted");
3987 if (__glibc_unlikely (prev_inuse (next)))
3988 malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
3991 If a small request, try to use last remainder if it is the
3992 only chunk in unsorted bin. This helps promote locality for
3993 runs of consecutive small requests. This is the only
3994 exception to best-fit, and applies only when there is
3995 no exact fit for a small chunk.
3998 if (in_smallbin_range (nb) &&
3999 bck == unsorted_chunks (av) &&
4000 victim == av->last_remainder &&
4001 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4003 /* split and reattach remainder */
4004 remainder_size = size - nb;
4005 remainder = chunk_at_offset (victim, nb);
4006 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
4007 av->last_remainder = remainder;
4008 remainder->bk = remainder->fd = unsorted_chunks (av);
4009 if (!in_smallbin_range (remainder_size))
4011 remainder->fd_nextsize = NULL;
4012 remainder->bk_nextsize = NULL;
4015 set_head (victim, nb | PREV_INUSE |
4016 (av != &main_arena ? NON_MAIN_ARENA : 0));
4017 set_head (remainder, remainder_size | PREV_INUSE);
4018 set_foot (remainder, remainder_size);
4020 check_malloced_chunk (av, victim, nb);
4021 void *p = chunk2mem (victim);
4022 alloc_perturb (p, bytes);
4023 return p;
4026 /* remove from unsorted list */
4027 if (__glibc_unlikely (bck->fd != victim))
4028 malloc_printerr ("malloc(): corrupted unsorted chunks 3");
4029 unsorted_chunks (av)->bk = bck;
4030 bck->fd = unsorted_chunks (av);
4032 /* Take now instead of binning if exact fit */
4034 if (size == nb)
4036 set_inuse_bit_at_offset (victim, size);
4037 if (av != &main_arena)
4038 set_non_main_arena (victim);
4039 #if USE_TCACHE
4040 /* Fill cache first, return to user only if cache fills.
4041 We may return one of these chunks later. */
4042 if (tcache_nb
4043 && tcache->counts[tc_idx] < mp_.tcache_count)
4045 tcache_put (victim, tc_idx);
4046 return_cached = 1;
4047 continue;
4049 else
4051 #endif
4052 check_malloced_chunk (av, victim, nb);
4053 void *p = chunk2mem (victim);
4054 alloc_perturb (p, bytes);
4055 return p;
4056 #if USE_TCACHE
4058 #endif
4061 /* place chunk in bin */
4063 if (in_smallbin_range (size))
4065 victim_index = smallbin_index (size);
4066 bck = bin_at (av, victim_index);
4067 fwd = bck->fd;
4069 else
4071 victim_index = largebin_index (size);
4072 bck = bin_at (av, victim_index);
4073 fwd = bck->fd;
4075 /* maintain large bins in sorted order */
4076 if (fwd != bck)
4078 /* Or with inuse bit to speed comparisons */
4079 size |= PREV_INUSE;
4080 /* if smaller than smallest, bypass loop below */
4081 assert (chunk_main_arena (bck->bk));
4082 if ((unsigned long) (size)
4083 < (unsigned long) chunksize_nomask (bck->bk))
4085 fwd = bck;
4086 bck = bck->bk;
4088 victim->fd_nextsize = fwd->fd;
4089 victim->bk_nextsize = fwd->fd->bk_nextsize;
4090 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4092 else
4094 assert (chunk_main_arena (fwd));
4095 while ((unsigned long) size < chunksize_nomask (fwd))
4097 fwd = fwd->fd_nextsize;
4098 assert (chunk_main_arena (fwd));
4101 if ((unsigned long) size
4102 == (unsigned long) chunksize_nomask (fwd))
4103 /* Always insert in the second position. */
4104 fwd = fwd->fd;
4105 else
4107 victim->fd_nextsize = fwd;
4108 victim->bk_nextsize = fwd->bk_nextsize;
4109 if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
4110 malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
4111 fwd->bk_nextsize = victim;
4112 victim->bk_nextsize->fd_nextsize = victim;
4114 bck = fwd->bk;
4115 if (bck->fd != fwd)
4116 malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
4119 else
4120 victim->fd_nextsize = victim->bk_nextsize = victim;
4123 mark_bin (av, victim_index);
4124 victim->bk = bck;
4125 victim->fd = fwd;
4126 fwd->bk = victim;
4127 bck->fd = victim;
4129 #if USE_TCACHE
4130 /* If we've processed as many chunks as we're allowed while
4131 filling the cache, return one of the cached ones. */
4132 ++tcache_unsorted_count;
4133 if (return_cached
4134 && mp_.tcache_unsorted_limit > 0
4135 && tcache_unsorted_count > mp_.tcache_unsorted_limit)
4137 return tcache_get (tc_idx);
4139 #endif
4141 #define MAX_ITERS 10000
4142 if (++iters >= MAX_ITERS)
4143 break;
4146 #if USE_TCACHE
4147 /* If all the small chunks we found ended up cached, return one now. */
4148 if (return_cached)
4150 return tcache_get (tc_idx);
4152 #endif
4155 If a large request, scan through the chunks of current bin in
4156 sorted order to find smallest that fits. Use the skip list for this.
4159 if (!in_smallbin_range (nb))
4161 bin = bin_at (av, idx);
4163 /* skip scan if empty or largest chunk is too small */
4164 if ((victim = first (bin)) != bin
4165 && (unsigned long) chunksize_nomask (victim)
4166 >= (unsigned long) (nb))
4168 victim = victim->bk_nextsize;
4169 while (((unsigned long) (size = chunksize (victim)) <
4170 (unsigned long) (nb)))
4171 victim = victim->bk_nextsize;
4173 /* Avoid removing the first entry for a size so that the skip
4174 list does not have to be rerouted. */
4175 if (victim != last (bin)
4176 && chunksize_nomask (victim)
4177 == chunksize_nomask (victim->fd))
4178 victim = victim->fd;
4180 remainder_size = size - nb;
4181 unlink_chunk (av, victim);
4183 /* Exhaust */
4184 if (remainder_size < MINSIZE)
4186 set_inuse_bit_at_offset (victim, size);
4187 if (av != &main_arena)
4188 set_non_main_arena (victim);
4190 /* Split */
4191 else
4193 remainder = chunk_at_offset (victim, nb);
4194 /* We cannot assume the unsorted list is empty and therefore
4195 have to perform a complete insert here. */
4196 bck = unsorted_chunks (av);
4197 fwd = bck->fd;
4198 if (__glibc_unlikely (fwd->bk != bck))
4199 malloc_printerr ("malloc(): corrupted unsorted chunks");
4200 remainder->bk = bck;
4201 remainder->fd = fwd;
4202 bck->fd = remainder;
4203 fwd->bk = remainder;
4204 if (!in_smallbin_range (remainder_size))
4206 remainder->fd_nextsize = NULL;
4207 remainder->bk_nextsize = NULL;
4209 set_head (victim, nb | PREV_INUSE |
4210 (av != &main_arena ? NON_MAIN_ARENA : 0));
4211 set_head (remainder, remainder_size | PREV_INUSE);
4212 set_foot (remainder, remainder_size);
4214 check_malloced_chunk (av, victim, nb);
4215 void *p = chunk2mem (victim);
4216 alloc_perturb (p, bytes);
4217 return p;
4222 Search for a chunk by scanning bins, starting with next largest
4223 bin. This search is strictly by best-fit; i.e., the smallest
4224 (with ties going to approximately the least recently used) chunk
4225 that fits is selected.
4227 The bitmap avoids needing to check that most blocks are nonempty.
4228 The particular case of skipping all bins during warm-up phases
4229 when no chunks have been returned yet is faster than it might look.
4232 ++idx;
4233 bin = bin_at (av, idx);
4234 block = idx2block (idx);
4235 map = av->binmap[block];
4236 bit = idx2bit (idx);
4238 for (;; )
4240 /* Skip rest of block if there are no more set bits in this block. */
4241 if (bit > map || bit == 0)
4245 if (++block >= BINMAPSIZE) /* out of bins */
4246 goto use_top;
4248 while ((map = av->binmap[block]) == 0);
4250 bin = bin_at (av, (block << BINMAPSHIFT));
4251 bit = 1;
4254 /* Advance to bin with set bit. There must be one. */
4255 while ((bit & map) == 0)
4257 bin = next_bin (bin);
4258 bit <<= 1;
4259 assert (bit != 0);
4262 /* Inspect the bin. It is likely to be non-empty */
4263 victim = last (bin);
4265 /* If a false alarm (empty bin), clear the bit. */
4266 if (victim == bin)
4268 av->binmap[block] = map &= ~bit; /* Write through */
4269 bin = next_bin (bin);
4270 bit <<= 1;
4273 else
4275 size = chunksize (victim);
4277 /* We know the first chunk in this bin is big enough to use. */
4278 assert ((unsigned long) (size) >= (unsigned long) (nb));
4280 remainder_size = size - nb;
4282 /* unlink */
4283 unlink_chunk (av, victim);
4285 /* Exhaust */
4286 if (remainder_size < MINSIZE)
4288 set_inuse_bit_at_offset (victim, size);
4289 if (av != &main_arena)
4290 set_non_main_arena (victim);
4293 /* Split */
4294 else
4296 remainder = chunk_at_offset (victim, nb);
4298 /* We cannot assume the unsorted list is empty and therefore
4299 have to perform a complete insert here. */
4300 bck = unsorted_chunks (av);
4301 fwd = bck->fd;
4302 if (__glibc_unlikely (fwd->bk != bck))
4303 malloc_printerr ("malloc(): corrupted unsorted chunks 2");
4304 remainder->bk = bck;
4305 remainder->fd = fwd;
4306 bck->fd = remainder;
4307 fwd->bk = remainder;
4309 /* advertise as last remainder */
4310 if (in_smallbin_range (nb))
4311 av->last_remainder = remainder;
4312 if (!in_smallbin_range (remainder_size))
4314 remainder->fd_nextsize = NULL;
4315 remainder->bk_nextsize = NULL;
4317 set_head (victim, nb | PREV_INUSE |
4318 (av != &main_arena ? NON_MAIN_ARENA : 0));
4319 set_head (remainder, remainder_size | PREV_INUSE);
4320 set_foot (remainder, remainder_size);
4322 check_malloced_chunk (av, victim, nb);
4323 void *p = chunk2mem (victim);
4324 alloc_perturb (p, bytes);
4325 return p;
4329 use_top:
4331 If large enough, split off the chunk bordering the end of memory
4332 (held in av->top). Note that this is in accord with the best-fit
4333 search rule. In effect, av->top is treated as larger (and thus
4334 less well fitting) than any other available chunk since it can
4335 be extended to be as large as necessary (up to system
4336 limitations).
4338 We require that av->top always exists (i.e., has size >=
4339 MINSIZE) after initialization, so if it would otherwise be
4340 exhausted by current request, it is replenished. (The main
4341 reason for ensuring it exists is that we may need MINSIZE space
4342 to put in fenceposts in sysmalloc.)
4345 victim = av->top;
4346 size = chunksize (victim);
4348 if (__glibc_unlikely (size > av->system_mem))
4349 malloc_printerr ("malloc(): corrupted top size");
4351 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
4353 remainder_size = size - nb;
4354 remainder = chunk_at_offset (victim, nb);
4355 av->top = remainder;
4356 set_head (victim, nb | PREV_INUSE |
4357 (av != &main_arena ? NON_MAIN_ARENA : 0));
4358 set_head (remainder, remainder_size | PREV_INUSE);
4360 check_malloced_chunk (av, victim, nb);
4361 void *p = chunk2mem (victim);
4362 alloc_perturb (p, bytes);
4363 return p;
4366 /* When we are using atomic ops to free fast chunks we can get
4367 here for all block sizes. */
4368 else if (atomic_load_relaxed (&av->have_fastchunks))
4370 malloc_consolidate (av);
4371 /* restore original bin index */
4372 if (in_smallbin_range (nb))
4373 idx = smallbin_index (nb);
4374 else
4375 idx = largebin_index (nb);
4379 Otherwise, relay to handle system-dependent cases
4381 else
4383 void *p = sysmalloc (nb, av);
4384 if (p != NULL)
4385 alloc_perturb (p, bytes);
4386 return p;
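/* Worked example (illustration only, assuming the usual 64-bit layout
   with SIZE_SZ == 8, MALLOC_ALIGNMENT == 16 and MINSIZE == 32): a
   malloc (24) request is padded to nb == 32 (24 bytes plus an 8-byte
   size field, rounded to 16-byte alignment), which is the smallest
   fastbin class; fastbin_index (32) == (32 >> 4) - 2 == 0 and
   csize2tidx maps the same chunk size to tcache bin 0.  The same
   arithmetic, reproduced outside the allocator:

     #include <stdio.h>

     int
     main (void)
     {
       size_t req = 24, size_sz = 8, align_mask = 15, minsize = 32;
       size_t nb = req + size_sz + align_mask < minsize
                   ? minsize
                   : (req + size_sz + align_mask) & ~align_mask;
       printf ("nb=%zu fastbin=%zu tcache=%zu\n",
               nb, (nb >> 4) - 2, (nb - minsize + 15) / 16);
       // prints: nb=32 fastbin=0 tcache=0
       return 0;
     }
*/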
4392 ------------------------------ free ------------------------------
4395 static void
4396 _int_free (mstate av, mchunkptr p, int have_lock)
4398 INTERNAL_SIZE_T size; /* its size */
4399 mfastbinptr *fb; /* associated fastbin */
4400 mchunkptr nextchunk; /* next contiguous chunk */
4401 INTERNAL_SIZE_T nextsize; /* its size */
4402 int nextinuse; /* true if nextchunk is used */
4403 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4404 mchunkptr bck; /* misc temp for linking */
4405 mchunkptr fwd; /* misc temp for linking */
4407 size = chunksize (p);
4409 /* Little security check which won't hurt performance: the
4410 allocator never wraps around at the end of the address space.
4411 Therefore we can exclude some size values which might appear
4412 here by accident or by "design" from some intruder. */
4413 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4414 || __builtin_expect (misaligned_chunk (p), 0))
4415 malloc_printerr ("free(): invalid pointer");
4416 /* We know that each chunk is at least MINSIZE bytes in size and a
4417 multiple of MALLOC_ALIGNMENT. */
4418 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
4419 malloc_printerr ("free(): invalid size");
4421 check_inuse_chunk(av, p);
4423 #if USE_TCACHE
4425 size_t tc_idx = csize2tidx (size);
4426 if (tcache != NULL && tc_idx < mp_.tcache_bins)
4428 /* Check to see if it's already in the tcache. */
4429 tcache_entry *e = (tcache_entry *) chunk2mem (p);
4431 /* This test succeeds on double free. However, we don't 100%
4432 trust it (it also matches random payload data at a 1 in
4433 2^<size_t> chance), so verify it's not an unlikely
4434 coincidence before aborting. */
4435 if (__glibc_unlikely (e->key == tcache_key))
4437 tcache_entry *tmp;
4438 size_t cnt = 0;
4439 LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
4440 for (tmp = tcache->entries[tc_idx];
4441 tmp;
4442 tmp = REVEAL_PTR (tmp->next), ++cnt)
4444 if (cnt >= mp_.tcache_count)
4445 malloc_printerr ("free(): too many chunks detected in tcache");
4446 if (__glibc_unlikely (!aligned_OK (tmp)))
4447 malloc_printerr ("free(): unaligned chunk detected in tcache 2");
4448 if (tmp == e)
4449 malloc_printerr ("free(): double free detected in tcache 2");
4450 /* If we get here, it was a coincidence. We've wasted a
4451 few cycles, but don't abort. */
4455 if (tcache->counts[tc_idx] < mp_.tcache_count)
4457 tcache_put (p, tc_idx);
4458 return;
4462 #endif
4465 If eligible, place chunk on a fastbin so it can be found
4466 and used quickly in malloc.
4469 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4471 #if TRIM_FASTBINS
4473 If TRIM_FASTBINS set, don't place chunks
4474 bordering top into fastbins
4476 && (chunk_at_offset(p, size) != av->top)
4477 #endif
4480 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
4481 <= CHUNK_HDR_SZ, 0)
4482 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4483 >= av->system_mem, 0))
4485 bool fail = true;
4486 /* We might not have a lock at this point and concurrent modifications
4487 of system_mem might result in a false positive. Redo the test after
4488 getting the lock. */
4489 if (!have_lock)
4491 __libc_lock_lock (av->mutex);
4492 fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
4493 || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
4494 __libc_lock_unlock (av->mutex);
4497 if (fail)
4498 malloc_printerr ("free(): invalid next size (fast)");
4501 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4503 atomic_store_relaxed (&av->have_fastchunks, true);
4504 unsigned int idx = fastbin_index(size);
4505 fb = &fastbin (av, idx);
4507 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
4508 mchunkptr old = *fb, old2;
4510 if (SINGLE_THREAD_P)
4512 /* Check that the top of the bin is not the record we are going to
4513 add (i.e., double free). */
4514 if (__builtin_expect (old == p, 0))
4515 malloc_printerr ("double free or corruption (fasttop)");
4516 p->fd = PROTECT_PTR (&p->fd, old);
4517 *fb = p;
4519 else
4522 /* Check that the top of the bin is not the record we are going to
4523 add (i.e., double free). */
4524 if (__builtin_expect (old == p, 0))
4525 malloc_printerr ("double free or corruption (fasttop)");
4526 old2 = old;
4527 p->fd = PROTECT_PTR (&p->fd, old);
4529 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
4530 != old2);
4532 /* Check that size of fastbin chunk at the top is the same as
4533 size of the chunk that we are adding. We can dereference OLD
4534 only if we have the lock, otherwise it might have already been
4535 allocated again. */
4536 if (have_lock && old != NULL
4537 && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
4538 malloc_printerr ("invalid fastbin entry (free)");
4542 Consolidate other non-mmapped chunks as they arrive.
4545 else if (!chunk_is_mmapped(p)) {
4547 /* If we're single-threaded, don't lock the arena. */
4548 if (SINGLE_THREAD_P)
4549 have_lock = true;
4551 if (!have_lock)
4552 __libc_lock_lock (av->mutex);
4554 nextchunk = chunk_at_offset(p, size);
4556 /* Lightweight tests: check whether the block is already the
4557 top block. */
4558 if (__glibc_unlikely (p == av->top))
4559 malloc_printerr ("double free or corruption (top)");
4560 /* Or whether the next chunk is beyond the boundaries of the arena. */
4561 if (__builtin_expect (contiguous (av)
4562 && (char *) nextchunk
4563 >= ((char *) av->top + chunksize(av->top)), 0))
4564 malloc_printerr ("double free or corruption (out)");
4565 /* Or whether the block is actually not marked used. */
4566 if (__glibc_unlikely (!prev_inuse(nextchunk)))
4567 malloc_printerr ("double free or corruption (!prev)");
4569 nextsize = chunksize(nextchunk);
4570 if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
4571 || __builtin_expect (nextsize >= av->system_mem, 0))
4572 malloc_printerr ("free(): invalid next size (normal)");
4574 free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
4576 /* consolidate backward */
4577 if (!prev_inuse(p)) {
4578 prevsize = prev_size (p);
4579 size += prevsize;
4580 p = chunk_at_offset(p, -((long) prevsize));
4581 if (__glibc_unlikely (chunksize(p) != prevsize))
4582 malloc_printerr ("corrupted size vs. prev_size while consolidating");
4583 unlink_chunk (av, p);
4586 if (nextchunk != av->top) {
4587 /* get and clear inuse bit */
4588 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4590 /* consolidate forward */
4591 if (!nextinuse) {
4592 unlink_chunk (av, nextchunk);
4593 size += nextsize;
4594 } else
4595 clear_inuse_bit_at_offset(nextchunk, 0);
4598 Place the chunk in unsorted chunk list. Chunks are
4599 not placed into regular bins until after they have
4600 been given one chance to be used in malloc.
4603 bck = unsorted_chunks(av);
4604 fwd = bck->fd;
4605 if (__glibc_unlikely (fwd->bk != bck))
4606 malloc_printerr ("free(): corrupted unsorted chunks");
4607 p->fd = fwd;
4608 p->bk = bck;
4609 if (!in_smallbin_range(size))
4611 p->fd_nextsize = NULL;
4612 p->bk_nextsize = NULL;
4614 bck->fd = p;
4615 fwd->bk = p;
4617 set_head(p, size | PREV_INUSE);
4618 set_foot(p, size);
4620 check_free_chunk(av, p);
4624 If the chunk borders the current high end of memory,
4625 consolidate into top
4628 else {
4629 size += nextsize;
4630 set_head(p, size | PREV_INUSE);
4631 av->top = p;
4632 check_chunk(av, p);
4636 If freeing a large space, consolidate possibly-surrounding
4637 chunks. Then, if the total unused topmost memory exceeds trim
4638 threshold, ask malloc_trim to reduce top.
4640 Unless max_fast is 0, we don't know if there are fastbins
4641 bordering top, so we cannot tell for sure whether threshold
4642 has been reached unless fastbins are consolidated. But we
4643 don't want to consolidate on each free. As a compromise,
4644 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4645 is reached.
4648 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4649 if (atomic_load_relaxed (&av->have_fastchunks))
4650 malloc_consolidate(av);
4652 if (av == &main_arena) {
4653 #ifndef MORECORE_CANNOT_TRIM
4654 if ((unsigned long)(chunksize(av->top)) >=
4655 (unsigned long)(mp_.trim_threshold))
4656 systrim(mp_.top_pad, av);
4657 #endif
4658 } else {
4659 /* Always try heap_trim(), even if the top chunk is not
4660 large, because the corresponding heap might go away. */
4661 heap_info *heap = heap_for_ptr(top(av));
4663 assert(heap->ar_ptr == av);
4664 heap_trim(heap, mp_.top_pad);
4668 if (!have_lock)
4669 __libc_lock_unlock (av->mutex);
4672 If the chunk was allocated via mmap, release via munmap().
4675 else {
4676 munmap_chunk (p);
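/* Illustration only: the tcache key check near the top of _int_free is
   what turns the naive double free below into an immediate abort with
   "free(): double free detected in tcache 2" rather than silent list
   corruption:

     #include <stdlib.h>

     int
     main (void)
     {
       char *p = malloc (32);
       free (p);
       free (p);       // aborts via malloc_printerr
       return 0;       // not reached
     }
*/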
4681 ------------------------- malloc_consolidate -------------------------
4683 malloc_consolidate is a specialized version of free() that tears
4684 down chunks held in fastbins. Free itself cannot be used for this
4685 purpose since, among other things, it might place chunks back onto
4686 fastbins. So, instead, we need to use a minor variant of the same
4687 code.
4690 static void malloc_consolidate(mstate av)
4692 mfastbinptr* fb; /* current fastbin being consolidated */
4693 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4694 mchunkptr p; /* current chunk being consolidated */
4695 mchunkptr nextp; /* next chunk to consolidate */
4696 mchunkptr unsorted_bin; /* bin header */
4697 mchunkptr first_unsorted; /* chunk to link to */
4699 /* These have same use as in free() */
4700 mchunkptr nextchunk;
4701 INTERNAL_SIZE_T size;
4702 INTERNAL_SIZE_T nextsize;
4703 INTERNAL_SIZE_T prevsize;
4704 int nextinuse;
4706 atomic_store_relaxed (&av->have_fastchunks, false);
4708 unsorted_bin = unsorted_chunks(av);
4711 Remove each chunk from fast bin and consolidate it, placing it
4712 then in unsorted bin. Among other reasons for doing this,
4713 placing in unsorted bin avoids needing to calculate actual bins
4714 until malloc is sure that chunks aren't immediately going to be
4715 reused anyway.
4718 maxfb = &fastbin (av, NFASTBINS - 1);
4719 fb = &fastbin (av, 0);
4720 do {
4721 p = atomic_exchange_acq (fb, NULL);
4722 if (p != 0) {
4723 do {
4725 if (__glibc_unlikely (misaligned_chunk (p)))
4726 malloc_printerr ("malloc_consolidate(): "
4727 "unaligned fastbin chunk detected");
4729 unsigned int idx = fastbin_index (chunksize (p));
4730 if ((&fastbin (av, idx)) != fb)
4731 malloc_printerr ("malloc_consolidate(): invalid chunk size");
4734 check_inuse_chunk(av, p);
4735 nextp = REVEAL_PTR (p->fd);
4737 /* Slightly streamlined version of consolidation code in free() */
4738 size = chunksize (p);
4739 nextchunk = chunk_at_offset(p, size);
4740 nextsize = chunksize(nextchunk);
4742 if (!prev_inuse(p)) {
4743 prevsize = prev_size (p);
4744 size += prevsize;
4745 p = chunk_at_offset(p, -((long) prevsize));
4746 if (__glibc_unlikely (chunksize(p) != prevsize))
4747 malloc_printerr ("corrupted size vs. prev_size in fastbins");
4748 unlink_chunk (av, p);
4751 if (nextchunk != av->top) {
4752 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4754 if (!nextinuse) {
4755 size += nextsize;
4756 unlink_chunk (av, nextchunk);
4757 } else
4758 clear_inuse_bit_at_offset(nextchunk, 0);
4760 first_unsorted = unsorted_bin->fd;
4761 unsorted_bin->fd = p;
4762 first_unsorted->bk = p;
4764 if (!in_smallbin_range (size)) {
4765 p->fd_nextsize = NULL;
4766 p->bk_nextsize = NULL;
4769 set_head(p, size | PREV_INUSE);
4770 p->bk = unsorted_bin;
4771 p->fd = first_unsorted;
4772 set_foot(p, size);
4775 else {
4776 size += nextsize;
4777 set_head(p, size | PREV_INUSE);
4778 av->top = p;
4781 } while ( (p = nextp) != 0);
4784 } while (fb++ != maxfb);
4788 ------------------------------ realloc ------------------------------
4791 static void *
4792 _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4793 INTERNAL_SIZE_T nb)
4795 mchunkptr newp; /* chunk to return */
4796 INTERNAL_SIZE_T newsize; /* its size */
4797 void* newmem; /* corresponding user mem */
4799 mchunkptr next; /* next contiguous chunk after oldp */
4801 mchunkptr remainder; /* extra space at end of newp */
4802 unsigned long remainder_size; /* its size */
4804 /* oldmem size */
4805 if (__builtin_expect (chunksize_nomask (oldp) <= CHUNK_HDR_SZ, 0)
4806 || __builtin_expect (oldsize >= av->system_mem, 0))
4807 malloc_printerr ("realloc(): invalid old size");
4809 check_inuse_chunk (av, oldp);
4811 /* All callers already filter out mmap'ed chunks. */
4812 assert (!chunk_is_mmapped (oldp));
4814 next = chunk_at_offset (oldp, oldsize);
4815 INTERNAL_SIZE_T nextsize = chunksize (next);
4816 if (__builtin_expect (chunksize_nomask (next) <= CHUNK_HDR_SZ, 0)
4817 || __builtin_expect (nextsize >= av->system_mem, 0))
4818 malloc_printerr ("realloc(): invalid next size");
4820 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4822 /* already big enough; split below */
4823 newp = oldp;
4824 newsize = oldsize;
4827 else
4829 /* Try to expand forward into top */
4830 if (next == av->top &&
4831 (unsigned long) (newsize = oldsize + nextsize) >=
4832 (unsigned long) (nb + MINSIZE))
4834 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4835 av->top = chunk_at_offset (oldp, nb);
4836 set_head (av->top, (newsize - nb) | PREV_INUSE);
4837 check_inuse_chunk (av, oldp);
4838 return tag_new_usable (chunk2mem (oldp));
4841 /* Try to expand forward into next chunk; split off remainder below */
4842 else if (next != av->top &&
4843 !inuse (next) &&
4844 (unsigned long) (newsize = oldsize + nextsize) >=
4845 (unsigned long) (nb))
4847 newp = oldp;
4848 unlink_chunk (av, next);
4851 /* allocate, copy, free */
4852 else
4854 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4855 if (newmem == 0)
4856 return 0; /* propagate failure */
4858 newp = mem2chunk (newmem);
4859 newsize = chunksize (newp);
4862 Avoid copy if newp is next chunk after oldp.
4864 if (newp == next)
4866 newsize += oldsize;
4867 newp = oldp;
4869 else
4871 void *oldmem = chunk2mem (oldp);
4872 size_t sz = memsize (oldp);
4873 (void) tag_region (oldmem, sz);
4874 newmem = tag_new_usable (newmem);
4875 memcpy (newmem, oldmem, sz);
4876 _int_free (av, oldp, 1);
4877 check_inuse_chunk (av, newp);
4878 return newmem;
4883 /* If possible, free extra space in old or extended chunk */
4885 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4887 remainder_size = newsize - nb;
4889 if (remainder_size < MINSIZE) /* not enough extra to split off */
4891 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4892 set_inuse_bit_at_offset (newp, newsize);
4894 else /* split remainder */
4896 remainder = chunk_at_offset (newp, nb);
4897 /* Clear any user-space tags before writing the header. */
4898 remainder = tag_region (remainder, remainder_size);
4899 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4900 set_head (remainder, remainder_size | PREV_INUSE |
4901 (av != &main_arena ? NON_MAIN_ARENA : 0));
4902 /* Mark remainder as inuse so free() won't complain */
4903 set_inuse_bit_at_offset (remainder, remainder_size);
4904 _int_free (av, remainder, 1);
4907 check_inuse_chunk (av, newp);
4908 return tag_new_usable (chunk2mem (newp));
4912 ------------------------------ memalign ------------------------------
4915 static void *
4916 _int_memalign (mstate av, size_t alignment, size_t bytes)
4918 INTERNAL_SIZE_T nb; /* padded request size */
4919 char *m; /* memory returned by malloc call */
4920 mchunkptr p; /* corresponding chunk */
4921 char *brk; /* alignment point within p */
4922 mchunkptr newp; /* chunk to return */
4923 INTERNAL_SIZE_T newsize; /* its size */
4924 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4925 mchunkptr remainder; /* spare room at end to split off */
4926 unsigned long remainder_size; /* its size */
4927 INTERNAL_SIZE_T size;
4931 nb = checked_request2size (bytes);
4932 if (nb == 0)
4934 __set_errno (ENOMEM);
4935 return NULL;
4939 Strategy: find a spot within that chunk that meets the alignment
4940 request, and then possibly free the leading and trailing space.
4943 /* Call malloc with worst case padding to hit alignment. */
4945 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4947 if (m == 0)
4948 return 0; /* propagate failure */
4950 p = mem2chunk (m);
4952 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4954 { /*
4955 Find an aligned spot inside chunk. Since we need to give back
4956 leading space in a chunk of at least MINSIZE, if the first
4957 calculation places us at a spot with less than MINSIZE leader,
4958 we can move to the next aligned spot -- we've allocated enough
4959 total room so that this is always possible.
4961 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4962 - ((signed long) alignment));
4963 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4964 brk += alignment;
4966 newp = (mchunkptr) brk;
4967 leadsize = brk - (char *) (p);
4968 newsize = chunksize (p) - leadsize;
4970 /* For mmapped chunks, just adjust offset */
4971 if (chunk_is_mmapped (p))
4973 set_prev_size (newp, prev_size (p) + leadsize);
4974 set_head (newp, newsize | IS_MMAPPED);
4975 return chunk2mem (newp);
4978 /* Otherwise, give back leader, use the rest */
4979 set_head (newp, newsize | PREV_INUSE |
4980 (av != &main_arena ? NON_MAIN_ARENA : 0));
4981 set_inuse_bit_at_offset (newp, newsize);
4982 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4983 _int_free (av, p, 1);
4984 p = newp;
4986 assert (newsize >= nb &&
4987 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
4990 /* Also give back spare room at the end */
4991 if (!chunk_is_mmapped (p))
4993 size = chunksize (p);
4994 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4996 remainder_size = size - nb;
4997 remainder = chunk_at_offset (p, nb);
4998 set_head (remainder, remainder_size | PREV_INUSE |
4999 (av != &main_arena ? NON_MAIN_ARENA : 0));
5000 set_head_size (p, nb);
5001 _int_free (av, remainder, 1);
5005 check_inuse_chunk (av, p);
5006 return chunk2mem (p);
5011 ------------------------------ malloc_trim ------------------------------
5014 static int
5015 mtrim (mstate av, size_t pad)
5017 /* Ensure all blocks are consolidated. */
5018 malloc_consolidate (av);
5020 const size_t ps = GLRO (dl_pagesize);
5021 int psindex = bin_index (ps);
5022 const size_t psm1 = ps - 1;
5024 int result = 0;
5025 for (int i = 1; i < NBINS; ++i)
5026 if (i == 1 || i >= psindex)
5028 mbinptr bin = bin_at (av, i);
5030 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5032 INTERNAL_SIZE_T size = chunksize (p);
5034 if (size > psm1 + sizeof (struct malloc_chunk))
5036 /* See whether the chunk contains at least one unused page. */
5037 char *paligned_mem = (char *) (((uintptr_t) p
5038 + sizeof (struct malloc_chunk)
5039 + psm1) & ~psm1);
5041 assert ((char *) chunk2mem (p) + 2 * CHUNK_HDR_SZ
5042 <= paligned_mem);
5043 assert ((char *) p + size > paligned_mem);
5045 /* This is the size we could potentially free. */
5046 size -= paligned_mem - (char *) p;
5048 if (size > psm1)
5050 #if MALLOC_DEBUG
5051 /* When debugging we simulate destroying the memory
5052 content. */
5053 memset (paligned_mem, 0x89, size & ~psm1);
5054 #endif
5055 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5057 result = 1;
5063 #ifndef MORECORE_CANNOT_TRIM
5064 return result | (av == &main_arena ? systrim (pad, av) : 0);
5066 #else
5067 return result;
5068 #endif
5073 __malloc_trim (size_t s)
5075 int result = 0;
5077 if (!__malloc_initialized)
5078 ptmalloc_init ();
5080 mstate ar_ptr = &main_arena;
5083 __libc_lock_lock (ar_ptr->mutex);
5084 result |= mtrim (ar_ptr, s);
5085 __libc_lock_unlock (ar_ptr->mutex);
5087 ar_ptr = ar_ptr->next;
5089 while (ar_ptr != &main_arena);
5091 return result;
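/* Usage sketch (illustration only): malloc_trim walks every arena under
   its lock, returns whole unused pages inside free chunks to the kernel
   with madvise (MADV_DONTNEED), and additionally shrinks the main
   arena's top via systrim; the result is nonzero if any memory was
   released:

     #include <malloc.h>
     #include <stdio.h>

     int
     main (void)
     {
       if (malloc_trim (0))
         puts ("released memory back to the system");
       return 0;
     }
*/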
5096 ------------------------- malloc_usable_size -------------------------
5099 static size_t
5100 musable (void *mem)
5102 mchunkptr p = mem2chunk (mem);
5104 if (chunk_is_mmapped (p))
5105 return chunksize (p) - CHUNK_HDR_SZ;
5106 else if (inuse (p))
5107 return memsize (p);
5109 return 0;
5112 #if IS_IN (libc)
5113 size_t
5114 __malloc_usable_size (void *m)
5116 if (m == NULL)
5117 return 0;
5118 return musable (m);
5120 #endif
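/* Usage sketch (illustration only): the value reported here is the
   chunk's usable payload, so it is at least the requested size and
   usually a little larger -- e.g. 24 bytes for malloc (5) on a typical
   64-bit build without memory tagging:

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       char *p = malloc (5);
       if (p != NULL)
         printf ("usable: %zu\n", malloc_usable_size (p));
       free (p);
       return 0;
     }
*/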
5123 ------------------------------ mallinfo ------------------------------
5124 Accumulate malloc statistics for arena AV into M.
5126 static void
5127 int_mallinfo (mstate av, struct mallinfo2 *m)
5129 size_t i;
5130 mbinptr b;
5131 mchunkptr p;
5132 INTERNAL_SIZE_T avail;
5133 INTERNAL_SIZE_T fastavail;
5134 int nblocks;
5135 int nfastblocks;
5137 check_malloc_state (av);
5139 /* Account for top */
5140 avail = chunksize (av->top);
5141 nblocks = 1; /* top always exists */
5143 /* traverse fastbins */
5144 nfastblocks = 0;
5145 fastavail = 0;
5147 for (i = 0; i < NFASTBINS; ++i)
5149 for (p = fastbin (av, i);
5150 p != 0;
5151 p = REVEAL_PTR (p->fd))
5153 if (__glibc_unlikely (misaligned_chunk (p)))
5154 malloc_printerr ("int_mallinfo(): "
5155 "unaligned fastbin chunk detected");
5156 ++nfastblocks;
5157 fastavail += chunksize (p);
5161 avail += fastavail;
5163 /* traverse regular bins */
5164 for (i = 1; i < NBINS; ++i)
5166 b = bin_at (av, i);
5167 for (p = last (b); p != b; p = p->bk)
5169 ++nblocks;
5170 avail += chunksize (p);
5174 m->smblks += nfastblocks;
5175 m->ordblks += nblocks;
5176 m->fordblks += avail;
5177 m->uordblks += av->system_mem - avail;
5178 m->arena += av->system_mem;
5179 m->fsmblks += fastavail;
5180 if (av == &main_arena)
5182 m->hblks = mp_.n_mmaps;
5183 m->hblkhd = mp_.mmapped_mem;
5184 m->usmblks = 0;
5185 m->keepcost = chunksize (av->top);
5190 struct mallinfo2
5191 __libc_mallinfo2 (void)
5193 struct mallinfo2 m;
5194 mstate ar_ptr;
5196 if (!__malloc_initialized)
5197 ptmalloc_init ();
5199 memset (&m, 0, sizeof (m));
5200 ar_ptr = &main_arena;
5203 __libc_lock_lock (ar_ptr->mutex);
5204 int_mallinfo (ar_ptr, &m);
5205 __libc_lock_unlock (ar_ptr->mutex);
5207 ar_ptr = ar_ptr->next;
5209 while (ar_ptr != &main_arena);
5211 return m;
5213 libc_hidden_def (__libc_mallinfo2)
5215 struct mallinfo
5216 __libc_mallinfo (void)
5218 struct mallinfo m;
5219 struct mallinfo2 m2 = __libc_mallinfo2 ();
5221 m.arena = m2.arena;
5222 m.ordblks = m2.ordblks;
5223 m.smblks = m2.smblks;
5224 m.hblks = m2.hblks;
5225 m.hblkhd = m2.hblkhd;
5226 m.usmblks = m2.usmblks;
5227 m.fsmblks = m2.fsmblks;
5228 m.uordblks = m2.uordblks;
5229 m.fordblks = m2.fordblks;
5230 m.keepcost = m2.keepcost;
5232 return m;
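/* Usage sketch (illustration only): mallinfo2 is the size_t-based
   replacement for the legacy int-based mallinfo converted from it just
   below:

     #include <malloc.h>
     #include <stdio.h>

     int
     main (void)
     {
       struct mallinfo2 mi = mallinfo2 ();
       printf ("arena=%zu free=%zu in use=%zu\n",
               mi.arena, mi.fordblks, mi.uordblks);
       return 0;
     }
*/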
5237 ------------------------------ malloc_stats ------------------------------
5240 void
5241 __malloc_stats (void)
5243 int i;
5244 mstate ar_ptr;
5245 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5247 if (!__malloc_initialized)
5248 ptmalloc_init ();
5249 _IO_flockfile (stderr);
5250 int old_flags2 = stderr->_flags2;
5251 stderr->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5252 for (i = 0, ar_ptr = &main_arena;; i++)
5254 struct mallinfo2 mi;
5256 memset (&mi, 0, sizeof (mi));
5257 __libc_lock_lock (ar_ptr->mutex);
5258 int_mallinfo (ar_ptr, &mi);
5259 fprintf (stderr, "Arena %d:\n", i);
5260 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
5261 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
5262 #if MALLOC_DEBUG > 1
5263 if (i > 0)
5264 dump_heap (heap_for_ptr (top (ar_ptr)));
5265 #endif
5266 system_b += mi.arena;
5267 in_use_b += mi.uordblks;
5268 __libc_lock_unlock (ar_ptr->mutex);
5269 ar_ptr = ar_ptr->next;
5270 if (ar_ptr == &main_arena)
5271 break;
5273 fprintf (stderr, "Total (incl. mmap):\n");
5274 fprintf (stderr, "system bytes = %10u\n", system_b);
5275 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
5276 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
5277 fprintf (stderr, "max mmap bytes = %10lu\n",
5278 (unsigned long) mp_.max_mmapped_mem);
5279 stderr->_flags2 = old_flags2;
5280 _IO_funlockfile (stderr);
5285 ------------------------------ mallopt ------------------------------
5287 static __always_inline int
5288 do_set_trim_threshold (size_t value)
5290 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
5291 mp_.no_dyn_threshold);
5292 mp_.trim_threshold = value;
5293 mp_.no_dyn_threshold = 1;
5294 return 1;
5297 static __always_inline int
5298 do_set_top_pad (size_t value)
5300 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
5301 mp_.no_dyn_threshold);
5302 mp_.top_pad = value;
5303 mp_.no_dyn_threshold = 1;
5304 return 1;
5307 static __always_inline int
5308 do_set_mmap_threshold (size_t value)
5310 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
5311 mp_.no_dyn_threshold);
5312 mp_.mmap_threshold = value;
5313 mp_.no_dyn_threshold = 1;
5314 return 1;
5317 static __always_inline int
5318 do_set_mmaps_max (int32_t value)
5320 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
5321 mp_.no_dyn_threshold);
5322 mp_.n_mmaps_max = value;
5323 mp_.no_dyn_threshold = 1;
5324 return 1;
5327 static __always_inline int
5328 do_set_mallopt_check (int32_t value)
5330 return 1;
5333 static __always_inline int
5334 do_set_perturb_byte (int32_t value)
5336 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
5337 perturb_byte = value;
5338 return 1;
5341 static __always_inline int
5342 do_set_arena_test (size_t value)
5344 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
5345 mp_.arena_test = value;
5346 return 1;
5349 static __always_inline int
5350 do_set_arena_max (size_t value)
5352 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
5353 mp_.arena_max = value;
5354 return 1;
5357 #if USE_TCACHE
5358 static __always_inline int
5359 do_set_tcache_max (size_t value)
5361 if (value <= MAX_TCACHE_SIZE)
5363 LIBC_PROBE (memory_tunable_tcache_max_bytes, 2, value, mp_.tcache_max_bytes);
5364 mp_.tcache_max_bytes = value;
5365 mp_.tcache_bins = csize2tidx (request2size(value)) + 1;
5366 return 1;
5368 return 0;
5371 static __always_inline int
5372 do_set_tcache_count (size_t value)
5374 if (value <= MAX_TCACHE_COUNT)
5376 LIBC_PROBE (memory_tunable_tcache_count, 2, value, mp_.tcache_count);
5377 mp_.tcache_count = value;
5378 return 1;
5380 return 0;
5383 static __always_inline int
5384 do_set_tcache_unsorted_limit (size_t value)
5386 LIBC_PROBE (memory_tunable_tcache_unsorted_limit, 2, value, mp_.tcache_unsorted_limit);
5387 mp_.tcache_unsorted_limit = value;
5388 return 1;
5390 #endif
5392 static __always_inline int
5393 do_set_mxfast (size_t value)
5395 if (value <= MAX_FAST_SIZE)
5397 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
5398 set_max_fast (value);
5399 return 1;
5401 return 0;
5404 #if HAVE_TUNABLES
5405 static __always_inline int
5406 do_set_hugetlb (size_t value)
5408 if (value == 1)
5410 enum malloc_thp_mode_t thp_mode = __malloc_thp_mode ();
5412 Only enable THP madvise usage if the system supports it and
5413 has 'madvise' mode. Otherwise the madvise() call is wasteful.
5415 if (thp_mode == malloc_thp_mode_madvise)
5416 mp_.thp_pagesize = __malloc_default_thp_pagesize ();
5418 else if (value >= 2)
5419 __malloc_hugepage_config (value == 2 ? 0 : value, &mp_.hp_pagesize,
5420 &mp_.hp_flags);
5421 return 0;
5423 #endif
int
__libc_mallopt (int param_number, int value)
{
  mstate av = &main_arena;
  int res = 1;

  if (!__malloc_initialized)
    ptmalloc_init ();
  __libc_lock_lock (av->mutex);

  LIBC_PROBE (memory_mallopt, 2, param_number, value);

  /* We must consolidate the main arena before changing max_fast
     (see definition of set_max_fast).  */
  malloc_consolidate (av);

  /* Many of these helper functions take a size_t.  We do not worry
     about overflow here, because negative int values will wrap to
     very large size_t values and the helpers have sufficient range
     checking for such conversions.  Many of these helpers are also
     used by the tunables macros in arena.c.  */

  switch (param_number)
    {
    case M_MXFAST:
      res = do_set_mxfast (value);
      break;

    case M_TRIM_THRESHOLD:
      res = do_set_trim_threshold (value);
      break;

    case M_TOP_PAD:
      res = do_set_top_pad (value);
      break;

    case M_MMAP_THRESHOLD:
      res = do_set_mmap_threshold (value);
      break;

    case M_MMAP_MAX:
      res = do_set_mmaps_max (value);
      break;

    case M_CHECK_ACTION:
      res = do_set_mallopt_check (value);
      break;

    case M_PERTURB:
      res = do_set_perturb_byte (value);
      break;

    case M_ARENA_TEST:
      if (value > 0)
        res = do_set_arena_test (value);
      break;

    case M_ARENA_MAX:
      if (value > 0)
        res = do_set_arena_max (value);
      break;
    }
  __libc_lock_unlock (av->mutex);
  return res;
}
libc_hidden_def (__libc_mallopt)

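/* Usage sketch (not part of the glibc sources): mallopt() is the public
   entry point for the do_set_* helpers above.  Because the helpers take
   size_t, a negative int argument wraps to a very large value, which the
   helpers' range checks are prepared for (see the comment above).
   Hypothetical example:

     #include <malloc.h>

     int
     main (void)
     {
       // Serve requests of up to 1 MiB from the heap instead of mmap.
       mallopt (M_MMAP_THRESHOLD, 1024 * 1024);

       // Fill freshly allocated and freed memory with a byte pattern to
       // flush out use-before-init and use-after-free bugs.
       mallopt (M_PERTURB, 0xaa);

       // Disable fastbins entirely (largest "fast" request size = 0).
       mallopt (M_MXFAST, 0);

       return 0;
     }
*/
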
/*
   -------------------- Alternative MORECORE functions --------------------
 */


/*
   General Requirements for MORECORE.

   The MORECORE function must have the following properties:

   If MORECORE_CONTIGUOUS is false:

 * MORECORE must allocate in multiples of pagesize.  It will
      only be called with arguments that are multiples of pagesize.

 * MORECORE(0) must return an address that is at least
      MALLOC_ALIGNMENT aligned.  (Page-aligning always suffices.)

   else (i.e. if MORECORE_CONTIGUOUS is true):

 * Consecutive calls to MORECORE with positive arguments
      return increasing addresses, indicating that space has been
      contiguously extended.

 * MORECORE need not allocate in multiples of pagesize.
      Calls to MORECORE need not have args of multiples of pagesize.

 * MORECORE need not page-align.

   In either case:

 * MORECORE may allocate more memory than requested.  (Or even less,
      but this will generally result in a malloc failure.)

 * MORECORE must not allocate memory when given argument zero, but
      instead return one past the end address of memory from the previous
      nonzero call.  This malloc does NOT call MORECORE(0)
      until at least one call with positive arguments is made, so
      the initial value returned is not important.

 * Even though consecutive calls to MORECORE need not return contiguous
      addresses, it must be OK for malloc'ed chunks to span multiple
      regions in those cases where they do happen to be contiguous.

 * MORECORE need not handle negative arguments -- it may instead
      just return MORECORE_FAILURE when given negative arguments.
      Negative arguments are always multiples of pagesize.  MORECORE
      must not misinterpret negative args as large positive unsigned
      args.  You can suppress all such calls from even occurring by
      defining MORECORE_CANNOT_TRIM.

   There is some variation across systems about the type of the
   argument to sbrk/MORECORE.  If size_t is unsigned, then it cannot
   actually be size_t, because sbrk supports negative args, so it is
   normally the signed type of the same width as size_t (sometimes
   declared as "intptr_t", and sometimes "ptrdiff_t").  It doesn't much
   matter though.  Internally, we use "long" as arguments, which should
   work across all reasonable possibilities.

   Additionally, if MORECORE ever returns failure for a positive
   request, then mmap is used as a noncontiguous system allocator.  This
   is a useful backup strategy for systems with holes in address spaces
   -- in this case sbrk cannot contiguously expand the heap, but mmap
   may be able to map noncontiguous space.

   If you'd like mmap to ALWAYS be used, you can define MORECORE to be
   a function that always returns MORECORE_FAILURE (a minimal sketch of
   such a function follows the osCleanupMem example below).

   If you are using this malloc with something other than sbrk (or its
   emulation) to supply memory regions, you probably want to set
   MORECORE_CONTIGUOUS as false.  As an example, here is a custom
   allocator kindly contributed for pre-OSX macOS.  It uses virtually
   but not necessarily physically contiguous non-paged memory (locked
   in, present and won't get swapped out).  You can use it by
   uncommenting this section, adding some #includes, and setting up the
   appropriate defines above:

 *#define MORECORE osMoreCore
 *#define MORECORE_CONTIGUOUS 0

   There is also a shutdown routine that should somehow be called for
   cleanup upon program exit.

 *#define MAX_POOL_ENTRIES 100
 *#define MINIMUM_MORECORE_SIZE  (64 * 1024)
   static int next_os_pool;
   void *our_os_pools[MAX_POOL_ENTRIES];

   void *osMoreCore(int size)
   {
     void *ptr = 0;
     static void *sbrk_top = 0;

     if (size > 0)
       {
         if (size < MINIMUM_MORECORE_SIZE)
           size = MINIMUM_MORECORE_SIZE;
         if (CurrentExecutionLevel() == kTaskLevel)
           ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
         if (ptr == 0)
           {
             return (void *) MORECORE_FAILURE;
           }
         // save ptrs so they can be freed during cleanup
         our_os_pools[next_os_pool] = ptr;
         next_os_pool++;
         ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
         sbrk_top = (char *) ptr + size;
         return ptr;
       }
     else if (size < 0)
       {
         // we don't currently support shrink behavior
         return (void *) MORECORE_FAILURE;
       }
     else
       {
         return sbrk_top;
       }
   }

   // cleanup any allocated memory pools
   // called as last thing before shutting down driver

   void osCleanupMem(void)
   {
     void **ptr;

     for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
       if (*ptr)
         {
           PoolDeallocate(*ptr);
           *ptr = 0;
         }
   }

 */

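/* Illustrative sketch (not part of the glibc sources): as noted above,
   mmap-only operation only needs a MORECORE replacement that always
   fails.  The prototype shown here (ptrdiff_t argument) is an assumption
   matching the usual sbrk-style signature:

     void *
     fail_morecore (ptrdiff_t increment)
     {
       return (void *) MORECORE_FAILURE;
     }

 *#define MORECORE fail_morecore
 *#define MORECORE_CONTIGUOUS 0
*/
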
/* Helper code.  */

extern char **__libc_argv attribute_hidden;

static void
malloc_printerr (const char *str)
{
#if IS_IN (libc)
  __libc_message ("%s\n", str);
#else
  __libc_fatal (str);
#endif
  __builtin_unreachable ();
}

#if IS_IN (libc)
/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  if (!__malloc_initialized)
    ptmalloc_init ();

  /* Test whether the ALIGNMENT argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *))
      || alignment == 0)
    return EINVAL;

  void *address = RETURN_ADDRESS (0);
  mem = _mid_memalign (alignment, size, address);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
#endif

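/* Usage sketch (not part of the glibc sources): the validity test above
   accepts only alignments that are a power-of-two multiple of
   sizeof (void *).  Hypothetical example:

     #include <errno.h>
     #include <stdlib.h>

     void *p = NULL;
     int rc;

     rc = posix_memalign (&p, 64, 1000);  // 64 is valid: rc == 0, p usable
     free (p);

     rc = posix_memalign (&p, 3, 1000);   // 3 is not: rc == EINVAL and
                                          // p is left untouched
*/
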
int
__malloc_info (int options, FILE *fp)
{
  /* For now, at least.  */
  if (options != 0)
    return EINVAL;

  int n = 0;
  size_t total_nblocks = 0;
  size_t total_nfastblocks = 0;
  size_t total_avail = 0;
  size_t total_fastavail = 0;
  size_t total_system = 0;
  size_t total_max_system = 0;
  size_t total_aspace = 0;
  size_t total_aspace_mprotect = 0;

  if (!__malloc_initialized)
    ptmalloc_init ();

  fputs ("<malloc version=\"1\">\n", fp);

  /* Iterate over all arenas currently in use.  */
  mstate ar_ptr = &main_arena;
  do
    {
      fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);

      size_t nblocks = 0;
      size_t nfastblocks = 0;
      size_t avail = 0;
      size_t fastavail = 0;
      struct
      {
        size_t from;
        size_t to;
        size_t total;
        size_t count;
      } sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))

      __libc_lock_lock (ar_ptr->mutex);

      /* Account for top chunk.  The top-most available chunk is
         treated specially and is never in any bin.  See "initial_top"
         comments.  */
      avail = chunksize (ar_ptr->top);
      nblocks = 1;  /* Top always exists.  */

      for (size_t i = 0; i < NFASTBINS; ++i)
        {
          mchunkptr p = fastbin (ar_ptr, i);
          if (p != NULL)
            {
              size_t nthissize = 0;
              size_t thissize = chunksize (p);

              while (p != NULL)
                {
                  if (__glibc_unlikely (misaligned_chunk (p)))
                    malloc_printerr ("__malloc_info(): "
                                     "unaligned fastbin chunk detected");
                  ++nthissize;
                  p = REVEAL_PTR (p->fd);
                }

              fastavail += nthissize * thissize;
              nfastblocks += nthissize;
              sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
              sizes[i].to = thissize;
              sizes[i].count = nthissize;
            }
          else
            sizes[i].from = sizes[i].to = sizes[i].count = 0;

          sizes[i].total = sizes[i].count * sizes[i].to;
        }

      mbinptr bin;
      struct malloc_chunk *r;

      for (size_t i = 1; i < NBINS; ++i)
        {
          bin = bin_at (ar_ptr, i);
          r = bin->fd;
          sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
          sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
                                          = sizes[NFASTBINS - 1 + i].count = 0;

          if (r != NULL)
            while (r != bin)
              {
                size_t r_size = chunksize_nomask (r);
                ++sizes[NFASTBINS - 1 + i].count;
                sizes[NFASTBINS - 1 + i].total += r_size;
                sizes[NFASTBINS - 1 + i].from
                  = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
                sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
                                                   r_size);

                r = r->fd;
              }

          if (sizes[NFASTBINS - 1 + i].count == 0)
            sizes[NFASTBINS - 1 + i].from = 0;
          nblocks += sizes[NFASTBINS - 1 + i].count;
          avail += sizes[NFASTBINS - 1 + i].total;
        }

      size_t heap_size = 0;
      size_t heap_mprotect_size = 0;
      size_t heap_count = 0;
      if (ar_ptr != &main_arena)
        {
          /* Iterate over the arena heaps from back to front.  */
          heap_info *heap = heap_for_ptr (top (ar_ptr));
          do
            {
              heap_size += heap->size;
              heap_mprotect_size += heap->mprotect_size;
              heap = heap->prev;
              ++heap_count;
            }
          while (heap != NULL);
        }

      __libc_lock_unlock (ar_ptr->mutex);

      total_nfastblocks += nfastblocks;
      total_fastavail += fastavail;

      total_nblocks += nblocks;
      total_avail += avail;

      for (size_t i = 0; i < nsizes; ++i)
        if (sizes[i].count != 0 && i != NFASTBINS)
          fprintf (fp, "\
  <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                   sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);

      if (sizes[NFASTBINS].count != 0)
        fprintf (fp, "\
  <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
                 sizes[NFASTBINS].total, sizes[NFASTBINS].count);

      total_system += ar_ptr->system_mem;
      total_max_system += ar_ptr->max_system_mem;

      fprintf (fp,
               "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
               "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
               "<system type=\"current\" size=\"%zu\"/>\n"
               "<system type=\"max\" size=\"%zu\"/>\n",
               nfastblocks, fastavail, nblocks, avail,
               ar_ptr->system_mem, ar_ptr->max_system_mem);

      if (ar_ptr != &main_arena)
        {
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
                   "<aspace type=\"subheaps\" size=\"%zu\"/>\n",
                   heap_size, heap_mprotect_size, heap_count);
          total_aspace += heap_size;
          total_aspace_mprotect += heap_mprotect_size;
        }
      else
        {
          fprintf (fp,
                   "<aspace type=\"total\" size=\"%zu\"/>\n"
                   "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                   ar_ptr->system_mem, ar_ptr->system_mem);
          total_aspace += ar_ptr->system_mem;
          total_aspace_mprotect += ar_ptr->system_mem;
        }

      fputs ("</heap>\n", fp);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  fprintf (fp,
           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
           "<system type=\"current\" size=\"%zu\"/>\n"
           "<system type=\"max\" size=\"%zu\"/>\n"
           "<aspace type=\"total\" size=\"%zu\"/>\n"
           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
           "</malloc>\n",
           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
           mp_.n_mmaps, mp_.mmapped_mem,
           total_system, total_max_system,
           total_aspace, total_aspace_mprotect);

  return 0;
}

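/* Usage sketch (not part of the glibc sources): capturing the XML report
   produced above in memory via open_memstream instead of writing it
   straight to a file.  Hypothetical example:

     #include <malloc.h>
     #include <stdio.h>
     #include <stdlib.h>

     int
     main (void)
     {
       char *buf = NULL;
       size_t len = 0;
       FILE *fp = open_memstream (&buf, &len);
       if (fp == NULL)
         return 1;

       void *blocks[64];
       for (int i = 0; i < 64; i++)      // create some live allocations
         blocks[i] = malloc (128 << (i % 4));

       if (malloc_info (0, fp) != 0)     // options must currently be 0
         return 1;
       fclose (fp);                      // finalizes buf and len

       fputs (buf, stdout);              // <malloc version="1">...</malloc>
       free (buf);
       return 0;
     }
*/
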
#if IS_IN (libc)
weak_alias (__malloc_info, malloc_info)

strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallinfo2, __mallinfo2)
weak_alias (__libc_mallinfo2, mallinfo2)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
#endif

#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_26)
compat_symbol (libc, __libc_free, cfree, GLIBC_2_0);
#endif

/* ------------------------------------------------------------
   History:

   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
 */

/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */