nss_dns: Fix assertion failure in _nss_dns_getcanonname_r [BZ #19865]
[glibc.git] / malloc / malloc.c
blob1eed79414c585e961ec22fc28c462a07058ce4a6
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2016 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
29 * Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
33 * Quickstart
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
44 * Why use this malloc?
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
71 * Contents, described in more detail in "description of public routines" below.
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 cfree(void* p);
88 malloc_trim(size_t pad);
89 malloc_usable_size(void* p);
90 malloc_stats();
92 * Vital statistics:
94 Supported pointer representation: 4 or 8 bytes
95 Supported size_t representation: 4 or 8 bytes
96 Note that size_t is allowed to be 4 bytes even if pointers are 8.
97 You can adjust this by defining INTERNAL_SIZE_T
99 Alignment: 2 * sizeof(size_t) (default)
100 (i.e., 8 byte alignment with 4byte size_t). This suffices for
101 nearly all current machines and C compilers. However, you can
102 define MALLOC_ALIGNMENT to be wider than this if necessary.
104 Minimum overhead per allocated chunk: 4 or 8 bytes
105 Each malloced chunk has a hidden word of overhead holding size
106 and status information.
108 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
109 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
111 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
112 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
113 needed; 4 (8) for a trailing size field and 8 (16) bytes for
114 free list pointers. Thus, the minimum allocatable size is
115 16/24/32 bytes.
117 Even a request for zero bytes (i.e., malloc(0)) returns a
118 pointer to something of the minimum allocatable size.
120 The maximum overhead wastage (i.e., number of extra bytes
121 allocated than were requested in malloc) is less than or equal
122 to the minimum size, except for requests >= mmap_threshold that
123 are serviced via mmap(), where the worst case wastage is 2 *
124 sizeof(size_t) bytes plus the remainder from a system page (the
125 minimal mmap unit); typically 4096 or 8192 bytes.
127 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
128 8-byte size_t: 2^64 minus about two pages
130 It is assumed that (possibly signed) size_t values suffice to
131 represent chunk sizes. `Possibly signed' is due to the fact
132 that `size_t' may be defined on a system as either a signed or
133 an unsigned type. The ISO C standard says that it must be
134 unsigned, but a few systems are known not to adhere to this.
135 Additionally, even when size_t is unsigned, sbrk (which is by
136 default used to obtain memory from system) accepts signed
137 arguments, and may not be able to handle size_t-wide arguments
138 with negative sign bit. Generally, values that would
139 appear as negative after accounting for overhead and alignment
140 are supported only via mmap(), which does not have this
141 limitation.
143 Requests for sizes outside the allowed range will perform an optional
144 failure action and then return null. (Requests may also
145 also fail because a system is out of memory.)
147 Thread-safety: thread-safe
149 Compliance: I believe it is compliant with the 1997 Single Unix Specification
150 Also SVID/XPG, ANSI C, and probably others as well.
152 * Synopsis of compile-time options:
154 People have reported using previous versions of this malloc on all
155 versions of Unix, sometimes by tweaking some of the defines
156 below. It has been tested most extensively on Solaris and Linux.
157 People also report using it in stand-alone embedded systems.
159 The implementation is in straight, hand-tuned ANSI C. It is not
160 at all modular. (Sorry!) It uses a lot of macros. To be at all
161 usable, this code should be compiled using an optimizing compiler
162 (for example gcc -O3) that can simplify expressions and control
163 paths. (FAQ: some macros import variables as arguments rather than
164 declare locals because people reported that some debuggers
165 otherwise get confused.)
167 OPTION DEFAULT VALUE
169 Compilation Environment options:
171 HAVE_MREMAP 0
173 Changing default word sizes:
175 INTERNAL_SIZE_T size_t
176 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
177 __alignof__ (long double))
179 Configuration and functionality options:
181 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
182 USE_MALLOC_LOCK NOT defined
183 MALLOC_DEBUG NOT defined
184 REALLOC_ZERO_BYTES_FREES 1
185 TRIM_FASTBINS 0
187 Options for customizing MORECORE:
189 MORECORE sbrk
190 MORECORE_FAILURE -1
191 MORECORE_CONTIGUOUS 1
192 MORECORE_CANNOT_TRIM NOT defined
193 MORECORE_CLEARS 1
194 MMAP_AS_MORECORE_SIZE (1024 * 1024)
196 Tuning options that are also dynamically changeable via mallopt:
198 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
199 DEFAULT_TRIM_THRESHOLD 128 * 1024
200 DEFAULT_TOP_PAD 0
201 DEFAULT_MMAP_THRESHOLD 128 * 1024
202 DEFAULT_MMAP_MAX 65536
204 There are several other #defined constants and macros that you
205 probably don't want to touch unless you are extending or adapting malloc. */
208 void* is the pointer type that malloc should say it returns
211 #ifndef void
212 #define void void
213 #endif /*void*/
215 #include <stddef.h> /* for size_t */
216 #include <stdlib.h> /* for getenv(), abort() */
217 #include <unistd.h> /* for __libc_enable_secure */
219 #include <malloc-machine.h>
220 #include <malloc-sysdep.h>
222 #include <atomic.h>
223 #include <_itoa.h>
224 #include <bits/wordsize.h>
225 #include <sys/sysinfo.h>
227 #include <ldsodefs.h>
229 #include <unistd.h>
230 #include <stdio.h> /* needed for malloc_stats */
231 #include <errno.h>
233 #include <shlib-compat.h>
235 /* For uintptr_t. */
236 #include <stdint.h>
238 /* For va_arg, va_start, va_end. */
239 #include <stdarg.h>
241 /* For MIN, MAX, powerof2. */
242 #include <sys/param.h>
244 /* For ALIGN_UP et. al. */
245 #include <libc-internal.h>
249 Debugging:
251 Because freed chunks may be overwritten with bookkeeping fields, this
252 malloc will often die when freed memory is overwritten by user
253 programs. This can be very effective (albeit in an annoying way)
254 in helping track down dangling pointers.
256 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
257 enabled that will catch more memory errors. You probably won't be
258 able to make much sense of the actual assertion errors, but they
259 should help you locate incorrectly overwritten memory. The checking
260 is fairly extensive, and will slow down execution
261 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
262 will attempt to check every non-mmapped allocated and free chunk in
263 the course of computing the summmaries. (By nature, mmapped regions
264 cannot be checked very much automatically.)
266 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
267 this code. The assertions in the check routines spell out in more
268 detail the assumptions and invariants underlying the algorithms.
270 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
271 checking that all accesses to malloced memory stay within their
272 bounds. However, there are several add-ons and adaptations of this
273 or other mallocs available that do this.
276 #ifndef MALLOC_DEBUG
277 #define MALLOC_DEBUG 0
278 #endif
280 #ifdef NDEBUG
281 # define assert(expr) ((void) 0)
282 #else
283 # define assert(expr) \
284 ((expr) \
285 ? ((void) 0) \
286 : __malloc_assert (#expr, __FILE__, __LINE__, __func__))
288 extern const char *__progname;
290 static void
291 __malloc_assert (const char *assertion, const char *file, unsigned int line,
292 const char *function)
294 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
295 __progname, __progname[0] ? ": " : "",
296 file, line,
297 function ? function : "", function ? ": " : "",
298 assertion);
299 fflush (stderr);
300 abort ();
302 #endif
306 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
307 of chunk sizes.
309 The default version is the same as size_t.
311 While not strictly necessary, it is best to define this as an
312 unsigned type, even if size_t is a signed type. This may avoid some
313 artificial size limitations on some systems.
315 On a 64-bit machine, you may be able to reduce malloc overhead by
316 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
317 expense of not being able to handle more than 2^32 of malloced
318 space. If this limitation is acceptable, you are encouraged to set
319 this unless you are on a platform requiring 16byte alignments. In
320 this case the alignment requirements turn out to negate any
321 potential advantages of decreasing size_t word size.
323 Implementors: Beware of the possible combinations of:
324 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
325 and might be the same width as int or as long
326 - size_t might have different width and signedness as INTERNAL_SIZE_T
327 - int and long might be 32 or 64 bits, and might be the same width
328 To deal with this, most comparisons and difference computations
329 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
330 aware of the fact that casting an unsigned int to a wider long does
331 not sign-extend. (This also makes checking for negative numbers
332 awkward.) Some of these casts result in harmless compiler warnings
333 on some systems.
336 #ifndef INTERNAL_SIZE_T
337 #define INTERNAL_SIZE_T size_t
338 #endif
340 /* The corresponding word size */
341 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
345 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
346 It must be a power of two at least 2 * SIZE_SZ, even on machines
347 for which smaller alignments would suffice. It may be defined as
348 larger than this though. Note however that code and data structures
349 are optimized for the case of 8-byte alignment.
353 #ifndef MALLOC_ALIGNMENT
354 # if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
355 /* This is the correct definition when there is no past ABI to constrain it.
357 Among configurations with a past ABI constraint, it differs from
358 2*SIZE_SZ only on powerpc32. For the time being, changing this is
359 causing more compatibility problems due to malloc_get_state and
360 malloc_set_state than will returning blocks not adequately aligned for
361 long double objects under -mlong-double-128. */
363 # define MALLOC_ALIGNMENT (2 *SIZE_SZ < __alignof__ (long double) \
364 ? __alignof__ (long double) : 2 *SIZE_SZ)
365 # else
366 # define MALLOC_ALIGNMENT (2 *SIZE_SZ)
367 # endif
368 #endif
370 /* The corresponding bit mask value */
371 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
376 REALLOC_ZERO_BYTES_FREES should be set if a call to
377 realloc with zero bytes should be the same as a call to free.
378 This is required by the C standard. Otherwise, since this malloc
379 returns a unique pointer for malloc(0), so does realloc(p, 0).
382 #ifndef REALLOC_ZERO_BYTES_FREES
383 #define REALLOC_ZERO_BYTES_FREES 1
384 #endif
387 TRIM_FASTBINS controls whether free() of a very small chunk can
388 immediately lead to trimming. Setting to true (1) can reduce memory
389 footprint, but will almost always slow down programs that use a lot
390 of small chunks.
392 Define this only if you are willing to give up some speed to more
393 aggressively reduce system-level memory footprint when releasing
394 memory in programs that use many small chunks. You can get
395 essentially the same effect by setting MXFAST to 0, but this can
396 lead to even greater slowdowns in programs using many small chunks.
397 TRIM_FASTBINS is an in-between compile-time option, that disables
398 only those chunks bordering topmost memory from being placed in
399 fastbins.
402 #ifndef TRIM_FASTBINS
403 #define TRIM_FASTBINS 0
404 #endif
407 /* Definition for getting more memory from the OS. */
408 #define MORECORE (*__morecore)
409 #define MORECORE_FAILURE 0
410 void * __default_morecore (ptrdiff_t);
411 void *(*__morecore)(ptrdiff_t) = __default_morecore;
414 #include <string.h>
417 MORECORE-related declarations. By default, rely on sbrk
422 MORECORE is the name of the routine to call to obtain more memory
423 from the system. See below for general guidance on writing
424 alternative MORECORE functions, as well as a version for WIN32 and a
425 sample version for pre-OSX macos.
428 #ifndef MORECORE
429 #define MORECORE sbrk
430 #endif
433 MORECORE_FAILURE is the value returned upon failure of MORECORE
434 as well as mmap. Since it cannot be an otherwise valid memory address,
435 and must reflect values of standard sys calls, you probably ought not
436 try to redefine it.
439 #ifndef MORECORE_FAILURE
440 #define MORECORE_FAILURE (-1)
441 #endif
444 If MORECORE_CONTIGUOUS is true, take advantage of fact that
445 consecutive calls to MORECORE with positive arguments always return
446 contiguous increasing addresses. This is true of unix sbrk. Even
447 if not defined, when regions happen to be contiguous, malloc will
448 permit allocations spanning regions obtained from different
449 calls. But defining this when applicable enables some stronger
450 consistency checks and space efficiencies.
453 #ifndef MORECORE_CONTIGUOUS
454 #define MORECORE_CONTIGUOUS 1
455 #endif
458 Define MORECORE_CANNOT_TRIM if your version of MORECORE
459 cannot release space back to the system when given negative
460 arguments. This is generally necessary only if you are using
461 a hand-crafted MORECORE function that cannot handle negative arguments.
464 /* #define MORECORE_CANNOT_TRIM */
466 /* MORECORE_CLEARS (default 1)
467 The degree to which the routine mapped to MORECORE zeroes out
468 memory: never (0), only for newly allocated space (1) or always
469 (2). The distinction between (1) and (2) is necessary because on
470 some systems, if the application first decrements and then
471 increments the break value, the contents of the reallocated space
472 are unspecified.
475 #ifndef MORECORE_CLEARS
476 # define MORECORE_CLEARS 1
477 #endif
481 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
482 sbrk fails, and mmap is used as a backup. The value must be a
483 multiple of page size. This backup strategy generally applies only
484 when systems have "holes" in address space, so sbrk cannot perform
485 contiguous expansion, but there is still space available on system.
486 On systems for which this is known to be useful (i.e. most linux
487 kernels), this occurs only when programs allocate huge amounts of
488 memory. Between this, and the fact that mmap regions tend to be
489 limited, the size should be large, to avoid too many mmap calls and
490 thus avoid running out of kernel resources. */
492 #ifndef MMAP_AS_MORECORE_SIZE
493 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
494 #endif
497 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
498 large blocks.
501 #ifndef HAVE_MREMAP
502 #define HAVE_MREMAP 0
503 #endif
507 This version of malloc supports the standard SVID/XPG mallinfo
508 routine that returns a struct containing usage properties and
509 statistics. It should work on any SVID/XPG compliant system that has
510 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
511 install such a thing yourself, cut out the preliminary declarations
512 as described above and below and save them in a malloc.h file. But
513 there's no compelling reason to bother to do this.)
515 The main declaration needed is the mallinfo struct that is returned
516 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
517 bunch of fields that are not even meaningful in this version of
518 malloc. These fields are are instead filled by mallinfo() with
519 other numbers that might be of interest.
523 /* ---------- description of public routines ------------ */
526 malloc(size_t n)
527 Returns a pointer to a newly allocated chunk of at least n bytes, or null
528 if no space is available. Additionally, on failure, errno is
529 set to ENOMEM on ANSI C systems.
531 If n is zero, malloc returns a minumum-sized chunk. (The minimum
532 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
533 systems.) On most systems, size_t is an unsigned type, so calls
534 with negative arguments are interpreted as requests for huge amounts
535 of space, which will often fail. The maximum supported value of n
536 differs across systems, but is in all cases less than the maximum
537 representable value of a size_t.
539 void* __libc_malloc(size_t);
540 libc_hidden_proto (__libc_malloc)
543 free(void* p)
544 Releases the chunk of memory pointed to by p, that had been previously
545 allocated using malloc or a related routine such as realloc.
546 It has no effect if p is null. It can have arbitrary (i.e., bad!)
547 effects if p has already been freed.
549 Unless disabled (using mallopt), freeing very large spaces will
550 when possible, automatically trigger operations that give
551 back unused memory to the system, thus reducing program footprint.
553 void __libc_free(void*);
554 libc_hidden_proto (__libc_free)
557 calloc(size_t n_elements, size_t element_size);
558 Returns a pointer to n_elements * element_size bytes, with all locations
559 set to zero.
561 void* __libc_calloc(size_t, size_t);
564 realloc(void* p, size_t n)
565 Returns a pointer to a chunk of size n that contains the same data
566 as does chunk p up to the minimum of (n, p's size) bytes, or null
567 if no space is available.
569 The returned pointer may or may not be the same as p. The algorithm
570 prefers extending p when possible, otherwise it employs the
571 equivalent of a malloc-copy-free sequence.
573 If p is null, realloc is equivalent to malloc.
575 If space is not available, realloc returns null, errno is set (if on
576 ANSI) and p is NOT freed.
578 if n is for fewer bytes than already held by p, the newly unused
579 space is lopped off and freed if possible. Unless the #define
580 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
581 zero (re)allocates a minimum-sized chunk.
583 Large chunks that were internally obtained via mmap will always
584 be reallocated using malloc-copy-free sequences unless
585 the system supports MREMAP (currently only linux).
587 The old unix realloc convention of allowing the last-free'd chunk
588 to be used as an argument to realloc is not supported.
590 void* __libc_realloc(void*, size_t);
591 libc_hidden_proto (__libc_realloc)
594 memalign(size_t alignment, size_t n);
595 Returns a pointer to a newly allocated chunk of n bytes, aligned
596 in accord with the alignment argument.
598 The alignment argument should be a power of two. If the argument is
599 not a power of two, the nearest greater power is used.
600 8-byte alignment is guaranteed by normal malloc calls, so don't
601 bother calling memalign with an argument of 8 or less.
603 Overreliance on memalign is a sure way to fragment space.
605 void* __libc_memalign(size_t, size_t);
606 libc_hidden_proto (__libc_memalign)
609 valloc(size_t n);
610 Equivalent to memalign(pagesize, n), where pagesize is the page
611 size of the system. If the pagesize is unknown, 4096 is used.
613 void* __libc_valloc(size_t);
618 mallopt(int parameter_number, int parameter_value)
619 Sets tunable parameters The format is to provide a
620 (parameter-number, parameter-value) pair. mallopt then sets the
621 corresponding parameter to the argument value if it can (i.e., so
622 long as the value is meaningful), and returns 1 if successful else
623 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
624 normally defined in malloc.h. Only one of these (M_MXFAST) is used
625 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
626 so setting them has no effect. But this malloc also supports four
627 other options in mallopt. See below for details. Briefly, supported
628 parameters are as follows (listed defaults are for "typical"
629 configurations).
631 Symbol param # default allowed param values
632 M_MXFAST 1 64 0-80 (0 disables fastbins)
633 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
634 M_TOP_PAD -2 0 any
635 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
636 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
638 int __libc_mallopt(int, int);
639 libc_hidden_proto (__libc_mallopt)
643 mallinfo()
644 Returns (by copy) a struct containing various summary statistics:
646 arena: current total non-mmapped bytes allocated from system
647 ordblks: the number of free chunks
648 smblks: the number of fastbin blocks (i.e., small chunks that
649 have been freed but not use resused or consolidated)
650 hblks: current number of mmapped regions
651 hblkhd: total bytes held in mmapped regions
652 usmblks: always 0
653 fsmblks: total bytes held in fastbin blocks
654 uordblks: current total allocated space (normal or mmapped)
655 fordblks: total free space
656 keepcost: the maximum number of bytes that could ideally be released
657 back to system via malloc_trim. ("ideally" means that
658 it ignores page restrictions etc.)
660 Because these fields are ints, but internal bookkeeping may
661 be kept as longs, the reported values may wrap around zero and
662 thus be inaccurate.
664 struct mallinfo __libc_mallinfo(void);
668 pvalloc(size_t n);
669 Equivalent to valloc(minimum-page-that-holds(n)), that is,
670 round up n to nearest pagesize.
672 void* __libc_pvalloc(size_t);
675 malloc_trim(size_t pad);
677 If possible, gives memory back to the system (via negative
678 arguments to sbrk) if there is unused memory at the `high' end of
679 the malloc pool. You can call this after freeing large blocks of
680 memory to potentially reduce the system-level memory requirements
681 of a program. However, it cannot guarantee to reduce memory. Under
682 some allocation patterns, some large free blocks of memory will be
683 locked between two used chunks, so they cannot be given back to
684 the system.
686 The `pad' argument to malloc_trim represents the amount of free
687 trailing space to leave untrimmed. If this argument is zero,
688 only the minimum amount of memory to maintain internal data
689 structures will be left (one page or less). Non-zero arguments
690 can be supplied to maintain enough trailing space to service
691 future expected allocations without having to re-obtain memory
692 from the system.
694 Malloc_trim returns 1 if it actually released any memory, else 0.
695 On systems that do not support "negative sbrks", it will always
696 return 0.
698 int __malloc_trim(size_t);
701 malloc_usable_size(void* p);
703 Returns the number of bytes you can actually use in
704 an allocated chunk, which may be more than you requested (although
705 often not) due to alignment and minimum size constraints.
706 You can use this many bytes without worrying about
707 overwriting other allocated objects. This is not a particularly great
708 programming practice. malloc_usable_size can be more useful in
709 debugging and assertions, for example:
711 p = malloc(n);
712 assert(malloc_usable_size(p) >= 256);
715 size_t __malloc_usable_size(void*);
718 malloc_stats();
719 Prints on stderr the amount of space obtained from the system (both
720 via sbrk and mmap), the maximum amount (which may be more than
721 current if malloc_trim and/or munmap got called), and the current
722 number of bytes allocated via malloc (or realloc, etc) but not yet
723 freed. Note that this is the number of bytes allocated, not the
724 number requested. It will be larger than the number requested
725 because of alignment and bookkeeping overhead. Because it includes
726 alignment wastage as being in use, this figure may be greater than
727 zero even when no user-level chunks are allocated.
729 The reported current and maximum system memory can be inaccurate if
730 a program makes other calls to system memory allocation functions
731 (normally sbrk) outside of malloc.
733 malloc_stats prints only the most commonly interesting statistics.
734 More information can be obtained by calling mallinfo.
737 void __malloc_stats(void);
740 malloc_get_state(void);
742 Returns the state of all malloc variables in an opaque data
743 structure.
745 void* __malloc_get_state(void);
748 malloc_set_state(void* state);
750 Restore the state of all malloc variables from data obtained with
751 malloc_get_state().
753 int __malloc_set_state(void*);
756 posix_memalign(void **memptr, size_t alignment, size_t size);
758 POSIX wrapper like memalign(), checking for validity of size.
760 int __posix_memalign(void **, size_t, size_t);
762 /* mallopt tuning options */
765 M_MXFAST is the maximum request size used for "fastbins", special bins
766 that hold returned chunks without consolidating their spaces. This
767 enables future requests for chunks of the same size to be handled
768 very quickly, but can increase fragmentation, and thus increase the
769 overall memory footprint of a program.
771 This malloc manages fastbins very conservatively yet still
772 efficiently, so fragmentation is rarely a problem for values less
773 than or equal to the default. The maximum supported value of MXFAST
774 is 80. You wouldn't want it any higher than this anyway. Fastbins
775 are designed especially for use with many small structs, objects or
776 strings -- the default handles structs/objects/arrays with sizes up
777 to 8 4byte fields, or small strings representing words, tokens,
778 etc. Using fastbins for larger objects normally worsens
779 fragmentation without improving speed.
781 M_MXFAST is set in REQUEST size units. It is internally used in
782 chunksize units, which adds padding and alignment. You can reduce
783 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
784 algorithm to be a closer approximation of fifo-best-fit in all cases,
785 not just for larger requests, but will generally cause it to be
786 slower.
790 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
791 #ifndef M_MXFAST
792 #define M_MXFAST 1
793 #endif
795 #ifndef DEFAULT_MXFAST
796 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
797 #endif
801 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
802 to keep before releasing via malloc_trim in free().
804 Automatic trimming is mainly useful in long-lived programs.
805 Because trimming via sbrk can be slow on some systems, and can
806 sometimes be wasteful (in cases where programs immediately
807 afterward allocate more large chunks) the value should be high
808 enough so that your overall system performance would improve by
809 releasing this much memory.
811 The trim threshold and the mmap control parameters (see below)
812 can be traded off with one another. Trimming and mmapping are
813 two different ways of releasing unused memory back to the
814 system. Between these two, it is often possible to keep
815 system-level demands of a long-lived program down to a bare
816 minimum. For example, in one test suite of sessions measuring
817 the XF86 X server on Linux, using a trim threshold of 128K and a
818 mmap threshold of 192K led to near-minimal long term resource
819 consumption.
821 If you are using this malloc in a long-lived program, it should
822 pay to experiment with these values. As a rough guide, you
823 might set to a value close to the average size of a process
824 (program) running on your system. Releasing this much memory
825 would allow such a process to run in memory. Generally, it's
826 worth it to tune for trimming rather tham memory mapping when a
827 program undergoes phases where several large chunks are
828 allocated and released in ways that can reuse each other's
829 storage, perhaps mixed with phases where there are no such
830 chunks at all. And in well-behaved long-lived programs,
831 controlling release of large blocks via trimming versus mapping
832 is usually faster.
834 However, in most programs, these parameters serve mainly as
835 protection against the system-level effects of carrying around
836 massive amounts of unneeded memory. Since frequent calls to
837 sbrk, mmap, and munmap otherwise degrade performance, the default
838 parameters are set to relatively high values that serve only as
839 safeguards.
841 The trim value It must be greater than page size to have any useful
842 effect. To disable trimming completely, you can set to
843 (unsigned long)(-1)
845 Trim settings interact with fastbin (MXFAST) settings: Unless
846 TRIM_FASTBINS is defined, automatic trimming never takes place upon
847 freeing a chunk with size less than or equal to MXFAST. Trimming is
848 instead delayed until subsequent freeing of larger chunks. However,
849 you can still force an attempted trim by calling malloc_trim.
851 Also, trimming is not generally possible in cases where
852 the main arena is obtained via mmap.
854 Note that the trick some people use of mallocing a huge space and
855 then freeing it at program startup, in an attempt to reserve system
856 memory, doesn't have the intended effect under automatic trimming,
857 since that memory will immediately be returned to the system.
860 #define M_TRIM_THRESHOLD -1
862 #ifndef DEFAULT_TRIM_THRESHOLD
863 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
864 #endif
867 M_TOP_PAD is the amount of extra `padding' space to allocate or
868 retain whenever sbrk is called. It is used in two ways internally:
870 * When sbrk is called to extend the top of the arena to satisfy
871 a new malloc request, this much padding is added to the sbrk
872 request.
874 * When malloc_trim is called automatically from free(),
875 it is used as the `pad' argument.
877 In both cases, the actual amount of padding is rounded
878 so that the end of the arena is always a system page boundary.
880 The main reason for using padding is to avoid calling sbrk so
881 often. Having even a small pad greatly reduces the likelihood
882 that nearly every malloc request during program start-up (or
883 after trimming) will invoke sbrk, which needlessly wastes
884 time.
886 Automatic rounding-up to page-size units is normally sufficient
887 to avoid measurable overhead, so the default is 0. However, in
888 systems where sbrk is relatively slow, it can pay to increase
889 this value, at the expense of carrying around more memory than
890 the program needs.
893 #define M_TOP_PAD -2
895 #ifndef DEFAULT_TOP_PAD
896 #define DEFAULT_TOP_PAD (0)
897 #endif
900 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
901 adjusted MMAP_THRESHOLD.
904 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
905 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
906 #endif
908 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
909 /* For 32-bit platforms we cannot increase the maximum mmap
910 threshold much because it is also the minimum value for the
911 maximum heap size and its alignment. Going above 512k (i.e., 1M
912 for new heaps) wastes too much address space. */
913 # if __WORDSIZE == 32
914 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
915 # else
916 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
917 # endif
918 #endif
921 M_MMAP_THRESHOLD is the request size threshold for using mmap()
922 to service a request. Requests of at least this size that cannot
923 be allocated using already-existing space will be serviced via mmap.
924 (If enough normal freed space already exists it is used instead.)
926 Using mmap segregates relatively large chunks of memory so that
927 they can be individually obtained and released from the host
928 system. A request serviced through mmap is never reused by any
929 other request (at least not directly; the system may just so
930 happen to remap successive requests to the same locations).
932 Segregating space in this way has the benefits that:
934 1. Mmapped space can ALWAYS be individually released back
935 to the system, which helps keep the system level memory
936 demands of a long-lived program low.
937 2. Mapped memory can never become `locked' between
938 other chunks, as can happen with normally allocated chunks, which
939 means that even trimming via malloc_trim would not release them.
940 3. On some systems with "holes" in address spaces, mmap can obtain
941 memory that sbrk cannot.
943 However, it has the disadvantages that:
945 1. The space cannot be reclaimed, consolidated, and then
946 used to service later requests, as happens with normal chunks.
947 2. It can lead to more wastage because of mmap page alignment
948 requirements
949 3. It causes malloc performance to be more dependent on host
950 system memory management support routines which may vary in
951 implementation quality and may impose arbitrary
952 limitations. Generally, servicing a request via normal
953 malloc steps is faster than going through a system's mmap.
955 The advantages of mmap nearly always outweigh disadvantages for
956 "large" chunks, but the value of "large" varies across systems. The
957 default is an empirically derived value that works well in most
958 systems.
961 Update in 2006:
962 The above was written in 2001. Since then the world has changed a lot.
963 Memory got bigger. Applications got bigger. The virtual address space
964 layout in 32 bit linux changed.
966 In the new situation, brk() and mmap space is shared and there are no
967 artificial limits on brk size imposed by the kernel. What is more,
968 applications have started using transient allocations larger than the
969 128Kb as was imagined in 2001.
971 The price for mmap is also high now; each time glibc mmaps from the
972 kernel, the kernel is forced to zero out the memory it gives to the
973 application. Zeroing memory is expensive and eats a lot of cache and
974 memory bandwidth. This has nothing to do with the efficiency of the
975 virtual memory system, by doing mmap the kernel just has no choice but
976 to zero.
978 In 2001, the kernel had a maximum size for brk() which was about 800
979 megabytes on 32 bit x86, at that point brk() would hit the first
980 mmaped shared libaries and couldn't expand anymore. With current 2.6
981 kernels, the VA space layout is different and brk() and mmap
982 both can span the entire heap at will.
984 Rather than using a static threshold for the brk/mmap tradeoff,
985 we are now using a simple dynamic one. The goal is still to avoid
986 fragmentation. The old goals we kept are
987 1) try to get the long lived large allocations to use mmap()
988 2) really large allocations should always use mmap()
989 and we're adding now:
990 3) transient allocations should use brk() to avoid forcing the kernel
991 having to zero memory over and over again
993 The implementation works with a sliding threshold, which is by default
994 limited to go between 128Kb and 32Mb (64Mb for 64 bitmachines) and starts
995 out at 128Kb as per the 2001 default.
997 This allows us to satisfy requirement 1) under the assumption that long
998 lived allocations are made early in the process' lifespan, before it has
999 started doing dynamic allocations of the same size (which will
1000 increase the threshold).
1002 The upperbound on the threshold satisfies requirement 2)
1004 The threshold goes up in value when the application frees memory that was
1005 allocated with the mmap allocator. The idea is that once the application
1006 starts freeing memory of a certain size, it's highly probable that this is
1007 a size the application uses for transient allocations. This estimator
1008 is there to satisfy the new third requirement.
1012 #define M_MMAP_THRESHOLD -3
1014 #ifndef DEFAULT_MMAP_THRESHOLD
1015 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1016 #endif
1019 M_MMAP_MAX is the maximum number of requests to simultaneously
1020 service using mmap. This parameter exists because
1021 some systems have a limited number of internal tables for
1022 use by mmap, and using more than a few of them may degrade
1023 performance.
1025 The default is set to a value that serves only as a safeguard.
1026 Setting to 0 disables use of mmap for servicing large requests.
1029 #define M_MMAP_MAX -4
1031 #ifndef DEFAULT_MMAP_MAX
1032 #define DEFAULT_MMAP_MAX (65536)
1033 #endif
1035 #include <malloc.h>
1037 #ifndef RETURN_ADDRESS
1038 #define RETURN_ADDRESS(X_) (NULL)
1039 #endif
1041 /* On some platforms we can compile internal, not exported functions better.
1042 Let the environment provide a macro and define it to be empty if it
1043 is not available. */
1044 #ifndef internal_function
1045 # define internal_function
1046 #endif
1048 /* Forward declarations. */
1049 struct malloc_chunk;
1050 typedef struct malloc_chunk* mchunkptr;
1052 /* Internal routines. */
1054 static void* _int_malloc(mstate, size_t);
1055 static void _int_free(mstate, mchunkptr, int);
1056 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1057 INTERNAL_SIZE_T);
1058 static void* _int_memalign(mstate, size_t, size_t);
1059 static void* _mid_memalign(size_t, size_t, void *);
1061 static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
1063 static void* internal_function mem2mem_check(void *p, size_t sz);
1064 static int internal_function top_check(void);
1065 static void internal_function munmap_chunk(mchunkptr p);
1066 #if HAVE_MREMAP
1067 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1068 #endif
1070 static void* malloc_check(size_t sz, const void *caller);
1071 static void free_check(void* mem, const void *caller);
1072 static void* realloc_check(void* oldmem, size_t bytes,
1073 const void *caller);
1074 static void* memalign_check(size_t alignment, size_t bytes,
1075 const void *caller);
1076 static void* malloc_atfork(size_t sz, const void *caller);
1077 static void free_atfork(void* mem, const void *caller);
1079 /* ------------------ MMAP support ------------------ */
1082 #include <fcntl.h>
1083 #include <sys/mman.h>
1085 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1086 # define MAP_ANONYMOUS MAP_ANON
1087 #endif
1089 #ifndef MAP_NORESERVE
1090 # define MAP_NORESERVE 0
1091 #endif
1093 #define MMAP(addr, size, prot, flags) \
1094 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1098 ----------------------- Chunk representations -----------------------
1103 This struct declaration is misleading (but accurate and necessary).
1104 It declares a "view" into memory allowing access to necessary
1105 fields at known offsets from a given base. See explanation below.
1108 struct malloc_chunk {
1110 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1111 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1113 struct malloc_chunk* fd; /* double links -- used only if free. */
1114 struct malloc_chunk* bk;
1116 /* Only used for large blocks: pointer to next larger size. */
1117 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1118 struct malloc_chunk* bk_nextsize;
1123 malloc_chunk details:
1125 (The following includes lightly edited explanations by Colin Plumb.)
1127 Chunks of memory are maintained using a `boundary tag' method as
1128 described in e.g., Knuth or Standish. (See the paper by Paul
1129 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1130 survey of such techniques.) Sizes of free chunks are stored both
1131 in the front of each chunk and at the end. This makes
1132 consolidating fragmented chunks into bigger chunks very fast. The
1133 size fields also hold bits representing whether chunks are free or
1134 in use.
1136 An allocated chunk looks like this:
1139 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1140 | Size of previous chunk, if allocated | |
1141 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1142 | Size of chunk, in bytes |M|P|
1143 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1144 | User data starts here... .
1146 . (malloc_usable_size() bytes) .
1148 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1149 | Size of chunk |
1150 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1153 Where "chunk" is the front of the chunk for the purpose of most of
1154 the malloc code, but "mem" is the pointer that is returned to the
1155 user. "Nextchunk" is the beginning of the next contiguous chunk.
1157 Chunks always begin on even word boundaries, so the mem portion
1158 (which is returned to the user) is also on an even word boundary, and
1159 thus at least double-word aligned.
1161 Free chunks are stored in circular doubly-linked lists, and look like this:
1163 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1164 | Size of previous chunk |
1165 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1166 `head:' | Size of chunk, in bytes |P|
1167 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1168 | Forward pointer to next chunk in list |
1169 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1170 | Back pointer to previous chunk in list |
1171 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1172 | Unused space (may be 0 bytes long) .
1175 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1176 `foot:' | Size of chunk, in bytes |
1177 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1179 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1180 chunk size (which is always a multiple of two words), is an in-use
1181 bit for the *previous* chunk. If that bit is *clear*, then the
1182 word before the current chunk size contains the previous chunk
1183 size, and can be used to find the front of the previous chunk.
1184 The very first chunk allocated always has this bit set,
1185 preventing access to non-existent (or non-owned) memory. If
1186 prev_inuse is set for any given chunk, then you CANNOT determine
1187 the size of the previous chunk, and might even get a memory
1188 addressing fault when trying to do so.
1190 Note that the `foot' of the current chunk is actually represented
1191 as the prev_size of the NEXT chunk. This makes it easier to
1192 deal with alignments etc but can be very confusing when trying
1193 to extend or adapt this code.
1195 The two exceptions to all this are
1197 1. The special chunk `top' doesn't bother using the
1198 trailing size field since there is no next contiguous chunk
1199 that would have to index off it. After initialization, `top'
1200 is forced to always exist. If it would become less than
1201 MINSIZE bytes long, it is replenished.
1203 2. Chunks allocated via mmap, which have the second-lowest-order
1204 bit M (IS_MMAPPED) set in their size fields. Because they are
1205 allocated one-by-one, each must contain its own trailing size field.
1210 ---------- Size and alignment checks and conversions ----------
1213 /* conversion from malloc headers to user pointers, and back */
1215 #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1216 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1218 /* The smallest possible chunk */
1219 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1221 /* The smallest size we can malloc is an aligned minimal chunk */
1223 #define MINSIZE \
1224 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1226 /* Check if m has acceptable alignment */
1228 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1230 #define misaligned_chunk(p) \
1231 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1232 & MALLOC_ALIGN_MASK)
1236 Check if a request is so large that it would wrap around zero when
1237 padded and aligned. To simplify some other code, the bound is made
1238 low enough so that adding MINSIZE will also not wrap around zero.
1241 #define REQUEST_OUT_OF_RANGE(req) \
1242 ((unsigned long) (req) >= \
1243 (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
1245 /* pad request bytes into a usable size -- internal version */
1247 #define request2size(req) \
1248 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1249 MINSIZE : \
1250 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1252 /* Same, except also perform argument check */
1254 #define checked_request2size(req, sz) \
1255 if (REQUEST_OUT_OF_RANGE (req)) { \
1256 __set_errno (ENOMEM); \
1257 return 0; \
1259 (sz) = request2size (req);
1262 --------------- Physical chunk operations ---------------
1266 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1267 #define PREV_INUSE 0x1
1269 /* extract inuse bit of previous chunk */
1270 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1273 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1274 #define IS_MMAPPED 0x2
1276 /* check for mmap()'ed chunk */
1277 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1280 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1281 from a non-main arena. This is only set immediately before handing
1282 the chunk to the user, if necessary. */
1283 #define NON_MAIN_ARENA 0x4
1285 /* check for chunk from non-main arena */
1286 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1290 Bits to mask off when extracting size
1292 Note: IS_MMAPPED is intentionally not masked off from size field in
1293 macros for which mmapped chunks should never be seen. This should
1294 cause helpful core dumps to occur if it is tried by accident by
1295 people extending or adapting this malloc.
1297 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1299 /* Get size, ignoring use bits */
1300 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1303 /* Ptr to next physical malloc_chunk. */
1304 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
1306 /* Ptr to previous physical malloc_chunk */
1307 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
1309 /* Treat space at ptr + offset as a chunk */
1310 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1312 /* extract p's inuse bit */
1313 #define inuse(p) \
1314 ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1316 /* set/clear chunk as being inuse without otherwise disturbing */
1317 #define set_inuse(p) \
1318 ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1320 #define clear_inuse(p) \
1321 ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1324 /* check/set/clear inuse bits in known places */
1325 #define inuse_bit_at_offset(p, s) \
1326 (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
1328 #define set_inuse_bit_at_offset(p, s) \
1329 (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
1331 #define clear_inuse_bit_at_offset(p, s) \
1332 (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
1335 /* Set size at head, without disturbing its use bit */
1336 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1338 /* Set size/use field */
1339 #define set_head(p, s) ((p)->size = (s))
1341 /* Set size at footer (only when chunk is not in use) */
1342 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
1346 -------------------- Internal data structures --------------------
1348 All internal state is held in an instance of malloc_state defined
1349 below. There are no other static variables, except in two optional
1350 cases:
1351 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1352 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1353 for mmap.
1355 Beware of lots of tricks that minimize the total bookkeeping space
1356 requirements. The result is a little over 1K bytes (for 4byte
1357 pointers and size_t.)
1361 Bins
1363 An array of bin headers for free chunks. Each bin is doubly
1364 linked. The bins are approximately proportionally (log) spaced.
1365 There are a lot of these bins (128). This may look excessive, but
1366 works very well in practice. Most bins hold sizes that are
1367 unusual as malloc request sizes, but are more usual for fragments
1368 and consolidated sets of chunks, which is what these bins hold, so
1369 they can be found quickly. All procedures maintain the invariant
1370 that no consolidated chunk physically borders another one, so each
1371 chunk in a list is known to be preceeded and followed by either
1372 inuse chunks or the ends of memory.
1374 Chunks in bins are kept in size order, with ties going to the
1375 approximately least recently used chunk. Ordering isn't needed
1376 for the small bins, which all contain the same-sized chunks, but
1377 facilitates best-fit allocation for larger chunks. These lists
1378 are just sequential. Keeping them in order almost never requires
1379 enough traversal to warrant using fancier ordered data
1380 structures.
1382 Chunks of the same size are linked with the most
1383 recently freed at the front, and allocations are taken from the
1384 back. This results in LRU (FIFO) allocation order, which tends
1385 to give each chunk an equal opportunity to be consolidated with
1386 adjacent freed chunks, resulting in larger free chunks and less
1387 fragmentation.
1389 To simplify use in double-linked lists, each bin header acts
1390 as a malloc_chunk. This avoids special-casing for headers.
1391 But to conserve space and improve locality, we allocate
1392 only the fd/bk pointers of bins, and then use repositioning tricks
1393 to treat these as the fields of a malloc_chunk*.
1396 typedef struct malloc_chunk *mbinptr;
1398 /* addressing -- note that bin_at(0) does not exist */
1399 #define bin_at(m, i) \
1400 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1401 - offsetof (struct malloc_chunk, fd))
1403 /* analog of ++bin */
1404 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1406 /* Reminders about list directionality within bins */
1407 #define first(b) ((b)->fd)
1408 #define last(b) ((b)->bk)
1410 /* Take a chunk off a bin list */
1411 #define unlink(AV, P, BK, FD) { \
1412 FD = P->fd; \
1413 BK = P->bk; \
1414 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1415 malloc_printerr (check_action, "corrupted double-linked list", P, AV); \
1416 else { \
1417 FD->bk = BK; \
1418 BK->fd = FD; \
1419 if (!in_smallbin_range (P->size) \
1420 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1421 if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
1422 || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
1423 malloc_printerr (check_action, \
1424 "corrupted double-linked list (not small)", \
1425 P, AV); \
1426 if (FD->fd_nextsize == NULL) { \
1427 if (P->fd_nextsize == P) \
1428 FD->fd_nextsize = FD->bk_nextsize = FD; \
1429 else { \
1430 FD->fd_nextsize = P->fd_nextsize; \
1431 FD->bk_nextsize = P->bk_nextsize; \
1432 P->fd_nextsize->bk_nextsize = FD; \
1433 P->bk_nextsize->fd_nextsize = FD; \
1435 } else { \
1436 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1437 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1444 Indexing
1446 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1447 8 bytes apart. Larger bins are approximately logarithmically spaced:
1449 64 bins of size 8
1450 32 bins of size 64
1451 16 bins of size 512
1452 8 bins of size 4096
1453 4 bins of size 32768
1454 2 bins of size 262144
1455 1 bin of size what's left
1457 There is actually a little bit of slop in the numbers in bin_index
1458 for the sake of speed. This makes no difference elsewhere.
1460 The bins top out around 1MB because we expect to service large
1461 requests via mmap.
1463 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1464 a valid chunk size the small bins are bumped up one.
1467 #define NBINS 128
1468 #define NSMALLBINS 64
1469 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1470 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1471 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1473 #define in_smallbin_range(sz) \
1474 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1476 #define smallbin_index(sz) \
1477 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1478 + SMALLBIN_CORRECTION)
1480 #define largebin_index_32(sz) \
1481 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1482 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1483 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1484 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1485 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1486 126)
1488 #define largebin_index_32_big(sz) \
1489 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1490 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1491 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1492 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1493 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1494 126)
1496 // XXX It remains to be seen whether it is good to keep the widths of
1497 // XXX the buckets the same or whether it should be scaled by a factor
1498 // XXX of two as well.
1499 #define largebin_index_64(sz) \
1500 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1501 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1502 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1503 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1504 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1505 126)
1507 #define largebin_index(sz) \
1508 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1509 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1510 : largebin_index_32 (sz))
1512 #define bin_index(sz) \
1513 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
1517 Unsorted chunks
1519 All remainders from chunk splits, as well as all returned chunks,
1520 are first placed in the "unsorted" bin. They are then placed
1521 in regular bins after malloc gives them ONE chance to be used before
1522 binning. So, basically, the unsorted_chunks list acts as a queue,
1523 with chunks being placed on it in free (and malloc_consolidate),
1524 and taken off (to be either used or placed in bins) in malloc.
1526 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1527 does not have to be taken into account in size comparisons.
1530 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1531 #define unsorted_chunks(M) (bin_at (M, 1))
1536 The top-most available chunk (i.e., the one bordering the end of
1537 available memory) is treated specially. It is never included in
1538 any bin, is used only if no other chunk is available, and is
1539 released back to the system if it is very large (see
1540 M_TRIM_THRESHOLD). Because top initially
1541 points to its own bin with initial zero size, thus forcing
1542 extension on the first malloc request, we avoid having any special
1543 code in malloc to check whether it even exists yet. But we still
1544 need to do so when getting memory from system, so we make
1545 initial_top treat the bin as a legal but unusable chunk during the
1546 interval between initialization and the first call to
1547 sysmalloc. (This is somewhat delicate, since it relies on
1548 the 2 preceding words to be zero during this interval as well.)
1551 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1552 #define initial_top(M) (unsorted_chunks (M))
1555 Binmap
1557 To help compensate for the large number of bins, a one-level index
1558 structure is used for bin-by-bin searching. `binmap' is a
1559 bitvector recording whether bins are definitely empty so they can
1560 be skipped over during during traversals. The bits are NOT always
1561 cleared as soon as bins are empty, but instead only
1562 when they are noticed to be empty during traversal in malloc.
1565 /* Conservatively use 32 bits per map word, even if on 64bit system */
1566 #define BINMAPSHIFT 5
1567 #define BITSPERMAP (1U << BINMAPSHIFT)
1568 #define BINMAPSIZE (NBINS / BITSPERMAP)
1570 #define idx2block(i) ((i) >> BINMAPSHIFT)
1571 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1573 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1574 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1575 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
1578 Fastbins
1580 An array of lists holding recently freed small chunks. Fastbins
1581 are not doubly linked. It is faster to single-link them, and
1582 since chunks are never removed from the middles of these lists,
1583 double linking is not necessary. Also, unlike regular bins, they
1584 are not even processed in FIFO order (they use faster LIFO) since
1585 ordering doesn't much matter in the transient contexts in which
1586 fastbins are normally used.
1588 Chunks in fastbins keep their inuse bit set, so they cannot
1589 be consolidated with other free chunks. malloc_consolidate
1590 releases all chunks in fastbins and consolidates them with
1591 other free chunks.
1594 typedef struct malloc_chunk *mfastbinptr;
1595 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1597 /* offset 2 to use otherwise unindexable first 2 bins */
1598 #define fastbin_index(sz) \
1599 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1602 /* The maximum fastbin request size we support */
1603 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1605 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
1608 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1609 that triggers automatic consolidation of possibly-surrounding
1610 fastbin chunks. This is a heuristic, so the exact value should not
1611 matter too much. It is defined at half the default trim threshold as a
1612 compromise heuristic to only attempt consolidation if it is likely
1613 to lead to trimming. However, it is not dynamically tunable, since
1614 consolidation reduces fragmentation surrounding large chunks even
1615 if trimming is not used.
1618 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1621 Since the lowest 2 bits in max_fast don't matter in size comparisons,
1622 they are used as flags.
1626 FASTCHUNKS_BIT held in max_fast indicates that there are probably
1627 some fastbin chunks. It is set true on entering a chunk into any
1628 fastbin, and cleared only in malloc_consolidate.
1630 The truth value is inverted so that have_fastchunks will be true
1631 upon startup (since statics are zero-filled), simplifying
1632 initialization checks.
1635 #define FASTCHUNKS_BIT (1U)
1637 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
1638 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1639 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
1642 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1643 regions. Otherwise, contiguity is exploited in merging together,
1644 when possible, results from consecutive MORECORE calls.
1646 The initial value comes from MORECORE_CONTIGUOUS, but is
1647 changed dynamically if mmap is ever used as an sbrk substitute.
1650 #define NONCONTIGUOUS_BIT (2U)
1652 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1653 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1654 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1655 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1657 /* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
1658 arena. Such an arena is no longer used to allocate chunks. Chunks
1659 allocated in that arena before detecting corruption are not freed. */
1661 #define ARENA_CORRUPTION_BIT (4U)
1663 #define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
1664 #define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
1667 Set value of max_fast.
1668 Use impossibly small value if 0.
1669 Precondition: there are no existing fastbin chunks.
1670 Setting the value clears fastchunk bit but preserves noncontiguous bit.
1673 #define set_max_fast(s) \
1674 global_max_fast = (((s) == 0) \
1675 ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1676 #define get_max_fast() global_max_fast
1680 ----------- Internal state representation and initialization -----------
1683 struct malloc_state
1685 /* Serialize access. */
1686 mutex_t mutex;
1688 /* Flags (formerly in max_fast). */
1689 int flags;
1691 /* Fastbins */
1692 mfastbinptr fastbinsY[NFASTBINS];
1694 /* Base of the topmost chunk -- not otherwise kept in a bin */
1695 mchunkptr top;
1697 /* The remainder from the most recent split of a small request */
1698 mchunkptr last_remainder;
1700 /* Normal bins packed as described above */
1701 mchunkptr bins[NBINS * 2 - 2];
1703 /* Bitmap of bins */
1704 unsigned int binmap[BINMAPSIZE];
1706 /* Linked list */
1707 struct malloc_state *next;
1709 /* Linked list for free arenas. Access to this field is serialized
1710 by free_list_lock in arena.c. */
1711 struct malloc_state *next_free;
1713 /* Number of threads attached to this arena. 0 if the arena is on
1714 the free list. Access to this field is serialized by
1715 free_list_lock in arena.c. */
1716 INTERNAL_SIZE_T attached_threads;
1718 /* Memory allocated from the system in this arena. */
1719 INTERNAL_SIZE_T system_mem;
1720 INTERNAL_SIZE_T max_system_mem;
1723 struct malloc_par
1725 /* Tunable parameters */
1726 unsigned long trim_threshold;
1727 INTERNAL_SIZE_T top_pad;
1728 INTERNAL_SIZE_T mmap_threshold;
1729 INTERNAL_SIZE_T arena_test;
1730 INTERNAL_SIZE_T arena_max;
1732 /* Memory map support */
1733 int n_mmaps;
1734 int n_mmaps_max;
1735 int max_n_mmaps;
1736 /* the mmap_threshold is dynamic, until the user sets
1737 it manually, at which point we need to disable any
1738 dynamic behavior. */
1739 int no_dyn_threshold;
1741 /* Statistics */
1742 INTERNAL_SIZE_T mmapped_mem;
1743 INTERNAL_SIZE_T max_mmapped_mem;
1745 /* First address handed out by MORECORE/sbrk. */
1746 char *sbrk_base;
1749 /* There are several instances of this struct ("arenas") in this
1750 malloc. If you are adapting this malloc in a way that does NOT use
1751 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1752 before using. This malloc relies on the property that malloc_state
1753 is initialized to all zeroes (as is true of C statics). */
1755 static struct malloc_state main_arena =
1757 .mutex = _LIBC_LOCK_INITIALIZER,
1758 .next = &main_arena,
1759 .attached_threads = 1
1762 /* There is only one instance of the malloc parameters. */
1764 static struct malloc_par mp_ =
1766 .top_pad = DEFAULT_TOP_PAD,
1767 .n_mmaps_max = DEFAULT_MMAP_MAX,
1768 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1769 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1770 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1771 .arena_test = NARENAS_FROM_NCORES (1)
1775 /* Non public mallopt parameters. */
1776 #define M_ARENA_TEST -7
1777 #define M_ARENA_MAX -8
1780 /* Maximum size of memory handled in fastbins. */
1781 static INTERNAL_SIZE_T global_max_fast;
1784 Initialize a malloc_state struct.
1786 This is called only from within malloc_consolidate, which needs
1787 be called in the same contexts anyway. It is never called directly
1788 outside of malloc_consolidate because some optimizing compilers try
1789 to inline it at all call points, which turns out not to be an
1790 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1793 static void
1794 malloc_init_state (mstate av)
1796 int i;
1797 mbinptr bin;
1799 /* Establish circular links for normal bins */
1800 for (i = 1; i < NBINS; ++i)
1802 bin = bin_at (av, i);
1803 bin->fd = bin->bk = bin;
1806 #if MORECORE_CONTIGUOUS
1807 if (av != &main_arena)
1808 #endif
1809 set_noncontiguous (av);
1810 if (av == &main_arena)
1811 set_max_fast (DEFAULT_MXFAST);
1812 av->flags |= FASTCHUNKS_BIT;
1814 av->top = initial_top (av);
1818 Other internal utilities operating on mstates
1821 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1822 static int systrim (size_t, mstate);
1823 static void malloc_consolidate (mstate);
1826 /* -------------- Early definitions for debugging hooks ---------------- */
1828 /* Define and initialize the hook variables. These weak definitions must
1829 appear before any use of the variables in a function (arena.c uses one). */
1830 #ifndef weak_variable
1831 /* In GNU libc we want the hook variables to be weak definitions to
1832 avoid a problem with Emacs. */
1833 # define weak_variable weak_function
1834 #endif
1836 /* Forward declarations. */
1837 static void *malloc_hook_ini (size_t sz,
1838 const void *caller) __THROW;
1839 static void *realloc_hook_ini (void *ptr, size_t sz,
1840 const void *caller) __THROW;
1841 static void *memalign_hook_ini (size_t alignment, size_t sz,
1842 const void *caller) __THROW;
1844 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1845 void weak_variable (*__free_hook) (void *__ptr,
1846 const void *) = NULL;
1847 void *weak_variable (*__malloc_hook)
1848 (size_t __size, const void *) = malloc_hook_ini;
1849 void *weak_variable (*__realloc_hook)
1850 (void *__ptr, size_t __size, const void *)
1851 = realloc_hook_ini;
1852 void *weak_variable (*__memalign_hook)
1853 (size_t __alignment, size_t __size, const void *)
1854 = memalign_hook_ini;
1855 void weak_variable (*__after_morecore_hook) (void) = NULL;
1858 /* ---------------- Error behavior ------------------------------------ */
1860 #ifndef DEFAULT_CHECK_ACTION
1861 # define DEFAULT_CHECK_ACTION 3
1862 #endif
1864 static int check_action = DEFAULT_CHECK_ACTION;
1867 /* ------------------ Testing support ----------------------------------*/
1869 static int perturb_byte;
1871 static void
1872 alloc_perturb (char *p, size_t n)
1874 if (__glibc_unlikely (perturb_byte))
1875 memset (p, perturb_byte ^ 0xff, n);
1878 static void
1879 free_perturb (char *p, size_t n)
1881 if (__glibc_unlikely (perturb_byte))
1882 memset (p, perturb_byte, n);
1887 #include <stap-probe.h>
1889 /* ------------------- Support for multiple arenas -------------------- */
1890 #include "arena.c"
1893 Debugging support
1895 These routines make a number of assertions about the states
1896 of data structures that should be true at all times. If any
1897 are not true, it's very likely that a user program has somehow
1898 trashed memory. (It's also possible that there is a coding error
1899 in malloc. In which case, please report it!)
1902 #if !MALLOC_DEBUG
1904 # define check_chunk(A, P)
1905 # define check_free_chunk(A, P)
1906 # define check_inuse_chunk(A, P)
1907 # define check_remalloced_chunk(A, P, N)
1908 # define check_malloced_chunk(A, P, N)
1909 # define check_malloc_state(A)
1911 #else
1913 # define check_chunk(A, P) do_check_chunk (A, P)
1914 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
1915 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1916 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1917 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1918 # define check_malloc_state(A) do_check_malloc_state (A)
1921 Properties of all chunks
1924 static void
1925 do_check_chunk (mstate av, mchunkptr p)
1927 unsigned long sz = chunksize (p);
1928 /* min and max possible addresses assuming contiguous allocation */
1929 char *max_address = (char *) (av->top) + chunksize (av->top);
1930 char *min_address = max_address - av->system_mem;
1932 if (!chunk_is_mmapped (p))
1934 /* Has legal address ... */
1935 if (p != av->top)
1937 if (contiguous (av))
1939 assert (((char *) p) >= min_address);
1940 assert (((char *) p + sz) <= ((char *) (av->top)));
1943 else
1945 /* top size is always at least MINSIZE */
1946 assert ((unsigned long) (sz) >= MINSIZE);
1947 /* top predecessor always marked inuse */
1948 assert (prev_inuse (p));
1951 else
1953 /* address is outside main heap */
1954 if (contiguous (av) && av->top != initial_top (av))
1956 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1958 /* chunk is page-aligned */
1959 assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
1960 /* mem is aligned */
1961 assert (aligned_OK (chunk2mem (p)));
1966 Properties of free chunks
1969 static void
1970 do_check_free_chunk (mstate av, mchunkptr p)
1972 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
1973 mchunkptr next = chunk_at_offset (p, sz);
1975 do_check_chunk (av, p);
1977 /* Chunk must claim to be free ... */
1978 assert (!inuse (p));
1979 assert (!chunk_is_mmapped (p));
1981 /* Unless a special marker, must have OK fields */
1982 if ((unsigned long) (sz) >= MINSIZE)
1984 assert ((sz & MALLOC_ALIGN_MASK) == 0);
1985 assert (aligned_OK (chunk2mem (p)));
1986 /* ... matching footer field */
1987 assert (next->prev_size == sz);
1988 /* ... and is fully consolidated */
1989 assert (prev_inuse (p));
1990 assert (next == av->top || inuse (next));
1992 /* ... and has minimally sane links */
1993 assert (p->fd->bk == p);
1994 assert (p->bk->fd == p);
1996 else /* markers are always of size SIZE_SZ */
1997 assert (sz == SIZE_SZ);
2001 Properties of inuse chunks
2004 static void
2005 do_check_inuse_chunk (mstate av, mchunkptr p)
2007 mchunkptr next;
2009 do_check_chunk (av, p);
2011 if (chunk_is_mmapped (p))
2012 return; /* mmapped chunks have no next/prev */
2014 /* Check whether it claims to be in use ... */
2015 assert (inuse (p));
2017 next = next_chunk (p);
2019 /* ... and is surrounded by OK chunks.
2020 Since more things can be checked with free chunks than inuse ones,
2021 if an inuse chunk borders them and debug is on, it's worth doing them.
2023 if (!prev_inuse (p))
2025 /* Note that we cannot even look at prev unless it is not inuse */
2026 mchunkptr prv = prev_chunk (p);
2027 assert (next_chunk (prv) == p);
2028 do_check_free_chunk (av, prv);
2031 if (next == av->top)
2033 assert (prev_inuse (next));
2034 assert (chunksize (next) >= MINSIZE);
2036 else if (!inuse (next))
2037 do_check_free_chunk (av, next);
2041 Properties of chunks recycled from fastbins
2044 static void
2045 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2047 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
2049 if (!chunk_is_mmapped (p))
2051 assert (av == arena_for_chunk (p));
2052 if (chunk_non_main_arena (p))
2053 assert (av != &main_arena);
2054 else
2055 assert (av == &main_arena);
2058 do_check_inuse_chunk (av, p);
2060 /* Legal size ... */
2061 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2062 assert ((unsigned long) (sz) >= MINSIZE);
2063 /* ... and alignment */
2064 assert (aligned_OK (chunk2mem (p)));
2065 /* chunk is less than MINSIZE more than request */
2066 assert ((long) (sz) - (long) (s) >= 0);
2067 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2071 Properties of nonrecycled chunks at the point they are malloced
2074 static void
2075 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2077 /* same as recycled case ... */
2078 do_check_remalloced_chunk (av, p, s);
2081 ... plus, must obey implementation invariant that prev_inuse is
2082 always true of any allocated chunk; i.e., that each allocated
2083 chunk borders either a previously allocated and still in-use
2084 chunk, or the base of its memory arena. This is ensured
2085 by making all allocations from the `lowest' part of any found
2086 chunk. This does not necessarily hold however for chunks
2087 recycled via fastbins.
2090 assert (prev_inuse (p));
2095 Properties of malloc_state.
2097 This may be useful for debugging malloc, as well as detecting user
2098 programmer errors that somehow write into malloc_state.
2100 If you are extending or experimenting with this malloc, you can
2101 probably figure out how to hack this routine to print out or
2102 display chunk addresses, sizes, bins, and other instrumentation.
2105 static void
2106 do_check_malloc_state (mstate av)
2108 int i;
2109 mchunkptr p;
2110 mchunkptr q;
2111 mbinptr b;
2112 unsigned int idx;
2113 INTERNAL_SIZE_T size;
2114 unsigned long total = 0;
2115 int max_fast_bin;
2117 /* internal size_t must be no wider than pointer type */
2118 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2120 /* alignment is a power of 2 */
2121 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2123 /* cannot run remaining checks until fully initialized */
2124 if (av->top == 0 || av->top == initial_top (av))
2125 return;
2127 /* pagesize is a power of 2 */
2128 assert (powerof2(GLRO (dl_pagesize)));
2130 /* A contiguous main_arena is consistent with sbrk_base. */
2131 if (av == &main_arena && contiguous (av))
2132 assert ((char *) mp_.sbrk_base + av->system_mem ==
2133 (char *) av->top + chunksize (av->top));
2135 /* properties of fastbins */
2137 /* max_fast is in allowed range */
2138 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2140 max_fast_bin = fastbin_index (get_max_fast ());
2142 for (i = 0; i < NFASTBINS; ++i)
2144 p = fastbin (av, i);
2146 /* The following test can only be performed for the main arena.
2147 While mallopt calls malloc_consolidate to get rid of all fast
2148 bins (especially those larger than the new maximum) this does
2149 only happen for the main arena. Trying to do this for any
2150 other arena would mean those arenas have to be locked and
2151 malloc_consolidate be called for them. This is excessive. And
2152 even if this is acceptable to somebody it still cannot solve
2153 the problem completely since if the arena is locked a
2154 concurrent malloc call might create a new arena which then
2155 could use the newly invalid fast bins. */
2157 /* all bins past max_fast are empty */
2158 if (av == &main_arena && i > max_fast_bin)
2159 assert (p == 0);
2161 while (p != 0)
2163 /* each chunk claims to be inuse */
2164 do_check_inuse_chunk (av, p);
2165 total += chunksize (p);
2166 /* chunk belongs in this bin */
2167 assert (fastbin_index (chunksize (p)) == i);
2168 p = p->fd;
2172 if (total != 0)
2173 assert (have_fastchunks (av));
2174 else if (!have_fastchunks (av))
2175 assert (total == 0);
2177 /* check normal bins */
2178 for (i = 1; i < NBINS; ++i)
2180 b = bin_at (av, i);
2182 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2183 if (i >= 2)
2185 unsigned int binbit = get_binmap (av, i);
2186 int empty = last (b) == b;
2187 if (!binbit)
2188 assert (empty);
2189 else if (!empty)
2190 assert (binbit);
2193 for (p = last (b); p != b; p = p->bk)
2195 /* each chunk claims to be free */
2196 do_check_free_chunk (av, p);
2197 size = chunksize (p);
2198 total += size;
2199 if (i >= 2)
2201 /* chunk belongs in bin */
2202 idx = bin_index (size);
2203 assert (idx == i);
2204 /* lists are sorted */
2205 assert (p->bk == b ||
2206 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2208 if (!in_smallbin_range (size))
2210 if (p->fd_nextsize != NULL)
2212 if (p->fd_nextsize == p)
2213 assert (p->bk_nextsize == p);
2214 else
2216 if (p->fd_nextsize == first (b))
2217 assert (chunksize (p) < chunksize (p->fd_nextsize));
2218 else
2219 assert (chunksize (p) > chunksize (p->fd_nextsize));
2221 if (p == first (b))
2222 assert (chunksize (p) > chunksize (p->bk_nextsize));
2223 else
2224 assert (chunksize (p) < chunksize (p->bk_nextsize));
2227 else
2228 assert (p->bk_nextsize == NULL);
2231 else if (!in_smallbin_range (size))
2232 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2233 /* chunk is followed by a legal chain of inuse chunks */
2234 for (q = next_chunk (p);
2235 (q != av->top && inuse (q) &&
2236 (unsigned long) (chunksize (q)) >= MINSIZE);
2237 q = next_chunk (q))
2238 do_check_inuse_chunk (av, q);
2242 /* top chunk is OK */
2243 check_chunk (av, av->top);
2245 #endif
2248 /* ----------------- Support for debugging hooks -------------------- */
2249 #include "hooks.c"
2252 /* ----------- Routines dealing with system allocation -------------- */
2255 sysmalloc handles malloc cases requiring more memory from the system.
2256 On entry, it is assumed that av->top does not have enough
2257 space to service request for nb bytes, thus requiring that av->top
2258 be extended or replaced.
2261 static void *
2262 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2264 mchunkptr old_top; /* incoming value of av->top */
2265 INTERNAL_SIZE_T old_size; /* its size */
2266 char *old_end; /* its end address */
2268 long size; /* arg to first MORECORE or mmap call */
2269 char *brk; /* return value from MORECORE */
2271 long correction; /* arg to 2nd MORECORE call */
2272 char *snd_brk; /* 2nd return val */
2274 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2275 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2276 char *aligned_brk; /* aligned offset into brk */
2278 mchunkptr p; /* the allocated/returned chunk */
2279 mchunkptr remainder; /* remainder from allocation */
2280 unsigned long remainder_size; /* its size */
2283 size_t pagesize = GLRO (dl_pagesize);
2284 bool tried_mmap = false;
2288 If have mmap, and the request size meets the mmap threshold, and
2289 the system supports mmap, and there are few enough currently
2290 allocated mmapped regions, try to directly map this request
2291 rather than expanding top.
2294 if (av == NULL
2295 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2296 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2298 char *mm; /* return value from mmap call*/
2300 try_mmap:
2302 Round up size to nearest page. For mmapped chunks, the overhead
2303 is one SIZE_SZ unit larger than for normal chunks, because there
2304 is no following chunk whose prev_size field could be used.
2306 See the front_misalign handling below, for glibc there is no
2307 need for further alignments unless we have have high alignment.
2309 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2310 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2311 else
2312 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2313 tried_mmap = true;
2315 /* Don't try if size wraps around 0 */
2316 if ((unsigned long) (size) > (unsigned long) (nb))
2318 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2320 if (mm != MAP_FAILED)
2323 The offset to the start of the mmapped region is stored
2324 in the prev_size field of the chunk. This allows us to adjust
2325 returned start address to meet alignment requirements here
2326 and in memalign(), and still be able to compute proper
2327 address argument for later munmap in free() and realloc().
2330 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2332 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2333 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2334 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2335 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2336 front_misalign = 0;
2338 else
2339 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2340 if (front_misalign > 0)
2342 correction = MALLOC_ALIGNMENT - front_misalign;
2343 p = (mchunkptr) (mm + correction);
2344 p->prev_size = correction;
2345 set_head (p, (size - correction) | IS_MMAPPED);
2347 else
2349 p = (mchunkptr) mm;
2350 set_head (p, size | IS_MMAPPED);
2353 /* update statistics */
2355 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2356 atomic_max (&mp_.max_n_mmaps, new);
2358 unsigned long sum;
2359 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2360 atomic_max (&mp_.max_mmapped_mem, sum);
2362 check_chunk (av, p);
2364 return chunk2mem (p);
2369 /* There are no usable arenas and mmap also failed. */
2370 if (av == NULL)
2371 return 0;
2373 /* Record incoming configuration of top */
2375 old_top = av->top;
2376 old_size = chunksize (old_top);
2377 old_end = (char *) (chunk_at_offset (old_top, old_size));
2379 brk = snd_brk = (char *) (MORECORE_FAILURE);
2382 If not the first time through, we require old_size to be
2383 at least MINSIZE and to have prev_inuse set.
2386 assert ((old_top == initial_top (av) && old_size == 0) ||
2387 ((unsigned long) (old_size) >= MINSIZE &&
2388 prev_inuse (old_top) &&
2389 ((unsigned long) old_end & (pagesize - 1)) == 0));
2391 /* Precondition: not enough current space to satisfy nb request */
2392 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2395 if (av != &main_arena)
2397 heap_info *old_heap, *heap;
2398 size_t old_heap_size;
2400 /* First try to extend the current heap. */
2401 old_heap = heap_for_ptr (old_top);
2402 old_heap_size = old_heap->size;
2403 if ((long) (MINSIZE + nb - old_size) > 0
2404 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2406 av->system_mem += old_heap->size - old_heap_size;
2407 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2408 | PREV_INUSE);
2410 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2412 /* Use a newly allocated heap. */
2413 heap->ar_ptr = av;
2414 heap->prev = old_heap;
2415 av->system_mem += heap->size;
2416 /* Set up the new top. */
2417 top (av) = chunk_at_offset (heap, sizeof (*heap));
2418 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2420 /* Setup fencepost and free the old top chunk with a multiple of
2421 MALLOC_ALIGNMENT in size. */
2422 /* The fencepost takes at least MINSIZE bytes, because it might
2423 become the top chunk again later. Note that a footer is set
2424 up, too, although the chunk is marked in use. */
2425 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2426 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2427 if (old_size >= MINSIZE)
2429 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2430 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2431 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2432 _int_free (av, old_top, 1);
2434 else
2436 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2437 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2440 else if (!tried_mmap)
2441 /* We can at least try to use to mmap memory. */
2442 goto try_mmap;
2444 else /* av == main_arena */
2447 { /* Request enough space for nb + pad + overhead */
2448 size = nb + mp_.top_pad + MINSIZE;
2451 If contiguous, we can subtract out existing space that we hope to
2452 combine with new space. We add it back later only if
2453 we don't actually get contiguous space.
2456 if (contiguous (av))
2457 size -= old_size;
2460 Round to a multiple of page size.
2461 If MORECORE is not contiguous, this ensures that we only call it
2462 with whole-page arguments. And if MORECORE is contiguous and
2463 this is not first time through, this preserves page-alignment of
2464 previous calls. Otherwise, we correct to page-align below.
2467 size = ALIGN_UP (size, pagesize);
2470 Don't try to call MORECORE if argument is so big as to appear
2471 negative. Note that since mmap takes size_t arg, it may succeed
2472 below even if we cannot call MORECORE.
2475 if (size > 0)
2477 brk = (char *) (MORECORE (size));
2478 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2481 if (brk != (char *) (MORECORE_FAILURE))
2483 /* Call the `morecore' hook if necessary. */
2484 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2485 if (__builtin_expect (hook != NULL, 0))
2486 (*hook)();
2488 else
2491 If have mmap, try using it as a backup when MORECORE fails or
2492 cannot be used. This is worth doing on systems that have "holes" in
2493 address space, so sbrk cannot extend to give contiguous space, but
2494 space is available elsewhere. Note that we ignore mmap max count
2495 and threshold limits, since the space will not be used as a
2496 segregated mmap region.
2499 /* Cannot merge with old top, so add its size back in */
2500 if (contiguous (av))
2501 size = ALIGN_UP (size + old_size, pagesize);
2503 /* If we are relying on mmap as backup, then use larger units */
2504 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2505 size = MMAP_AS_MORECORE_SIZE;
2507 /* Don't try if size wraps around 0 */
2508 if ((unsigned long) (size) > (unsigned long) (nb))
2510 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2512 if (mbrk != MAP_FAILED)
2514 /* We do not need, and cannot use, another sbrk call to find end */
2515 brk = mbrk;
2516 snd_brk = brk + size;
2519 Record that we no longer have a contiguous sbrk region.
2520 After the first time mmap is used as backup, we do not
2521 ever rely on contiguous space since this could incorrectly
2522 bridge regions.
2524 set_noncontiguous (av);
2529 if (brk != (char *) (MORECORE_FAILURE))
2531 if (mp_.sbrk_base == 0)
2532 mp_.sbrk_base = brk;
2533 av->system_mem += size;
2536 If MORECORE extends previous space, we can likewise extend top size.
2539 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2540 set_head (old_top, (size + old_size) | PREV_INUSE);
2542 else if (contiguous (av) && old_size && brk < old_end)
2544 /* Oops! Someone else killed our space.. Can't touch anything. */
2545 malloc_printerr (3, "break adjusted to free malloc space", brk,
2546 av);
2550 Otherwise, make adjustments:
2552 * If the first time through or noncontiguous, we need to call sbrk
2553 just to find out where the end of memory lies.
2555 * We need to ensure that all returned chunks from malloc will meet
2556 MALLOC_ALIGNMENT
2558 * If there was an intervening foreign sbrk, we need to adjust sbrk
2559 request size to account for fact that we will not be able to
2560 combine new space with existing space in old_top.
2562 * Almost all systems internally allocate whole pages at a time, in
2563 which case we might as well use the whole last page of request.
2564 So we allocate enough more memory to hit a page boundary now,
2565 which in turn causes future contiguous calls to page-align.
2568 else
2570 front_misalign = 0;
2571 end_misalign = 0;
2572 correction = 0;
2573 aligned_brk = brk;
2575 /* handle contiguous cases */
2576 if (contiguous (av))
2578 /* Count foreign sbrk as system_mem. */
2579 if (old_size)
2580 av->system_mem += brk - old_end;
2582 /* Guarantee alignment of first new chunk made from this space */
2584 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2585 if (front_misalign > 0)
2588 Skip over some bytes to arrive at an aligned position.
2589 We don't need to specially mark these wasted front bytes.
2590 They will never be accessed anyway because
2591 prev_inuse of av->top (and any chunk created from its start)
2592 is always true after initialization.
2595 correction = MALLOC_ALIGNMENT - front_misalign;
2596 aligned_brk += correction;
2600 If this isn't adjacent to existing space, then we will not
2601 be able to merge with old_top space, so must add to 2nd request.
2604 correction += old_size;
2606 /* Extend the end address to hit a page boundary */
2607 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2608 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2610 assert (correction >= 0);
2611 snd_brk = (char *) (MORECORE (correction));
2614 If can't allocate correction, try to at least find out current
2615 brk. It might be enough to proceed without failing.
2617 Note that if second sbrk did NOT fail, we assume that space
2618 is contiguous with first sbrk. This is a safe assumption unless
2619 program is multithreaded but doesn't use locks and a foreign sbrk
2620 occurred between our first and second calls.
2623 if (snd_brk == (char *) (MORECORE_FAILURE))
2625 correction = 0;
2626 snd_brk = (char *) (MORECORE (0));
2628 else
2630 /* Call the `morecore' hook if necessary. */
2631 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2632 if (__builtin_expect (hook != NULL, 0))
2633 (*hook)();
2637 /* handle non-contiguous cases */
2638 else
2640 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2641 /* MORECORE/mmap must correctly align */
2642 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2643 else
2645 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2646 if (front_misalign > 0)
2649 Skip over some bytes to arrive at an aligned position.
2650 We don't need to specially mark these wasted front bytes.
2651 They will never be accessed anyway because
2652 prev_inuse of av->top (and any chunk created from its start)
2653 is always true after initialization.
2656 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2660 /* Find out current end of memory */
2661 if (snd_brk == (char *) (MORECORE_FAILURE))
2663 snd_brk = (char *) (MORECORE (0));
2667 /* Adjust top based on results of second sbrk */
2668 if (snd_brk != (char *) (MORECORE_FAILURE))
2670 av->top = (mchunkptr) aligned_brk;
2671 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2672 av->system_mem += correction;
2675 If not the first time through, we either have a
2676 gap due to foreign sbrk or a non-contiguous region. Insert a
2677 double fencepost at old_top to prevent consolidation with space
2678 we don't own. These fenceposts are artificial chunks that are
2679 marked as inuse and are in any case too small to use. We need
2680 two to make sizes and alignments work out.
2683 if (old_size != 0)
2686 Shrink old_top to insert fenceposts, keeping size a
2687 multiple of MALLOC_ALIGNMENT. We know there is at least
2688 enough space in old_top to do this.
2690 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2691 set_head (old_top, old_size | PREV_INUSE);
2694 Note that the following assignments completely overwrite
2695 old_top when old_size was previously MINSIZE. This is
2696 intentional. We need the fencepost, even if old_top otherwise gets
2697 lost.
2699 chunk_at_offset (old_top, old_size)->size =
2700 (2 * SIZE_SZ) | PREV_INUSE;
2702 chunk_at_offset (old_top, old_size + 2 * SIZE_SZ)->size =
2703 (2 * SIZE_SZ) | PREV_INUSE;
2705 /* If possible, release the rest. */
2706 if (old_size >= MINSIZE)
2708 _int_free (av, old_top, 1);
2714 } /* if (av != &main_arena) */
2716 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2717 av->max_system_mem = av->system_mem;
2718 check_malloc_state (av);
2720 /* finally, do the allocation */
2721 p = av->top;
2722 size = chunksize (p);
2724 /* check that one of the above allocation paths succeeded */
2725 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2727 remainder_size = size - nb;
2728 remainder = chunk_at_offset (p, nb);
2729 av->top = remainder;
2730 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2731 set_head (remainder, remainder_size | PREV_INUSE);
2732 check_malloced_chunk (av, p, nb);
2733 return chunk2mem (p);
2736 /* catch all failure paths */
2737 __set_errno (ENOMEM);
2738 return 0;
2743 systrim is an inverse of sorts to sysmalloc. It gives memory back
2744 to the system (via negative arguments to sbrk) if there is unused
2745 memory at the `high' end of the malloc pool. It is called
2746 automatically by free() when top space exceeds the trim
2747 threshold. It is also called by the public malloc_trim routine. It
2748 returns 1 if it actually released any memory, else 0.
2751 static int
2752 systrim (size_t pad, mstate av)
2754 long top_size; /* Amount of top-most memory */
2755 long extra; /* Amount to release */
2756 long released; /* Amount actually released */
2757 char *current_brk; /* address returned by pre-check sbrk call */
2758 char *new_brk; /* address returned by post-check sbrk call */
2759 size_t pagesize;
2760 long top_area;
2762 pagesize = GLRO (dl_pagesize);
2763 top_size = chunksize (av->top);
2765 top_area = top_size - MINSIZE - 1;
2766 if (top_area <= pad)
2767 return 0;
2769 /* Release in pagesize units and round down to the nearest page. */
2770 extra = ALIGN_DOWN(top_area - pad, pagesize);
2772 if (extra == 0)
2773 return 0;
2776 Only proceed if end of memory is where we last set it.
2777 This avoids problems if there were foreign sbrk calls.
2779 current_brk = (char *) (MORECORE (0));
2780 if (current_brk == (char *) (av->top) + top_size)
2783 Attempt to release memory. We ignore MORECORE return value,
2784 and instead call again to find out where new end of memory is.
2785 This avoids problems if first call releases less than we asked,
2786 of if failure somehow altered brk value. (We could still
2787 encounter problems if it altered brk in some very bad way,
2788 but the only thing we can do is adjust anyway, which will cause
2789 some downstream failure.)
2792 MORECORE (-extra);
2793 /* Call the `morecore' hook if necessary. */
2794 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2795 if (__builtin_expect (hook != NULL, 0))
2796 (*hook)();
2797 new_brk = (char *) (MORECORE (0));
2799 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2801 if (new_brk != (char *) MORECORE_FAILURE)
2803 released = (long) (current_brk - new_brk);
2805 if (released != 0)
2807 /* Success. Adjust top. */
2808 av->system_mem -= released;
2809 set_head (av->top, (top_size - released) | PREV_INUSE);
2810 check_malloc_state (av);
2811 return 1;
2815 return 0;
2818 static void
2819 internal_function
2820 munmap_chunk (mchunkptr p)
2822 INTERNAL_SIZE_T size = chunksize (p);
2824 assert (chunk_is_mmapped (p));
2826 uintptr_t block = (uintptr_t) p - p->prev_size;
2827 size_t total_size = p->prev_size + size;
2828 /* Unfortunately we have to do the compilers job by hand here. Normally
2829 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2830 page size. But gcc does not recognize the optimization possibility
2831 (in the moment at least) so we combine the two values into one before
2832 the bit test. */
2833 if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
2835 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2836 chunk2mem (p), NULL);
2837 return;
2840 atomic_decrement (&mp_.n_mmaps);
2841 atomic_add (&mp_.mmapped_mem, -total_size);
2843 /* If munmap failed the process virtual memory address space is in a
2844 bad shape. Just leave the block hanging around, the process will
2845 terminate shortly anyway since not much can be done. */
2846 __munmap ((char *) block, total_size);
2849 #if HAVE_MREMAP
2851 static mchunkptr
2852 internal_function
2853 mremap_chunk (mchunkptr p, size_t new_size)
2855 size_t pagesize = GLRO (dl_pagesize);
2856 INTERNAL_SIZE_T offset = p->prev_size;
2857 INTERNAL_SIZE_T size = chunksize (p);
2858 char *cp;
2860 assert (chunk_is_mmapped (p));
2861 assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
2863 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2864 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
2866 /* No need to remap if the number of pages does not change. */
2867 if (size + offset == new_size)
2868 return p;
2870 cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
2871 MREMAP_MAYMOVE);
2873 if (cp == MAP_FAILED)
2874 return 0;
2876 p = (mchunkptr) (cp + offset);
2878 assert (aligned_OK (chunk2mem (p)));
2880 assert ((p->prev_size == offset));
2881 set_head (p, (new_size - offset) | IS_MMAPPED);
2883 INTERNAL_SIZE_T new;
2884 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
2885 + new_size - size - offset;
2886 atomic_max (&mp_.max_mmapped_mem, new);
2887 return p;
2889 #endif /* HAVE_MREMAP */
2891 /*------------------------ Public wrappers. --------------------------------*/
2893 void *
2894 __libc_malloc (size_t bytes)
2896 mstate ar_ptr;
2897 void *victim;
2899 void *(*hook) (size_t, const void *)
2900 = atomic_forced_read (__malloc_hook);
2901 if (__builtin_expect (hook != NULL, 0))
2902 return (*hook)(bytes, RETURN_ADDRESS (0));
2904 arena_get (ar_ptr, bytes);
2906 victim = _int_malloc (ar_ptr, bytes);
2907 /* Retry with another arena only if we were able to find a usable arena
2908 before. */
2909 if (!victim && ar_ptr != NULL)
2911 LIBC_PROBE (memory_malloc_retry, 1, bytes);
2912 ar_ptr = arena_get_retry (ar_ptr, bytes);
2913 victim = _int_malloc (ar_ptr, bytes);
2916 if (ar_ptr != NULL)
2917 (void) mutex_unlock (&ar_ptr->mutex);
2919 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
2920 ar_ptr == arena_for_chunk (mem2chunk (victim)));
2921 return victim;
2923 libc_hidden_def (__libc_malloc)
2925 void
2926 __libc_free (void *mem)
2928 mstate ar_ptr;
2929 mchunkptr p; /* chunk corresponding to mem */
2931 void (*hook) (void *, const void *)
2932 = atomic_forced_read (__free_hook);
2933 if (__builtin_expect (hook != NULL, 0))
2935 (*hook)(mem, RETURN_ADDRESS (0));
2936 return;
2939 if (mem == 0) /* free(0) has no effect */
2940 return;
2942 p = mem2chunk (mem);
2944 if (chunk_is_mmapped (p)) /* release mmapped memory. */
2946 /* see if the dynamic brk/mmap threshold needs adjusting */
2947 if (!mp_.no_dyn_threshold
2948 && p->size > mp_.mmap_threshold
2949 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
2951 mp_.mmap_threshold = chunksize (p);
2952 mp_.trim_threshold = 2 * mp_.mmap_threshold;
2953 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
2954 mp_.mmap_threshold, mp_.trim_threshold);
2956 munmap_chunk (p);
2957 return;
2960 ar_ptr = arena_for_chunk (p);
2961 _int_free (ar_ptr, p, 0);
2963 libc_hidden_def (__libc_free)
2965 void *
2966 __libc_realloc (void *oldmem, size_t bytes)
2968 mstate ar_ptr;
2969 INTERNAL_SIZE_T nb; /* padded request size */
2971 void *newp; /* chunk to return */
2973 void *(*hook) (void *, size_t, const void *) =
2974 atomic_forced_read (__realloc_hook);
2975 if (__builtin_expect (hook != NULL, 0))
2976 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
2978 #if REALLOC_ZERO_BYTES_FREES
2979 if (bytes == 0 && oldmem != NULL)
2981 __libc_free (oldmem); return 0;
2983 #endif
2985 /* realloc of null is supposed to be same as malloc */
2986 if (oldmem == 0)
2987 return __libc_malloc (bytes);
2989 /* chunk corresponding to oldmem */
2990 const mchunkptr oldp = mem2chunk (oldmem);
2991 /* its size */
2992 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
2994 if (chunk_is_mmapped (oldp))
2995 ar_ptr = NULL;
2996 else
2997 ar_ptr = arena_for_chunk (oldp);
2999 /* Little security check which won't hurt performance: the
3000 allocator never wrapps around at the end of the address space.
3001 Therefore we can exclude some size values which might appear
3002 here by accident or by "design" from some intruder. */
3003 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3004 || __builtin_expect (misaligned_chunk (oldp), 0))
3006 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
3007 ar_ptr);
3008 return NULL;
3011 checked_request2size (bytes, nb);
3013 if (chunk_is_mmapped (oldp))
3015 void *newmem;
3017 #if HAVE_MREMAP
3018 newp = mremap_chunk (oldp, nb);
3019 if (newp)
3020 return chunk2mem (newp);
3021 #endif
3022 /* Note the extra SIZE_SZ overhead. */
3023 if (oldsize - SIZE_SZ >= nb)
3024 return oldmem; /* do nothing */
3026 /* Must alloc, copy, free. */
3027 newmem = __libc_malloc (bytes);
3028 if (newmem == 0)
3029 return 0; /* propagate failure */
3031 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3032 munmap_chunk (oldp);
3033 return newmem;
3036 (void) mutex_lock (&ar_ptr->mutex);
3038 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3040 (void) mutex_unlock (&ar_ptr->mutex);
3041 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3042 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3044 if (newp == NULL)
3046 /* Try harder to allocate memory in other arenas. */
3047 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3048 newp = __libc_malloc (bytes);
3049 if (newp != NULL)
3051 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3052 _int_free (ar_ptr, oldp, 0);
3056 return newp;
3058 libc_hidden_def (__libc_realloc)
3060 void *
3061 __libc_memalign (size_t alignment, size_t bytes)
3063 void *address = RETURN_ADDRESS (0);
3064 return _mid_memalign (alignment, bytes, address);
3067 static void *
3068 _mid_memalign (size_t alignment, size_t bytes, void *address)
3070 mstate ar_ptr;
3071 void *p;
3073 void *(*hook) (size_t, size_t, const void *) =
3074 atomic_forced_read (__memalign_hook);
3075 if (__builtin_expect (hook != NULL, 0))
3076 return (*hook)(alignment, bytes, address);
3078 /* If we need less alignment than we give anyway, just relay to malloc. */
3079 if (alignment <= MALLOC_ALIGNMENT)
3080 return __libc_malloc (bytes);
3082 /* Otherwise, ensure that it is at least a minimum chunk size */
3083 if (alignment < MINSIZE)
3084 alignment = MINSIZE;
3086 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3087 power of 2 and will cause overflow in the check below. */
3088 if (alignment > SIZE_MAX / 2 + 1)
3090 __set_errno (EINVAL);
3091 return 0;
3094 /* Check for overflow. */
3095 if (bytes > SIZE_MAX - alignment - MINSIZE)
3097 __set_errno (ENOMEM);
3098 return 0;
3102 /* Make sure alignment is power of 2. */
3103 if (!powerof2 (alignment))
3105 size_t a = MALLOC_ALIGNMENT * 2;
3106 while (a < alignment)
3107 a <<= 1;
3108 alignment = a;
3111 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3113 p = _int_memalign (ar_ptr, alignment, bytes);
3114 if (!p && ar_ptr != NULL)
3116 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3117 ar_ptr = arena_get_retry (ar_ptr, bytes);
3118 p = _int_memalign (ar_ptr, alignment, bytes);
3121 if (ar_ptr != NULL)
3122 (void) mutex_unlock (&ar_ptr->mutex);
3124 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3125 ar_ptr == arena_for_chunk (mem2chunk (p)));
3126 return p;
3128 /* For ISO C11. */
3129 weak_alias (__libc_memalign, aligned_alloc)
3130 libc_hidden_def (__libc_memalign)
3132 void *
3133 __libc_valloc (size_t bytes)
3135 if (__malloc_initialized < 0)
3136 ptmalloc_init ();
3138 void *address = RETURN_ADDRESS (0);
3139 size_t pagesize = GLRO (dl_pagesize);
3140 return _mid_memalign (pagesize, bytes, address);
3143 void *
3144 __libc_pvalloc (size_t bytes)
3146 if (__malloc_initialized < 0)
3147 ptmalloc_init ();
3149 void *address = RETURN_ADDRESS (0);
3150 size_t pagesize = GLRO (dl_pagesize);
3151 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
3153 /* Check for overflow. */
3154 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
3156 __set_errno (ENOMEM);
3157 return 0;
3160 return _mid_memalign (pagesize, rounded_bytes, address);
3163 void *
3164 __libc_calloc (size_t n, size_t elem_size)
3166 mstate av;
3167 mchunkptr oldtop, p;
3168 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3169 void *mem;
3170 unsigned long clearsize;
3171 unsigned long nclears;
3172 INTERNAL_SIZE_T *d;
3174 /* size_t is unsigned so the behavior on overflow is defined. */
3175 bytes = n * elem_size;
3176 #define HALF_INTERNAL_SIZE_T \
3177 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3178 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3180 if (elem_size != 0 && bytes / elem_size != n)
3182 __set_errno (ENOMEM);
3183 return 0;
3187 void *(*hook) (size_t, const void *) =
3188 atomic_forced_read (__malloc_hook);
3189 if (__builtin_expect (hook != NULL, 0))
3191 sz = bytes;
3192 mem = (*hook)(sz, RETURN_ADDRESS (0));
3193 if (mem == 0)
3194 return 0;
3196 return memset (mem, 0, sz);
3199 sz = bytes;
3201 arena_get (av, sz);
3202 if (av)
3204 /* Check if we hand out the top chunk, in which case there may be no
3205 need to clear. */
3206 #if MORECORE_CLEARS
3207 oldtop = top (av);
3208 oldtopsize = chunksize (top (av));
3209 # if MORECORE_CLEARS < 2
3210 /* Only newly allocated memory is guaranteed to be cleared. */
3211 if (av == &main_arena &&
3212 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3213 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3214 # endif
3215 if (av != &main_arena)
3217 heap_info *heap = heap_for_ptr (oldtop);
3218 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3219 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3221 #endif
3223 else
3225 /* No usable arenas. */
3226 oldtop = 0;
3227 oldtopsize = 0;
3229 mem = _int_malloc (av, sz);
3232 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3233 av == arena_for_chunk (mem2chunk (mem)));
3235 if (mem == 0 && av != NULL)
3237 LIBC_PROBE (memory_calloc_retry, 1, sz);
3238 av = arena_get_retry (av, sz);
3239 mem = _int_malloc (av, sz);
3242 if (av != NULL)
3243 (void) mutex_unlock (&av->mutex);
3245 /* Allocation failed even after a retry. */
3246 if (mem == 0)
3247 return 0;
3249 p = mem2chunk (mem);
3251 /* Two optional cases in which clearing not necessary */
3252 if (chunk_is_mmapped (p))
3254 if (__builtin_expect (perturb_byte, 0))
3255 return memset (mem, 0, sz);
3257 return mem;
3260 csz = chunksize (p);
3262 #if MORECORE_CLEARS
3263 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3265 /* clear only the bytes from non-freshly-sbrked memory */
3266 csz = oldtopsize;
3268 #endif
3270 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3271 contents have an odd number of INTERNAL_SIZE_T-sized words;
3272 minimally 3. */
3273 d = (INTERNAL_SIZE_T *) mem;
3274 clearsize = csz - SIZE_SZ;
3275 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3276 assert (nclears >= 3);
3278 if (nclears > 9)
3279 return memset (d, 0, clearsize);
3281 else
3283 *(d + 0) = 0;
3284 *(d + 1) = 0;
3285 *(d + 2) = 0;
3286 if (nclears > 4)
3288 *(d + 3) = 0;
3289 *(d + 4) = 0;
3290 if (nclears > 6)
3292 *(d + 5) = 0;
3293 *(d + 6) = 0;
3294 if (nclears > 8)
3296 *(d + 7) = 0;
3297 *(d + 8) = 0;
3303 return mem;
3307 ------------------------------ malloc ------------------------------
3310 static void *
3311 _int_malloc (mstate av, size_t bytes)
3313 INTERNAL_SIZE_T nb; /* normalized request size */
3314 unsigned int idx; /* associated bin index */
3315 mbinptr bin; /* associated bin */
3317 mchunkptr victim; /* inspected/selected chunk */
3318 INTERNAL_SIZE_T size; /* its size */
3319 int victim_index; /* its bin index */
3321 mchunkptr remainder; /* remainder from a split */
3322 unsigned long remainder_size; /* its size */
3324 unsigned int block; /* bit map traverser */
3325 unsigned int bit; /* bit map traverser */
3326 unsigned int map; /* current word of binmap */
3328 mchunkptr fwd; /* misc temp for linking */
3329 mchunkptr bck; /* misc temp for linking */
3331 const char *errstr = NULL;
3334 Convert request size to internal form by adding SIZE_SZ bytes
3335 overhead plus possibly more to obtain necessary alignment and/or
3336 to obtain a size of at least MINSIZE, the smallest allocatable
3337 size. Also, checked_request2size traps (returning 0) request sizes
3338 that are so large that they wrap around zero when padded and
3339 aligned.
3342 checked_request2size (bytes, nb);
3344 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3345 mmap. */
3346 if (__glibc_unlikely (av == NULL))
3348 void *p = sysmalloc (nb, av);
3349 if (p != NULL)
3350 alloc_perturb (p, bytes);
3351 return p;
3355 If the size qualifies as a fastbin, first check corresponding bin.
3356 This code is safe to execute even if av is not yet initialized, so we
3357 can try it without checking, which saves some time on this fast path.
3360 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3362 idx = fastbin_index (nb);
3363 mfastbinptr *fb = &fastbin (av, idx);
3364 mchunkptr pp = *fb;
3367 victim = pp;
3368 if (victim == NULL)
3369 break;
3371 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3372 != victim);
3373 if (victim != 0)
3375 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3377 errstr = "malloc(): memory corruption (fast)";
3378 errout:
3379 malloc_printerr (check_action, errstr, chunk2mem (victim), av);
3380 return NULL;
3382 check_remalloced_chunk (av, victim, nb);
3383 void *p = chunk2mem (victim);
3384 alloc_perturb (p, bytes);
3385 return p;
3390 If a small request, check regular bin. Since these "smallbins"
3391 hold one size each, no searching within bins is necessary.
3392 (For a large request, we need to wait until unsorted chunks are
3393 processed to find best fit. But for small ones, fits are exact
3394 anyway, so we can check now, which is faster.)
3397 if (in_smallbin_range (nb))
3399 idx = smallbin_index (nb);
3400 bin = bin_at (av, idx);
3402 if ((victim = last (bin)) != bin)
3404 if (victim == 0) /* initialization check */
3405 malloc_consolidate (av);
3406 else
3408 bck = victim->bk;
3409 if (__glibc_unlikely (bck->fd != victim))
3411 errstr = "malloc(): smallbin double linked list corrupted";
3412 goto errout;
3414 set_inuse_bit_at_offset (victim, nb);
3415 bin->bk = bck;
3416 bck->fd = bin;
3418 if (av != &main_arena)
3419 victim->size |= NON_MAIN_ARENA;
3420 check_malloced_chunk (av, victim, nb);
3421 void *p = chunk2mem (victim);
3422 alloc_perturb (p, bytes);
3423 return p;
3429 If this is a large request, consolidate fastbins before continuing.
3430 While it might look excessive to kill all fastbins before
3431 even seeing if there is space available, this avoids
3432 fragmentation problems normally associated with fastbins.
3433 Also, in practice, programs tend to have runs of either small or
3434 large requests, but less often mixtures, so consolidation is not
3435 invoked all that often in most programs. And the programs that
3436 it is called frequently in otherwise tend to fragment.
3439 else
3441 idx = largebin_index (nb);
3442 if (have_fastchunks (av))
3443 malloc_consolidate (av);
3447 Process recently freed or remaindered chunks, taking one only if
3448 it is exact fit, or, if this a small request, the chunk is remainder from
3449 the most recent non-exact fit. Place other traversed chunks in
3450 bins. Note that this step is the only place in any routine where
3451 chunks are placed in bins.
3453 The outer loop here is needed because we might not realize until
3454 near the end of malloc that we should have consolidated, so must
3455 do so and retry. This happens at most once, and only when we would
3456 otherwise need to expand memory to service a "small" request.
3459 for (;; )
3461 int iters = 0;
3462 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3464 bck = victim->bk;
3465 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3466 || __builtin_expect (victim->size > av->system_mem, 0))
3467 malloc_printerr (check_action, "malloc(): memory corruption",
3468 chunk2mem (victim), av);
3469 size = chunksize (victim);
3472 If a small request, try to use last remainder if it is the
3473 only chunk in unsorted bin. This helps promote locality for
3474 runs of consecutive small requests. This is the only
3475 exception to best-fit, and applies only when there is
3476 no exact fit for a small chunk.
3479 if (in_smallbin_range (nb) &&
3480 bck == unsorted_chunks (av) &&
3481 victim == av->last_remainder &&
3482 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3484 /* split and reattach remainder */
3485 remainder_size = size - nb;
3486 remainder = chunk_at_offset (victim, nb);
3487 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3488 av->last_remainder = remainder;
3489 remainder->bk = remainder->fd = unsorted_chunks (av);
3490 if (!in_smallbin_range (remainder_size))
3492 remainder->fd_nextsize = NULL;
3493 remainder->bk_nextsize = NULL;
3496 set_head (victim, nb | PREV_INUSE |
3497 (av != &main_arena ? NON_MAIN_ARENA : 0));
3498 set_head (remainder, remainder_size | PREV_INUSE);
3499 set_foot (remainder, remainder_size);
3501 check_malloced_chunk (av, victim, nb);
3502 void *p = chunk2mem (victim);
3503 alloc_perturb (p, bytes);
3504 return p;
3507 /* remove from unsorted list */
3508 unsorted_chunks (av)->bk = bck;
3509 bck->fd = unsorted_chunks (av);
3511 /* Take now instead of binning if exact fit */
3513 if (size == nb)
3515 set_inuse_bit_at_offset (victim, size);
3516 if (av != &main_arena)
3517 victim->size |= NON_MAIN_ARENA;
3518 check_malloced_chunk (av, victim, nb);
3519 void *p = chunk2mem (victim);
3520 alloc_perturb (p, bytes);
3521 return p;
3524 /* place chunk in bin */
3526 if (in_smallbin_range (size))
3528 victim_index = smallbin_index (size);
3529 bck = bin_at (av, victim_index);
3530 fwd = bck->fd;
3532 else
3534 victim_index = largebin_index (size);
3535 bck = bin_at (av, victim_index);
3536 fwd = bck->fd;
3538 /* maintain large bins in sorted order */
3539 if (fwd != bck)
3541 /* Or with inuse bit to speed comparisons */
3542 size |= PREV_INUSE;
3543 /* if smaller than smallest, bypass loop below */
3544 assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
3545 if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
3547 fwd = bck;
3548 bck = bck->bk;
3550 victim->fd_nextsize = fwd->fd;
3551 victim->bk_nextsize = fwd->fd->bk_nextsize;
3552 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3554 else
3556 assert ((fwd->size & NON_MAIN_ARENA) == 0);
3557 while ((unsigned long) size < fwd->size)
3559 fwd = fwd->fd_nextsize;
3560 assert ((fwd->size & NON_MAIN_ARENA) == 0);
3563 if ((unsigned long) size == (unsigned long) fwd->size)
3564 /* Always insert in the second position. */
3565 fwd = fwd->fd;
3566 else
3568 victim->fd_nextsize = fwd;
3569 victim->bk_nextsize = fwd->bk_nextsize;
3570 fwd->bk_nextsize = victim;
3571 victim->bk_nextsize->fd_nextsize = victim;
3573 bck = fwd->bk;
3576 else
3577 victim->fd_nextsize = victim->bk_nextsize = victim;
3580 mark_bin (av, victim_index);
3581 victim->bk = bck;
3582 victim->fd = fwd;
3583 fwd->bk = victim;
3584 bck->fd = victim;
3586 #define MAX_ITERS 10000
3587 if (++iters >= MAX_ITERS)
3588 break;
3592 If a large request, scan through the chunks of current bin in
3593 sorted order to find smallest that fits. Use the skip list for this.
3596 if (!in_smallbin_range (nb))
3598 bin = bin_at (av, idx);
3600 /* skip scan if empty or largest chunk is too small */
3601 if ((victim = first (bin)) != bin &&
3602 (unsigned long) (victim->size) >= (unsigned long) (nb))
3604 victim = victim->bk_nextsize;
3605 while (((unsigned long) (size = chunksize (victim)) <
3606 (unsigned long) (nb)))
3607 victim = victim->bk_nextsize;
3609 /* Avoid removing the first entry for a size so that the skip
3610 list does not have to be rerouted. */
3611 if (victim != last (bin) && victim->size == victim->fd->size)
3612 victim = victim->fd;
3614 remainder_size = size - nb;
3615 unlink (av, victim, bck, fwd);
3617 /* Exhaust */
3618 if (remainder_size < MINSIZE)
3620 set_inuse_bit_at_offset (victim, size);
3621 if (av != &main_arena)
3622 victim->size |= NON_MAIN_ARENA;
3624 /* Split */
3625 else
3627 remainder = chunk_at_offset (victim, nb);
3628 /* We cannot assume the unsorted list is empty and therefore
3629 have to perform a complete insert here. */
3630 bck = unsorted_chunks (av);
3631 fwd = bck->fd;
3632 if (__glibc_unlikely (fwd->bk != bck))
3634 errstr = "malloc(): corrupted unsorted chunks";
3635 goto errout;
3637 remainder->bk = bck;
3638 remainder->fd = fwd;
3639 bck->fd = remainder;
3640 fwd->bk = remainder;
3641 if (!in_smallbin_range (remainder_size))
3643 remainder->fd_nextsize = NULL;
3644 remainder->bk_nextsize = NULL;
3646 set_head (victim, nb | PREV_INUSE |
3647 (av != &main_arena ? NON_MAIN_ARENA : 0));
3648 set_head (remainder, remainder_size | PREV_INUSE);
3649 set_foot (remainder, remainder_size);
3651 check_malloced_chunk (av, victim, nb);
3652 void *p = chunk2mem (victim);
3653 alloc_perturb (p, bytes);
3654 return p;
3659 Search for a chunk by scanning bins, starting with next largest
3660 bin. This search is strictly by best-fit; i.e., the smallest
3661 (with ties going to approximately the least recently used) chunk
3662 that fits is selected.
3664 The bitmap avoids needing to check that most blocks are nonempty.
3665 The particular case of skipping all bins during warm-up phases
3666 when no chunks have been returned yet is faster than it might look.
3669 ++idx;
3670 bin = bin_at (av, idx);
3671 block = idx2block (idx);
3672 map = av->binmap[block];
3673 bit = idx2bit (idx);
3675 for (;; )
3677 /* Skip rest of block if there are no more set bits in this block. */
3678 if (bit > map || bit == 0)
3682 if (++block >= BINMAPSIZE) /* out of bins */
3683 goto use_top;
3685 while ((map = av->binmap[block]) == 0);
3687 bin = bin_at (av, (block << BINMAPSHIFT));
3688 bit = 1;
3691 /* Advance to bin with set bit. There must be one. */
3692 while ((bit & map) == 0)
3694 bin = next_bin (bin);
3695 bit <<= 1;
3696 assert (bit != 0);
3699 /* Inspect the bin. It is likely to be non-empty */
3700 victim = last (bin);
3702 /* If a false alarm (empty bin), clear the bit. */
3703 if (victim == bin)
3705 av->binmap[block] = map &= ~bit; /* Write through */
3706 bin = next_bin (bin);
3707 bit <<= 1;
3710 else
3712 size = chunksize (victim);
3714 /* We know the first chunk in this bin is big enough to use. */
3715 assert ((unsigned long) (size) >= (unsigned long) (nb));
3717 remainder_size = size - nb;
3719 /* unlink */
3720 unlink (av, victim, bck, fwd);
3722 /* Exhaust */
3723 if (remainder_size < MINSIZE)
3725 set_inuse_bit_at_offset (victim, size);
3726 if (av != &main_arena)
3727 victim->size |= NON_MAIN_ARENA;
3730 /* Split */
3731 else
3733 remainder = chunk_at_offset (victim, nb);
3735 /* We cannot assume the unsorted list is empty and therefore
3736 have to perform a complete insert here. */
3737 bck = unsorted_chunks (av);
3738 fwd = bck->fd;
3739 if (__glibc_unlikely (fwd->bk != bck))
3741 errstr = "malloc(): corrupted unsorted chunks 2";
3742 goto errout;
3744 remainder->bk = bck;
3745 remainder->fd = fwd;
3746 bck->fd = remainder;
3747 fwd->bk = remainder;
3749 /* advertise as last remainder */
3750 if (in_smallbin_range (nb))
3751 av->last_remainder = remainder;
3752 if (!in_smallbin_range (remainder_size))
3754 remainder->fd_nextsize = NULL;
3755 remainder->bk_nextsize = NULL;
3757 set_head (victim, nb | PREV_INUSE |
3758 (av != &main_arena ? NON_MAIN_ARENA : 0));
3759 set_head (remainder, remainder_size | PREV_INUSE);
3760 set_foot (remainder, remainder_size);
3762 check_malloced_chunk (av, victim, nb);
3763 void *p = chunk2mem (victim);
3764 alloc_perturb (p, bytes);
3765 return p;
3769 use_top:
3771 If large enough, split off the chunk bordering the end of memory
3772 (held in av->top). Note that this is in accord with the best-fit
3773 search rule. In effect, av->top is treated as larger (and thus
3774 less well fitting) than any other available chunk since it can
3775 be extended to be as large as necessary (up to system
3776 limitations).
3778 We require that av->top always exists (i.e., has size >=
3779 MINSIZE) after initialization, so if it would otherwise be
3780 exhausted by current request, it is replenished. (The main
3781 reason for ensuring it exists is that we may need MINSIZE space
3782 to put in fenceposts in sysmalloc.)
3785 victim = av->top;
3786 size = chunksize (victim);
3788 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
3790 remainder_size = size - nb;
3791 remainder = chunk_at_offset (victim, nb);
3792 av->top = remainder;
3793 set_head (victim, nb | PREV_INUSE |
3794 (av != &main_arena ? NON_MAIN_ARENA : 0));
3795 set_head (remainder, remainder_size | PREV_INUSE);
3797 check_malloced_chunk (av, victim, nb);
3798 void *p = chunk2mem (victim);
3799 alloc_perturb (p, bytes);
3800 return p;
3803 /* When we are using atomic ops to free fast chunks we can get
3804 here for all block sizes. */
3805 else if (have_fastchunks (av))
3807 malloc_consolidate (av);
3808 /* restore original bin index */
3809 if (in_smallbin_range (nb))
3810 idx = smallbin_index (nb);
3811 else
3812 idx = largebin_index (nb);
3816 Otherwise, relay to handle system-dependent cases
3818 else
3820 void *p = sysmalloc (nb, av);
3821 if (p != NULL)
3822 alloc_perturb (p, bytes);
3823 return p;
3829 ------------------------------ free ------------------------------
3832 static void
3833 _int_free (mstate av, mchunkptr p, int have_lock)
3835 INTERNAL_SIZE_T size; /* its size */
3836 mfastbinptr *fb; /* associated fastbin */
3837 mchunkptr nextchunk; /* next contiguous chunk */
3838 INTERNAL_SIZE_T nextsize; /* its size */
3839 int nextinuse; /* true if nextchunk is used */
3840 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3841 mchunkptr bck; /* misc temp for linking */
3842 mchunkptr fwd; /* misc temp for linking */
3844 const char *errstr = NULL;
3845 int locked = 0;
3847 size = chunksize (p);
3849 /* Little security check which won't hurt performance: the
3850 allocator never wrapps around at the end of the address space.
3851 Therefore we can exclude some size values which might appear
3852 here by accident or by "design" from some intruder. */
3853 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
3854 || __builtin_expect (misaligned_chunk (p), 0))
3856 errstr = "free(): invalid pointer";
3857 errout:
3858 if (!have_lock && locked)
3859 (void) mutex_unlock (&av->mutex);
3860 malloc_printerr (check_action, errstr, chunk2mem (p), av);
3861 return;
3863 /* We know that each chunk is at least MINSIZE bytes in size or a
3864 multiple of MALLOC_ALIGNMENT. */
3865 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
3867 errstr = "free(): invalid size";
3868 goto errout;
3871 check_inuse_chunk(av, p);
3874 If eligible, place chunk on a fastbin so it can be found
3875 and used quickly in malloc.
3878 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
3880 #if TRIM_FASTBINS
3882 If TRIM_FASTBINS set, don't place chunks
3883 bordering top into fastbins
3885 && (chunk_at_offset(p, size) != av->top)
3886 #endif
3889 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
3890 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3891 >= av->system_mem, 0))
3893 /* We might not have a lock at this point and concurrent modifications
3894 of system_mem might have let to a false positive. Redo the test
3895 after getting the lock. */
3896 if (have_lock
3897 || ({ assert (locked == 0);
3898 mutex_lock(&av->mutex);
3899 locked = 1;
3900 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
3901 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3904 errstr = "free(): invalid next size (fast)";
3905 goto errout;
3907 if (! have_lock)
3909 (void)mutex_unlock(&av->mutex);
3910 locked = 0;
3914 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3916 set_fastchunks(av);
3917 unsigned int idx = fastbin_index(size);
3918 fb = &fastbin (av, idx);
3920 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
3921 mchunkptr old = *fb, old2;
3922 unsigned int old_idx = ~0u;
3925 /* Check that the top of the bin is not the record we are going to add
3926 (i.e., double free). */
3927 if (__builtin_expect (old == p, 0))
3929 errstr = "double free or corruption (fasttop)";
3930 goto errout;
3932 /* Check that size of fastbin chunk at the top is the same as
3933 size of the chunk that we are adding. We can dereference OLD
3934 only if we have the lock, otherwise it might have already been
3935 deallocated. See use of OLD_IDX below for the actual check. */
3936 if (have_lock && old != NULL)
3937 old_idx = fastbin_index(chunksize(old));
3938 p->fd = old2 = old;
3940 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
3942 if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
3944 errstr = "invalid fastbin entry (free)";
3945 goto errout;
3950 Consolidate other non-mmapped chunks as they arrive.
3953 else if (!chunk_is_mmapped(p)) {
3954 if (! have_lock) {
3955 (void)mutex_lock(&av->mutex);
3956 locked = 1;
3959 nextchunk = chunk_at_offset(p, size);
3961 /* Lightweight tests: check whether the block is already the
3962 top block. */
3963 if (__glibc_unlikely (p == av->top))
3965 errstr = "double free or corruption (top)";
3966 goto errout;
3968 /* Or whether the next chunk is beyond the boundaries of the arena. */
3969 if (__builtin_expect (contiguous (av)
3970 && (char *) nextchunk
3971 >= ((char *) av->top + chunksize(av->top)), 0))
3973 errstr = "double free or corruption (out)";
3974 goto errout;
3976 /* Or whether the block is actually not marked used. */
3977 if (__glibc_unlikely (!prev_inuse(nextchunk)))
3979 errstr = "double free or corruption (!prev)";
3980 goto errout;
3983 nextsize = chunksize(nextchunk);
3984 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
3985 || __builtin_expect (nextsize >= av->system_mem, 0))
3987 errstr = "free(): invalid next size (normal)";
3988 goto errout;
3991 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3993 /* consolidate backward */
3994 if (!prev_inuse(p)) {
3995 prevsize = p->prev_size;
3996 size += prevsize;
3997 p = chunk_at_offset(p, -((long) prevsize));
3998 unlink(av, p, bck, fwd);
4001 if (nextchunk != av->top) {
4002 /* get and clear inuse bit */
4003 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4005 /* consolidate forward */
4006 if (!nextinuse) {
4007 unlink(av, nextchunk, bck, fwd);
4008 size += nextsize;
4009 } else
4010 clear_inuse_bit_at_offset(nextchunk, 0);
4013 Place the chunk in unsorted chunk list. Chunks are
4014 not placed into regular bins until after they have
4015 been given one chance to be used in malloc.
4018 bck = unsorted_chunks(av);
4019 fwd = bck->fd;
4020 if (__glibc_unlikely (fwd->bk != bck))
4022 errstr = "free(): corrupted unsorted chunks";
4023 goto errout;
4025 p->fd = fwd;
4026 p->bk = bck;
4027 if (!in_smallbin_range(size))
4029 p->fd_nextsize = NULL;
4030 p->bk_nextsize = NULL;
4032 bck->fd = p;
4033 fwd->bk = p;
4035 set_head(p, size | PREV_INUSE);
4036 set_foot(p, size);
4038 check_free_chunk(av, p);
4042 If the chunk borders the current high end of memory,
4043 consolidate into top
4046 else {
4047 size += nextsize;
4048 set_head(p, size | PREV_INUSE);
4049 av->top = p;
4050 check_chunk(av, p);
4054 If freeing a large space, consolidate possibly-surrounding
4055 chunks. Then, if the total unused topmost memory exceeds trim
4056 threshold, ask malloc_trim to reduce top.
4058 Unless max_fast is 0, we don't know if there are fastbins
4059 bordering top, so we cannot tell for sure whether threshold
4060 has been reached unless fastbins are consolidated. But we
4061 don't want to consolidate on each free. As a compromise,
4062 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4063 is reached.
4066 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4067 if (have_fastchunks(av))
4068 malloc_consolidate(av);
4070 if (av == &main_arena) {
4071 #ifndef MORECORE_CANNOT_TRIM
4072 if ((unsigned long)(chunksize(av->top)) >=
4073 (unsigned long)(mp_.trim_threshold))
4074 systrim(mp_.top_pad, av);
4075 #endif
4076 } else {
4077 /* Always try heap_trim(), even if the top chunk is not
4078 large, because the corresponding heap might go away. */
4079 heap_info *heap = heap_for_ptr(top(av));
4081 assert(heap->ar_ptr == av);
4082 heap_trim(heap, mp_.top_pad);
4086 if (! have_lock) {
4087 assert (locked);
4088 (void)mutex_unlock(&av->mutex);
4092 If the chunk was allocated via mmap, release via munmap().
4095 else {
4096 munmap_chunk (p);
4101 ------------------------- malloc_consolidate -------------------------
4103 malloc_consolidate is a specialized version of free() that tears
4104 down chunks held in fastbins. Free itself cannot be used for this
4105 purpose since, among other things, it might place chunks back onto
4106 fastbins. So, instead, we need to use a minor variant of the same
4107 code.
4109 Also, because this routine needs to be called the first time through
4110 malloc anyway, it turns out to be the perfect place to trigger
4111 initialization code.
4114 static void malloc_consolidate(mstate av)
4116 mfastbinptr* fb; /* current fastbin being consolidated */
4117 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4118 mchunkptr p; /* current chunk being consolidated */
4119 mchunkptr nextp; /* next chunk to consolidate */
4120 mchunkptr unsorted_bin; /* bin header */
4121 mchunkptr first_unsorted; /* chunk to link to */
4123 /* These have same use as in free() */
4124 mchunkptr nextchunk;
4125 INTERNAL_SIZE_T size;
4126 INTERNAL_SIZE_T nextsize;
4127 INTERNAL_SIZE_T prevsize;
4128 int nextinuse;
4129 mchunkptr bck;
4130 mchunkptr fwd;
4133 If max_fast is 0, we know that av hasn't
4134 yet been initialized, in which case do so below
4137 if (get_max_fast () != 0) {
4138 clear_fastchunks(av);
4140 unsorted_bin = unsorted_chunks(av);
4143 Remove each chunk from fast bin and consolidate it, placing it
4144 then in unsorted bin. Among other reasons for doing this,
4145 placing in unsorted bin avoids needing to calculate actual bins
4146 until malloc is sure that chunks aren't immediately going to be
4147 reused anyway.
4150 maxfb = &fastbin (av, NFASTBINS - 1);
4151 fb = &fastbin (av, 0);
4152 do {
4153 p = atomic_exchange_acq (fb, NULL);
4154 if (p != 0) {
4155 do {
4156 check_inuse_chunk(av, p);
4157 nextp = p->fd;
4159 /* Slightly streamlined version of consolidation code in free() */
4160 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4161 nextchunk = chunk_at_offset(p, size);
4162 nextsize = chunksize(nextchunk);
4164 if (!prev_inuse(p)) {
4165 prevsize = p->prev_size;
4166 size += prevsize;
4167 p = chunk_at_offset(p, -((long) prevsize));
4168 unlink(av, p, bck, fwd);
4171 if (nextchunk != av->top) {
4172 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4174 if (!nextinuse) {
4175 size += nextsize;
4176 unlink(av, nextchunk, bck, fwd);
4177 } else
4178 clear_inuse_bit_at_offset(nextchunk, 0);
4180 first_unsorted = unsorted_bin->fd;
4181 unsorted_bin->fd = p;
4182 first_unsorted->bk = p;
4184 if (!in_smallbin_range (size)) {
4185 p->fd_nextsize = NULL;
4186 p->bk_nextsize = NULL;
4189 set_head(p, size | PREV_INUSE);
4190 p->bk = unsorted_bin;
4191 p->fd = first_unsorted;
4192 set_foot(p, size);
4195 else {
4196 size += nextsize;
4197 set_head(p, size | PREV_INUSE);
4198 av->top = p;
4201 } while ( (p = nextp) != 0);
4204 } while (fb++ != maxfb);
4206 else {
4207 malloc_init_state(av);
4208 check_malloc_state(av);
4213 ------------------------------ realloc ------------------------------
4216 void*
4217 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4218 INTERNAL_SIZE_T nb)
4220 mchunkptr newp; /* chunk to return */
4221 INTERNAL_SIZE_T newsize; /* its size */
4222 void* newmem; /* corresponding user mem */
4224 mchunkptr next; /* next contiguous chunk after oldp */
4226 mchunkptr remainder; /* extra space at end of newp */
4227 unsigned long remainder_size; /* its size */
4229 mchunkptr bck; /* misc temp for linking */
4230 mchunkptr fwd; /* misc temp for linking */
4232 unsigned long copysize; /* bytes to copy */
4233 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4234 INTERNAL_SIZE_T* s; /* copy source */
4235 INTERNAL_SIZE_T* d; /* copy destination */
4237 const char *errstr = NULL;
4239 /* oldmem size */
4240 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4241 || __builtin_expect (oldsize >= av->system_mem, 0))
4243 errstr = "realloc(): invalid old size";
4244 errout:
4245 malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
4246 return NULL;
4249 check_inuse_chunk (av, oldp);
4251 /* All callers already filter out mmap'ed chunks. */
4252 assert (!chunk_is_mmapped (oldp));
4254 next = chunk_at_offset (oldp, oldsize);
4255 INTERNAL_SIZE_T nextsize = chunksize (next);
4256 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4257 || __builtin_expect (nextsize >= av->system_mem, 0))
4259 errstr = "realloc(): invalid next size";
4260 goto errout;
4263 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4265 /* already big enough; split below */
4266 newp = oldp;
4267 newsize = oldsize;
4270 else
4272 /* Try to expand forward into top */
4273 if (next == av->top &&
4274 (unsigned long) (newsize = oldsize + nextsize) >=
4275 (unsigned long) (nb + MINSIZE))
4277 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4278 av->top = chunk_at_offset (oldp, nb);
4279 set_head (av->top, (newsize - nb) | PREV_INUSE);
4280 check_inuse_chunk (av, oldp);
4281 return chunk2mem (oldp);
4284 /* Try to expand forward into next chunk; split off remainder below */
4285 else if (next != av->top &&
4286 !inuse (next) &&
4287 (unsigned long) (newsize = oldsize + nextsize) >=
4288 (unsigned long) (nb))
4290 newp = oldp;
4291 unlink (av, next, bck, fwd);
4294 /* allocate, copy, free */
4295 else
4297 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4298 if (newmem == 0)
4299 return 0; /* propagate failure */
4301 newp = mem2chunk (newmem);
4302 newsize = chunksize (newp);
4305 Avoid copy if newp is next chunk after oldp.
4307 if (newp == next)
4309 newsize += oldsize;
4310 newp = oldp;
4312 else
4315 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4316 We know that contents have an odd number of
4317 INTERNAL_SIZE_T-sized words; minimally 3.
4320 copysize = oldsize - SIZE_SZ;
4321 s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
4322 d = (INTERNAL_SIZE_T *) (newmem);
4323 ncopies = copysize / sizeof (INTERNAL_SIZE_T);
4324 assert (ncopies >= 3);
4326 if (ncopies > 9)
4327 memcpy (d, s, copysize);
4329 else
4331 *(d + 0) = *(s + 0);
4332 *(d + 1) = *(s + 1);
4333 *(d + 2) = *(s + 2);
4334 if (ncopies > 4)
4336 *(d + 3) = *(s + 3);
4337 *(d + 4) = *(s + 4);
4338 if (ncopies > 6)
4340 *(d + 5) = *(s + 5);
4341 *(d + 6) = *(s + 6);
4342 if (ncopies > 8)
4344 *(d + 7) = *(s + 7);
4345 *(d + 8) = *(s + 8);
4351 _int_free (av, oldp, 1);
4352 check_inuse_chunk (av, newp);
4353 return chunk2mem (newp);
4358 /* If possible, free extra space in old or extended chunk */
4360 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4362 remainder_size = newsize - nb;
4364 if (remainder_size < MINSIZE) /* not enough extra to split off */
4366 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4367 set_inuse_bit_at_offset (newp, newsize);
4369 else /* split remainder */
4371 remainder = chunk_at_offset (newp, nb);
4372 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4373 set_head (remainder, remainder_size | PREV_INUSE |
4374 (av != &main_arena ? NON_MAIN_ARENA : 0));
4375 /* Mark remainder as inuse so free() won't complain */
4376 set_inuse_bit_at_offset (remainder, remainder_size);
4377 _int_free (av, remainder, 1);
4380 check_inuse_chunk (av, newp);
4381 return chunk2mem (newp);
4385 ------------------------------ memalign ------------------------------
4388 static void *
4389 _int_memalign (mstate av, size_t alignment, size_t bytes)
4391 INTERNAL_SIZE_T nb; /* padded request size */
4392 char *m; /* memory returned by malloc call */
4393 mchunkptr p; /* corresponding chunk */
4394 char *brk; /* alignment point within p */
4395 mchunkptr newp; /* chunk to return */
4396 INTERNAL_SIZE_T newsize; /* its size */
4397 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4398 mchunkptr remainder; /* spare room at end to split off */
4399 unsigned long remainder_size; /* its size */
4400 INTERNAL_SIZE_T size;
4404 checked_request2size (bytes, nb);
4407 Strategy: find a spot within that chunk that meets the alignment
4408 request, and then possibly free the leading and trailing space.
4412 /* Call malloc with worst case padding to hit alignment. */
4414 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4416 if (m == 0)
4417 return 0; /* propagate failure */
4419 p = mem2chunk (m);
4421 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4423 { /*
4424 Find an aligned spot inside chunk. Since we need to give back
4425 leading space in a chunk of at least MINSIZE, if the first
4426 calculation places us at a spot with less than MINSIZE leader,
4427 we can move to the next aligned spot -- we've allocated enough
4428 total room so that this is always possible.
4430 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4431 - ((signed long) alignment));
4432 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4433 brk += alignment;
4435 newp = (mchunkptr) brk;
4436 leadsize = brk - (char *) (p);
4437 newsize = chunksize (p) - leadsize;
4439 /* For mmapped chunks, just adjust offset */
4440 if (chunk_is_mmapped (p))
4442 newp->prev_size = p->prev_size + leadsize;
4443 set_head (newp, newsize | IS_MMAPPED);
4444 return chunk2mem (newp);
4447 /* Otherwise, give back leader, use the rest */
4448 set_head (newp, newsize | PREV_INUSE |
4449 (av != &main_arena ? NON_MAIN_ARENA : 0));
4450 set_inuse_bit_at_offset (newp, newsize);
4451 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4452 _int_free (av, p, 1);
4453 p = newp;
4455 assert (newsize >= nb &&
4456 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
4459 /* Also give back spare room at the end */
4460 if (!chunk_is_mmapped (p))
4462 size = chunksize (p);
4463 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4465 remainder_size = size - nb;
4466 remainder = chunk_at_offset (p, nb);
4467 set_head (remainder, remainder_size | PREV_INUSE |
4468 (av != &main_arena ? NON_MAIN_ARENA : 0));
4469 set_head_size (p, nb);
4470 _int_free (av, remainder, 1);
4474 check_inuse_chunk (av, p);
4475 return chunk2mem (p);
4480 ------------------------------ malloc_trim ------------------------------
4483 static int
4484 mtrim (mstate av, size_t pad)
4486 /* Don't touch corrupt arenas. */
4487 if (arena_is_corrupt (av))
4488 return 0;
4490 /* Ensure initialization/consolidation */
4491 malloc_consolidate (av);
4493 const size_t ps = GLRO (dl_pagesize);
4494 int psindex = bin_index (ps);
4495 const size_t psm1 = ps - 1;
4497 int result = 0;
4498 for (int i = 1; i < NBINS; ++i)
4499 if (i == 1 || i >= psindex)
4501 mbinptr bin = bin_at (av, i);
4503 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4505 INTERNAL_SIZE_T size = chunksize (p);
4507 if (size > psm1 + sizeof (struct malloc_chunk))
4509 /* See whether the chunk contains at least one unused page. */
4510 char *paligned_mem = (char *) (((uintptr_t) p
4511 + sizeof (struct malloc_chunk)
4512 + psm1) & ~psm1);
4514 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4515 assert ((char *) p + size > paligned_mem);
4517 /* This is the size we could potentially free. */
4518 size -= paligned_mem - (char *) p;
4520 if (size > psm1)
4522 #if MALLOC_DEBUG
4523 /* When debugging we simulate destroying the memory
4524 content. */
4525 memset (paligned_mem, 0x89, size & ~psm1);
4526 #endif
4527 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4529 result = 1;
4535 #ifndef MORECORE_CANNOT_TRIM
4536 return result | (av == &main_arena ? systrim (pad, av) : 0);
4538 #else
4539 return result;
4540 #endif
4545 __malloc_trim (size_t s)
4547 int result = 0;
4549 if (__malloc_initialized < 0)
4550 ptmalloc_init ();
4552 mstate ar_ptr = &main_arena;
4555 (void) mutex_lock (&ar_ptr->mutex);
4556 result |= mtrim (ar_ptr, s);
4557 (void) mutex_unlock (&ar_ptr->mutex);
4559 ar_ptr = ar_ptr->next;
4561 while (ar_ptr != &main_arena);
4563 return result;
4568 ------------------------- malloc_usable_size -------------------------
4571 static size_t
4572 musable (void *mem)
4574 mchunkptr p;
4575 if (mem != 0)
4577 p = mem2chunk (mem);
4579 if (__builtin_expect (using_malloc_checking == 1, 0))
4580 return malloc_check_get_size (p);
4582 if (chunk_is_mmapped (p))
4583 return chunksize (p) - 2 * SIZE_SZ;
4584 else if (inuse (p))
4585 return chunksize (p) - SIZE_SZ;
4587 return 0;
4591 size_t
4592 __malloc_usable_size (void *m)
4594 size_t result;
4596 result = musable (m);
4597 return result;
4601 ------------------------------ mallinfo ------------------------------
4602 Accumulate malloc statistics for arena AV into M.
4605 static void
4606 int_mallinfo (mstate av, struct mallinfo *m)
4608 size_t i;
4609 mbinptr b;
4610 mchunkptr p;
4611 INTERNAL_SIZE_T avail;
4612 INTERNAL_SIZE_T fastavail;
4613 int nblocks;
4614 int nfastblocks;
4616 /* Ensure initialization */
4617 if (av->top == 0)
4618 malloc_consolidate (av);
4620 check_malloc_state (av);
4622 /* Account for top */
4623 avail = chunksize (av->top);
4624 nblocks = 1; /* top always exists */
4626 /* traverse fastbins */
4627 nfastblocks = 0;
4628 fastavail = 0;
4630 for (i = 0; i < NFASTBINS; ++i)
4632 for (p = fastbin (av, i); p != 0; p = p->fd)
4634 ++nfastblocks;
4635 fastavail += chunksize (p);
4639 avail += fastavail;
4641 /* traverse regular bins */
4642 for (i = 1; i < NBINS; ++i)
4644 b = bin_at (av, i);
4645 for (p = last (b); p != b; p = p->bk)
4647 ++nblocks;
4648 avail += chunksize (p);
4652 m->smblks += nfastblocks;
4653 m->ordblks += nblocks;
4654 m->fordblks += avail;
4655 m->uordblks += av->system_mem - avail;
4656 m->arena += av->system_mem;
4657 m->fsmblks += fastavail;
4658 if (av == &main_arena)
4660 m->hblks = mp_.n_mmaps;
4661 m->hblkhd = mp_.mmapped_mem;
4662 m->usmblks = 0;
4663 m->keepcost = chunksize (av->top);
4668 struct mallinfo
4669 __libc_mallinfo (void)
4671 struct mallinfo m;
4672 mstate ar_ptr;
4674 if (__malloc_initialized < 0)
4675 ptmalloc_init ();
4677 memset (&m, 0, sizeof (m));
4678 ar_ptr = &main_arena;
4681 (void) mutex_lock (&ar_ptr->mutex);
4682 int_mallinfo (ar_ptr, &m);
4683 (void) mutex_unlock (&ar_ptr->mutex);
4685 ar_ptr = ar_ptr->next;
4687 while (ar_ptr != &main_arena);
4689 return m;
4693 ------------------------------ malloc_stats ------------------------------
4696 void
4697 __malloc_stats (void)
4699 int i;
4700 mstate ar_ptr;
4701 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4703 if (__malloc_initialized < 0)
4704 ptmalloc_init ();
4705 _IO_flockfile (stderr);
4706 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4707 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4708 for (i = 0, ar_ptr = &main_arena;; i++)
4710 struct mallinfo mi;
4712 memset (&mi, 0, sizeof (mi));
4713 (void) mutex_lock (&ar_ptr->mutex);
4714 int_mallinfo (ar_ptr, &mi);
4715 fprintf (stderr, "Arena %d:\n", i);
4716 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
4717 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
4718 #if MALLOC_DEBUG > 1
4719 if (i > 0)
4720 dump_heap (heap_for_ptr (top (ar_ptr)));
4721 #endif
4722 system_b += mi.arena;
4723 in_use_b += mi.uordblks;
4724 (void) mutex_unlock (&ar_ptr->mutex);
4725 ar_ptr = ar_ptr->next;
4726 if (ar_ptr == &main_arena)
4727 break;
4729 fprintf (stderr, "Total (incl. mmap):\n");
4730 fprintf (stderr, "system bytes = %10u\n", system_b);
4731 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
4732 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
4733 fprintf (stderr, "max mmap bytes = %10lu\n",
4734 (unsigned long) mp_.max_mmapped_mem);
4735 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4736 _IO_funlockfile (stderr);
4741 ------------------------------ mallopt ------------------------------
4745 __libc_mallopt (int param_number, int value)
4747 mstate av = &main_arena;
4748 int res = 1;
4750 if (__malloc_initialized < 0)
4751 ptmalloc_init ();
4752 (void) mutex_lock (&av->mutex);
4753 /* Ensure initialization/consolidation */
4754 malloc_consolidate (av);
4756 LIBC_PROBE (memory_mallopt, 2, param_number, value);
4758 switch (param_number)
4760 case M_MXFAST:
4761 if (value >= 0 && value <= MAX_FAST_SIZE)
4763 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
4764 set_max_fast (value);
4766 else
4767 res = 0;
4768 break;
4770 case M_TRIM_THRESHOLD:
4771 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
4772 mp_.trim_threshold, mp_.no_dyn_threshold);
4773 mp_.trim_threshold = value;
4774 mp_.no_dyn_threshold = 1;
4775 break;
4777 case M_TOP_PAD:
4778 LIBC_PROBE (memory_mallopt_top_pad, 3, value,
4779 mp_.top_pad, mp_.no_dyn_threshold);
4780 mp_.top_pad = value;
4781 mp_.no_dyn_threshold = 1;
4782 break;
4784 case M_MMAP_THRESHOLD:
4785 /* Forbid setting the threshold too high. */
4786 if ((unsigned long) value > HEAP_MAX_SIZE / 2)
4787 res = 0;
4788 else
4790 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
4791 mp_.mmap_threshold, mp_.no_dyn_threshold);
4792 mp_.mmap_threshold = value;
4793 mp_.no_dyn_threshold = 1;
4795 break;
4797 case M_MMAP_MAX:
4798 LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
4799 mp_.n_mmaps_max, mp_.no_dyn_threshold);
4800 mp_.n_mmaps_max = value;
4801 mp_.no_dyn_threshold = 1;
4802 break;
4804 case M_CHECK_ACTION:
4805 LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
4806 check_action = value;
4807 break;
4809 case M_PERTURB:
4810 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
4811 perturb_byte = value;
4812 break;
4814 case M_ARENA_TEST:
4815 if (value > 0)
4817 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
4818 mp_.arena_test = value;
4820 break;
4822 case M_ARENA_MAX:
4823 if (value > 0)
4825 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
4826 mp_.arena_max = value;
4828 break;
4830 (void) mutex_unlock (&av->mutex);
4831 return res;
4833 libc_hidden_def (__libc_mallopt)
4837 -------------------- Alternative MORECORE functions --------------------
4842 General Requirements for MORECORE.
4844 The MORECORE function must have the following properties:
4846 If MORECORE_CONTIGUOUS is false:
4848 * MORECORE must allocate in multiples of pagesize. It will
4849 only be called with arguments that are multiples of pagesize.
4851 * MORECORE(0) must return an address that is at least
4852 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4854 else (i.e. If MORECORE_CONTIGUOUS is true):
4856 * Consecutive calls to MORECORE with positive arguments
4857 return increasing addresses, indicating that space has been
4858 contiguously extended.
4860 * MORECORE need not allocate in multiples of pagesize.
4861 Calls to MORECORE need not have args of multiples of pagesize.
4863 * MORECORE need not page-align.
4865 In either case:
4867 * MORECORE may allocate more memory than requested. (Or even less,
4868 but this will generally result in a malloc failure.)
4870 * MORECORE must not allocate memory when given argument zero, but
4871 instead return one past the end address of memory from previous
4872 nonzero call. This malloc does NOT call MORECORE(0)
4873 until at least one call with positive arguments is made, so
4874 the initial value returned is not important.
4876 * Even though consecutive calls to MORECORE need not return contiguous
4877 addresses, it must be OK for malloc'ed chunks to span multiple
4878 regions in those cases where they do happen to be contiguous.
4880 * MORECORE need not handle negative arguments -- it may instead
4881 just return MORECORE_FAILURE when given negative arguments.
4882 Negative arguments are always multiples of pagesize. MORECORE
4883 must not misinterpret negative args as large positive unsigned
4884 args. You can suppress all such calls from even occurring by defining
4885 MORECORE_CANNOT_TRIM,
4887 There is some variation across systems about the type of the
4888 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4889 actually be size_t, because sbrk supports negative args, so it is
4890 normally the signed type of the same width as size_t (sometimes
4891 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4892 matter though. Internally, we use "long" as arguments, which should
4893 work across all reasonable possibilities.
4895 Additionally, if MORECORE ever returns failure for a positive
4896 request, then mmap is used as a noncontiguous system allocator. This
4897 is a useful backup strategy for systems with holes in address spaces
4898 -- in this case sbrk cannot contiguously expand the heap, but mmap
4899 may be able to map noncontiguous space.
4901 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4902 a function that always returns MORECORE_FAILURE.
4904 If you are using this malloc with something other than sbrk (or its
4905 emulation) to supply memory regions, you probably want to set
4906 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4907 allocator kindly contributed for pre-OSX macOS. It uses virtually
4908 but not necessarily physically contiguous non-paged memory (locked
4909 in, present and won't get swapped out). You can use it by
4910 uncommenting this section, adding some #includes, and setting up the
4911 appropriate defines above:
4913 *#define MORECORE osMoreCore
4914 *#define MORECORE_CONTIGUOUS 0
4916 There is also a shutdown routine that should somehow be called for
4917 cleanup upon program exit.
4919 *#define MAX_POOL_ENTRIES 100
4920 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
4921 static int next_os_pool;
4922 void *our_os_pools[MAX_POOL_ENTRIES];
4924 void *osMoreCore(int size)
4926 void *ptr = 0;
4927 static void *sbrk_top = 0;
4929 if (size > 0)
4931 if (size < MINIMUM_MORECORE_SIZE)
4932 size = MINIMUM_MORECORE_SIZE;
4933 if (CurrentExecutionLevel() == kTaskLevel)
4934 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4935 if (ptr == 0)
4937 return (void *) MORECORE_FAILURE;
4939 // save ptrs so they can be freed during cleanup
4940 our_os_pools[next_os_pool] = ptr;
4941 next_os_pool++;
4942 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4943 sbrk_top = (char *) ptr + size;
4944 return ptr;
4946 else if (size < 0)
4948 // we don't currently support shrink behavior
4949 return (void *) MORECORE_FAILURE;
4951 else
4953 return sbrk_top;
4957 // cleanup any allocated memory pools
4958 // called as last thing before shutting down driver
4960 void osCleanupMem(void)
4962 void **ptr;
4964 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4965 if (*ptr)
4967 PoolDeallocate(*ptr);
4968 * ptr = 0;
4975 /* Helper code. */
4977 extern char **__libc_argv attribute_hidden;
4979 static void
4980 malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
4982 /* Avoid using this arena in future. We do not attempt to synchronize this
4983 with anything else because we minimally want to ensure that __libc_message
4984 gets its resources safely without stumbling on the current corruption. */
4985 if (ar_ptr)
4986 set_arena_corrupt (ar_ptr);
4988 if ((action & 5) == 5)
4989 __libc_message (action & 2, "%s\n", str);
4990 else if (action & 1)
4992 char buf[2 * sizeof (uintptr_t) + 1];
4994 buf[sizeof (buf) - 1] = '\0';
4995 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
4996 while (cp > buf)
4997 *--cp = '0';
4999 __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
5000 __libc_argv[0] ? : "<unknown>", str, cp);
5002 else if (action & 2)
5003 abort ();
5006 /* We need a wrapper function for one of the additions of POSIX. */
5008 __posix_memalign (void **memptr, size_t alignment, size_t size)
5010 void *mem;
5012 /* Test whether the SIZE argument is valid. It must be a power of
5013 two multiple of sizeof (void *). */
5014 if (alignment % sizeof (void *) != 0
5015 || !powerof2 (alignment / sizeof (void *))
5016 || alignment == 0)
5017 return EINVAL;
5020 void *address = RETURN_ADDRESS (0);
5021 mem = _mid_memalign (alignment, size, address);
5023 if (mem != NULL)
5025 *memptr = mem;
5026 return 0;
5029 return ENOMEM;
5031 weak_alias (__posix_memalign, posix_memalign)
5035 __malloc_info (int options, FILE *fp)
5037 /* For now, at least. */
5038 if (options != 0)
5039 return EINVAL;
5041 int n = 0;
5042 size_t total_nblocks = 0;
5043 size_t total_nfastblocks = 0;
5044 size_t total_avail = 0;
5045 size_t total_fastavail = 0;
5046 size_t total_system = 0;
5047 size_t total_max_system = 0;
5048 size_t total_aspace = 0;
5049 size_t total_aspace_mprotect = 0;
5053 if (__malloc_initialized < 0)
5054 ptmalloc_init ();
5056 fputs ("<malloc version=\"1\">\n", fp);
5058 /* Iterate over all arenas currently in use. */
5059 mstate ar_ptr = &main_arena;
5062 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5064 size_t nblocks = 0;
5065 size_t nfastblocks = 0;
5066 size_t avail = 0;
5067 size_t fastavail = 0;
5068 struct
5070 size_t from;
5071 size_t to;
5072 size_t total;
5073 size_t count;
5074 } sizes[NFASTBINS + NBINS - 1];
5075 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5077 mutex_lock (&ar_ptr->mutex);
5079 for (size_t i = 0; i < NFASTBINS; ++i)
5081 mchunkptr p = fastbin (ar_ptr, i);
5082 if (p != NULL)
5084 size_t nthissize = 0;
5085 size_t thissize = chunksize (p);
5087 while (p != NULL)
5089 ++nthissize;
5090 p = p->fd;
5093 fastavail += nthissize * thissize;
5094 nfastblocks += nthissize;
5095 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5096 sizes[i].to = thissize;
5097 sizes[i].count = nthissize;
5099 else
5100 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5102 sizes[i].total = sizes[i].count * sizes[i].to;
5106 mbinptr bin;
5107 struct malloc_chunk *r;
5109 for (size_t i = 1; i < NBINS; ++i)
5111 bin = bin_at (ar_ptr, i);
5112 r = bin->fd;
5113 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5114 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5115 = sizes[NFASTBINS - 1 + i].count = 0;
5117 if (r != NULL)
5118 while (r != bin)
5120 ++sizes[NFASTBINS - 1 + i].count;
5121 sizes[NFASTBINS - 1 + i].total += r->size;
5122 sizes[NFASTBINS - 1 + i].from
5123 = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
5124 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5125 r->size);
5127 r = r->fd;
5130 if (sizes[NFASTBINS - 1 + i].count == 0)
5131 sizes[NFASTBINS - 1 + i].from = 0;
5132 nblocks += sizes[NFASTBINS - 1 + i].count;
5133 avail += sizes[NFASTBINS - 1 + i].total;
5136 mutex_unlock (&ar_ptr->mutex);
5138 total_nfastblocks += nfastblocks;
5139 total_fastavail += fastavail;
5141 total_nblocks += nblocks;
5142 total_avail += avail;
5144 for (size_t i = 0; i < nsizes; ++i)
5145 if (sizes[i].count != 0 && i != NFASTBINS)
5146 fprintf (fp, " \
5147 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5148 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5150 if (sizes[NFASTBINS].count != 0)
5151 fprintf (fp, "\
5152 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5153 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5154 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5156 total_system += ar_ptr->system_mem;
5157 total_max_system += ar_ptr->max_system_mem;
5159 fprintf (fp,
5160 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5161 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5162 "<system type=\"current\" size=\"%zu\"/>\n"
5163 "<system type=\"max\" size=\"%zu\"/>\n",
5164 nfastblocks, fastavail, nblocks, avail,
5165 ar_ptr->system_mem, ar_ptr->max_system_mem);
5167 if (ar_ptr != &main_arena)
5169 heap_info *heap = heap_for_ptr (top (ar_ptr));
5170 fprintf (fp,
5171 "<aspace type=\"total\" size=\"%zu\"/>\n"
5172 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5173 heap->size, heap->mprotect_size);
5174 total_aspace += heap->size;
5175 total_aspace_mprotect += heap->mprotect_size;
5177 else
5179 fprintf (fp,
5180 "<aspace type=\"total\" size=\"%zu\"/>\n"
5181 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5182 ar_ptr->system_mem, ar_ptr->system_mem);
5183 total_aspace += ar_ptr->system_mem;
5184 total_aspace_mprotect += ar_ptr->system_mem;
5187 fputs ("</heap>\n", fp);
5188 ar_ptr = ar_ptr->next;
5190 while (ar_ptr != &main_arena);
5192 fprintf (fp,
5193 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5194 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5195 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5196 "<system type=\"current\" size=\"%zu\"/>\n"
5197 "<system type=\"max\" size=\"%zu\"/>\n"
5198 "<aspace type=\"total\" size=\"%zu\"/>\n"
5199 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5200 "</malloc>\n",
5201 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5202 mp_.n_mmaps, mp_.mmapped_mem,
5203 total_system, total_max_system,
5204 total_aspace, total_aspace_mprotect);
5206 return 0;
5208 weak_alias (__malloc_info, malloc_info)
5211 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5212 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5213 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5214 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5215 strong_alias (__libc_memalign, __memalign)
5216 weak_alias (__libc_memalign, memalign)
5217 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5218 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5219 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5220 strong_alias (__libc_mallinfo, __mallinfo)
5221 weak_alias (__libc_mallinfo, mallinfo)
5222 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5224 weak_alias (__malloc_stats, malloc_stats)
5225 weak_alias (__malloc_usable_size, malloc_usable_size)
5226 weak_alias (__malloc_trim, malloc_trim)
5227 weak_alias (__malloc_get_state, malloc_get_state)
5228 weak_alias (__malloc_set_state, malloc_set_state)
5231 /* ------------------------------------------------------------
5232 History:
5234 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5238 * Local variables:
5239 * c-basic-offset: 2
5240 * End: