1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 There have been substantial changes made after the integration into
27 glibc in all parts of the code. Do not look for much commonality
28 with the ptmalloc2 version.
30 * Version ptmalloc2-20011215
32 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
36 In order to compile this implementation, a Makefile is provided with
37 the ptmalloc2 distribution, which has pre-defined targets for some
38 popular systems (e.g. "make posix" for Posix threads). All that is
39 typically required with regard to compiler flags is the selection of
40 the thread package via defining one out of USE_PTHREADS, USE_THR or
41 USE_SPROC. Check the thread-m.h file for what effects this has.
42 Many/most systems will additionally require USE_TSD_DATA_HACK to be
43 defined, so this is the default for "make posix".
45 * Why use this malloc?
47 This is not the fastest, most space-conserving, most portable, or
48 most tunable malloc ever written. However it is among the fastest
49 while also being among the most space-conserving, portable and tunable.
50 Consistent balance across these factors results in a good general-purpose
51 allocator for malloc-intensive programs.
53 The main properties of the algorithms are:
54 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55 with ties normally decided via FIFO (i.e. least recently used).
56 * For small (<= 64 bytes by default) requests, it is a caching
57 allocator, that maintains pools of quickly recycled chunks.
58 * In between, and for combinations of large and small requests, it does
59 the best it can trying to meet both goals at once.
60 * For very large requests (>= 128KB by default), it relies on system
61 memory mapping facilities, if supported.
63 For a longer but slightly out of date high-level description, see
64 http://gee.cs.oswego.edu/dl/html/malloc.html
66 You may already by default be using a C library containing a malloc
67 that is based on some version of this malloc (for example in
68 linux). You might still want to use the one in this file in order to
69 customize settings or to avoid overheads associated with library versions.
72 * Contents, described in more detail in "description of public routines" below.
74 Standard (ANSI/SVID/...) functions:
76 calloc(size_t n_elements, size_t element_size);
78 realloc(Void_t* p, size_t n);
79 memalign(size_t alignment, size_t n);
82 mallopt(int parameter_number, int parameter_value)
85 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
86 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
89 malloc_trim(size_t pad);
90 malloc_usable_size(Void_t* p);
95 Supported pointer representation: 4 or 8 bytes
96 Supported size_t representation: 4 or 8 bytes
97 Note that size_t is allowed to be 4 bytes even if pointers are 8.
98 You can adjust this by defining INTERNAL_SIZE_T
100 Alignment: 2 * sizeof(size_t) (default)
101 (i.e., 8 byte alignment with 4byte size_t). This suffices for
102 nearly all current machines and C compilers. However, you can
103 define MALLOC_ALIGNMENT to be wider than this if necessary.
105 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Each malloced chunk has a hidden word of overhead holding size
107 and status information.
109 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
110 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
112 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
113 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
114 needed; 4 (8) for a trailing size field and 8 (16) bytes for
115 free list pointers. Thus, the minimum allocatable size is 16/24/32 bytes.
118 Even a request for zero bytes (i.e., malloc(0)) returns a
119 pointer to something of the minimum allocatable size.
121 The maximum overhead wastage (i.e., number of extra bytes
122 allocated than were requested in malloc) is less than or equal
123 to the minimum size, except for requests >= mmap_threshold that
124 are serviced via mmap(), where the worst case wastage is 2 *
125 sizeof(size_t) bytes plus the remainder from a system page (the
126 minimal mmap unit); typically 4096 or 8192 bytes.
128 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
129 8-byte size_t: 2^64 minus about two pages
131 It is assumed that (possibly signed) size_t values suffice to
132 represent chunk sizes. `Possibly signed' is due to the fact
133 that `size_t' may be defined on a system as either a signed or
134 an unsigned type. The ISO C standard says that it must be
135 unsigned, but a few systems are known not to adhere to this.
136 Additionally, even when size_t is unsigned, sbrk (which is by
137 default used to obtain memory from system) accepts signed
138 arguments, and may not be able to handle size_t-wide arguments
139 with negative sign bit. Generally, values that would
140 appear as negative after accounting for overhead and alignment
141 are supported only via mmap(), which does not have this limitation.
144 Requests for sizes outside the allowed range will perform an optional
145 failure action and then return null. (Requests may also
146 fail because a system is out of memory.)
148 Thread-safety: thread-safe unless NO_THREADS is defined
150 Compliance: I believe it is compliant with the 1997 Single Unix Specification
151 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably others as well.
154 * Synopsis of compile-time options:
156 People have reported using previous versions of this malloc on all
157 versions of Unix, sometimes by tweaking some of the defines
158 below. It has been tested most extensively on Solaris and
159 Linux. It is also reported to work on WIN32 platforms.
160 People also report using it in stand-alone embedded systems.
162 The implementation is in straight, hand-tuned ANSI C. It is not
163 at all modular. (Sorry!) It uses a lot of macros. To be at all
164 usable, this code should be compiled using an optimizing compiler
165 (for example gcc -O3) that can simplify expressions and control
166 paths. (FAQ: some macros import variables as arguments rather than
167 declare locals because people reported that some debuggers
168 otherwise get confused.)
172 Compilation Environment options:
174 __STD_C derived from C compiler defines
177 USE_MEMCPY 1 if HAVE_MEMCPY is defined
178 HAVE_MMAP defined as 1
180 HAVE_MREMAP 0 unless linux defined
181 USE_ARENAS the same as HAVE_MMAP
182 malloc_getpagesize derived from system #includes, or 4096 if not
183 HAVE_USR_INCLUDE_MALLOC_H NOT defined
184 LACKS_UNISTD_H NOT defined unless WIN32
185 LACKS_SYS_PARAM_H NOT defined unless WIN32
186 LACKS_SYS_MMAN_H NOT defined unless WIN32
188 Changing default word sizes:
190 INTERNAL_SIZE_T size_t
191 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
192 __alignof__ (long double))
194 Configuration and functionality options:
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
204 Options for customizing MORECORE:
208 MORECORE_CONTIGUOUS 1
209 MORECORE_CANNOT_TRIM NOT defined
211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
213 Tuning options that are also dynamically changeable via mallopt:
215 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away with it.
231 #if defined(__STDC__) || defined(__cplusplus)
240 Void_t* is the pointer type that malloc should say it returns
244 #if (__STD_C || defined(WIN32))
252 #include <stddef.h> /* for size_t */
253 #include <stdlib.h> /* for getenv(), abort() */
255 #include <sys/types.h>
258 #include <malloc-machine.h>
261 #ifdef ATOMIC_FASTBINS
264 #include <stdio-common/_itoa.h>
265 #include <bits/wordsize.h>
266 #include <sys/sysinfo.h>
273 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
275 /* #define LACKS_UNISTD_H */
277 #ifndef LACKS_UNISTD_H
281 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
283 /* #define LACKS_SYS_PARAM_H */
286 #include <stdio.h> /* needed for malloc_stats */
287 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
292 /* For va_arg, va_start, va_end. */
295 /* For writev and struct iovec. */
298 #include <sys/syslog.h>
300 /* For various dynamic linking things. */
307 Because freed chunks may be overwritten with bookkeeping fields, this
308 malloc will often die when freed memory is overwritten by user
309 programs. This can be very effective (albeit in an annoying way)
310 in helping track down dangling pointers.
312 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
313 enabled that will catch more memory errors. You probably won't be
314 able to make much sense of the actual assertion errors, but they
315 should help you locate incorrectly overwritten memory. The checking
316 is fairly extensive, and will slow down execution
317 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
318 will attempt to check every non-mmapped allocated and free chunk in
319 the course of computing the summaries. (By nature, mmapped regions
320 cannot be checked very much automatically.)
322 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
323 this code. The assertions in the check routines spell out in more
324 detail the assumptions and invariants underlying the algorithms.
326 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
327 checking that all accesses to malloced memory stay within their
328 bounds. However, there are several add-ons and adaptations of this
329 or other mallocs available that do this.
333 # define assert(expr) ((void) 0)
335 # define assert(expr) \
338 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
340 extern const char *__progname;
343 __malloc_assert (const char *assertion, const char *file, unsigned int line,
344 		  const char *function)
346   (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
347 		     __progname, __progname[0] ? ": " : "",
349 		     function ? function : "", function ? ": " : "",
358 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
361 The default version is the same as size_t.
363 While not strictly necessary, it is best to define this as an
364 unsigned type, even if size_t is a signed type. This may avoid some
365 artificial size limitations on some systems.
367 On a 64-bit machine, you may be able to reduce malloc overhead by
368 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
369 expense of not being able to handle more than 2^32 of malloced
370 space. If this limitation is acceptable, you are encouraged to set
371 this unless you are on a platform requiring 16byte alignments. In
372 this case the alignment requirements turn out to negate any
373 potential advantages of decreasing size_t word size.
375 Implementors: Beware of the possible combinations of:
376 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
377 and might be the same width as int or as long
378 - size_t might have different width and signedness as INTERNAL_SIZE_T
379 - int and long might be 32 or 64 bits, and might be the same width
380 To deal with this, most comparisons and difference computations
381 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
382 aware of the fact that casting an unsigned int to a wider long does
383 not sign-extend. (This also makes checking for negative numbers
384 awkward.) Some of these casts result in harmless compiler warnings
388 #ifndef INTERNAL_SIZE_T
389 #define INTERNAL_SIZE_T size_t
392 /* The corresponding word size */
393 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
397 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
398 It must be a power of two at least 2 * SIZE_SZ, even on machines
399 for which smaller alignments would suffice. It may be defined as
400 larger than this though. Note however that code and data structures
401 are optimized for the case of 8-byte alignment.
405 #ifndef MALLOC_ALIGNMENT
406 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
407 powerpc32. For the time being, changing this is causing more
408 compatibility problems due to malloc_get_state/malloc_set_state than
409 will returning blocks not adequately aligned for long double objects
410 under -mlong-double-128.
412 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
413 ? __alignof__ (long double) : 2 * SIZE_SZ)
415 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
418 /* The corresponding bit mask value */
419 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
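/* Illustrative only (an assumed idiom, not a definition taken from this file):
   a byte count can be rounded up to the next multiple of MALLOC_ALIGNMENT by
   adding the mask and then clearing the low-order bits:

     size_t rounded = (req + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
*/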
424 REALLOC_ZERO_BYTES_FREES should be set if a call to
425 realloc with zero bytes should be the same as a call to free.
426 This is required by the C standard. Otherwise, since this malloc
427 returns a unique pointer for malloc(0), so does realloc(p, 0).
430 #ifndef REALLOC_ZERO_BYTES_FREES
431 #define REALLOC_ZERO_BYTES_FREES 1
435 TRIM_FASTBINS controls whether free() of a very small chunk can
436 immediately lead to trimming. Setting to true (1) can reduce memory
437 footprint, but will almost always slow down programs that use a lot of small chunks.
440 Define this only if you are willing to give up some speed to more
441 aggressively reduce system-level memory footprint when releasing
442 memory in programs that use many small chunks. You can get
443 essentially the same effect by setting MXFAST to 0, but this can
444 lead to even greater slowdowns in programs using many small chunks.
445 TRIM_FASTBINS is an in-between compile-time option, that disables
446 only those chunks bordering topmost memory from being placed in
450 #ifndef TRIM_FASTBINS
451 #define TRIM_FASTBINS 0
456 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
457 This is necessary when you only want to use this malloc in one part
458 of a program, using your regular system malloc elsewhere.
461 /* #define USE_DL_PREFIX */
465 Two-phase name translation.
466 All of the actual routines are given mangled names.
467 When wrappers are used, they become the public callable versions.
468 When DL_PREFIX is used, the callable names are prefixed.
472 #define public_cALLOc dlcalloc
473 #define public_fREe dlfree
474 #define public_cFREe dlcfree
475 #define public_mALLOc dlmalloc
476 #define public_mEMALIGn dlmemalign
477 #define public_rEALLOc dlrealloc
478 #define public_vALLOc dlvalloc
479 #define public_pVALLOc dlpvalloc
480 #define public_mALLINFo dlmallinfo
481 #define public_mALLOPt dlmallopt
482 #define public_mTRIm dlmalloc_trim
483 #define public_mSTATs dlmalloc_stats
484 #define public_mUSABLe dlmalloc_usable_size
485 #define public_iCALLOc dlindependent_calloc
486 #define public_iCOMALLOc dlindependent_comalloc
487 #define public_gET_STATe dlget_state
488 #define public_sET_STATe dlset_state
489 #else /* USE_DL_PREFIX */
492 /* Special defines for the GNU C library. */
493 #define public_cALLOc __libc_calloc
494 #define public_fREe __libc_free
495 #define public_cFREe __libc_cfree
496 #define public_mALLOc __libc_malloc
497 #define public_mEMALIGn __libc_memalign
498 #define public_rEALLOc __libc_realloc
499 #define public_vALLOc __libc_valloc
500 #define public_pVALLOc __libc_pvalloc
501 #define public_mALLINFo __libc_mallinfo
502 #define public_mALLOPt __libc_mallopt
503 #define public_mTRIm __malloc_trim
504 #define public_mSTATs __malloc_stats
505 #define public_mUSABLe __malloc_usable_size
506 #define public_iCALLOc __libc_independent_calloc
507 #define public_iCOMALLOc __libc_independent_comalloc
508 #define public_gET_STATe __malloc_get_state
509 #define public_sET_STATe __malloc_set_state
510 #define malloc_getpagesize __getpagesize()
513 #define munmap __munmap
514 #define mremap __mremap
515 #define mprotect __mprotect
516 #define MORECORE (*__morecore)
517 #define MORECORE_FAILURE 0
519 Void_t * __default_morecore (ptrdiff_t);
520 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
523 #define public_cALLOc calloc
524 #define public_fREe free
525 #define public_cFREe cfree
526 #define public_mALLOc malloc
527 #define public_mEMALIGn memalign
528 #define public_rEALLOc realloc
529 #define public_vALLOc valloc
530 #define public_pVALLOc pvalloc
531 #define public_mALLINFo mallinfo
532 #define public_mALLOPt mallopt
533 #define public_mTRIm malloc_trim
534 #define public_mSTATs malloc_stats
535 #define public_mUSABLe malloc_usable_size
536 #define public_iCALLOc independent_calloc
537 #define public_iCOMALLOc independent_comalloc
538 #define public_gET_STATe malloc_get_state
539 #define public_sET_STATe malloc_set_state
541 #endif /* USE_DL_PREFIX */
544 #define __builtin_expect(expr, val) (expr)
546 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
550 HAVE_MEMCPY should be defined if you are not otherwise using
551 ANSI STD C, but still have memcpy and memset in your C library
552 and want to use them in calloc and realloc. Otherwise simple
553 macro versions are defined below.
555 USE_MEMCPY should be defined as 1 if you actually want to
556 have memset and memcpy called. People report that the macro
557 versions are faster than libc versions on some systems.
559 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
560 (of <= 36 bytes) are manually unrolled in realloc and calloc.
574 #if (__STD_C || defined(HAVE_MEMCPY))
580 /* On Win32 memset and memcpy are already declared in windows.h */
583 void* memset(void*, int, size_t);
584 void* memcpy(void*, const void*, size_t);
594 /* Force a value to be in a register and stop the compiler referring
595 to the source (mostly memory location) again. */
596 #define force_reg(val) \
597 ({ __typeof (val) _v; asm ("" : "=r" (_v) : "0" (val)); _v; })
601 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
602 malloc fails to be able to return memory, either because memory is
603 exhausted or because of illegal arguments.
605 By default, sets errno if running on STD_C platform, else does nothing.
608 #ifndef MALLOC_FAILURE_ACTION
610 #define MALLOC_FAILURE_ACTION \
614 #define MALLOC_FAILURE_ACTION
619 MORECORE-related declarations. By default, rely on sbrk
623 #ifdef LACKS_UNISTD_H
624 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
626 extern Void_t*     sbrk(ptrdiff_t);
628 extern Void_t*     sbrk();
634 MORECORE is the name of the routine to call to obtain more memory
635 from the system. See below for general guidance on writing
636 alternative MORECORE functions, as well as a version for WIN32 and a
637 sample version for pre-OSX macos.
641 #define MORECORE sbrk
645 MORECORE_FAILURE is the value returned upon failure of MORECORE
646 as well as mmap. Since it cannot be an otherwise valid memory address,
647 and must reflect values of standard sys calls, you probably ought not try to redefine it.
651 #ifndef MORECORE_FAILURE
652 #define MORECORE_FAILURE (-1)
656 If MORECORE_CONTIGUOUS is true, take advantage of fact that
657 consecutive calls to MORECORE with positive arguments always return
658 contiguous increasing addresses. This is true of unix sbrk. Even
659 if not defined, when regions happen to be contiguous, malloc will
660 permit allocations spanning regions obtained from different
661 calls. But defining this when applicable enables some stronger
662 consistency checks and space efficiencies.
665 #ifndef MORECORE_CONTIGUOUS
666 #define MORECORE_CONTIGUOUS 1
670 Define MORECORE_CANNOT_TRIM if your version of MORECORE
671 cannot release space back to the system when given negative
672 arguments. This is generally necessary only if you are using
673 a hand-crafted MORECORE function that cannot handle negative arguments.
676 /* #define MORECORE_CANNOT_TRIM */
678 /* MORECORE_CLEARS (default 1)
679 The degree to which the routine mapped to MORECORE zeroes out
680 memory: never (0), only for newly allocated space (1) or always
681 (2). The distinction between (1) and (2) is necessary because on
682 some systems, if the application first decrements and then
683 increments the break value, the contents of the reallocated space are unspecified.
687 #ifndef MORECORE_CLEARS
688 #define MORECORE_CLEARS 1
693 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
694 allocate very large blocks. These will be returned to the
695 operating system immediately after a free(). Also, if mmap
696 is available, it is used as a backup strategy in cases where
697 MORECORE fails to provide space from system.
699 This malloc is best tuned to work with mmap for large requests.
700 If you do not have mmap, operations involving very large chunks (1MB
701 or so) may be slower than you'd like.
708 Standard unix mmap using /dev/zero clears memory so calloc doesn't need to.
713 #define MMAP_CLEARS 1
718 #define MMAP_CLEARS 0
724 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
725 sbrk fails, and mmap is used as a backup (which is done only if
726 HAVE_MMAP). The value must be a multiple of page size. This
727 backup strategy generally applies only when systems have "holes" in
728 address space, so sbrk cannot perform contiguous expansion, but
729 there is still space available on system. On systems for which
730 this is known to be useful (i.e. most linux kernels), this occurs
731 only when programs allocate huge amounts of memory. Between this,
732 and the fact that mmap regions tend to be limited, the size should
733 be large, to avoid too many mmap calls and thus avoid running out of kernel resources.
737 #ifndef MMAP_AS_MORECORE_SIZE
738 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
742 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
743 large blocks. This is currently only possible on Linux with
744 kernel versions newer than 1.3.77.
749 #define HAVE_MREMAP 1
751 #define HAVE_MREMAP 0
754 #endif /* HAVE_MMAP */
756 /* Define USE_ARENAS to enable support for multiple `arenas'. These
757 are allocated using mmap(), are necessary for threads and
758 occasionally useful to overcome address space limitations affecting
762 #define USE_ARENAS HAVE_MMAP
767 The system page size. To the extent possible, this malloc manages
768 memory from the system in page-size units. Note that this value is
769 cached during initialization into a field of malloc_state. So even
770 if malloc_getpagesize is a function, it is only called once.
772 The following mechanics for getpagesize were adapted from bsd/gnu
773 getpagesize.h. If none of the system-probes here apply, a value of
774 4096 is used, which should be OK: If they don't apply, then using
775 the actual value probably doesn't impact performance.
779 #ifndef malloc_getpagesize
781 #ifndef LACKS_UNISTD_H
785 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
786 # ifndef _SC_PAGE_SIZE
787 # define _SC_PAGE_SIZE _SC_PAGESIZE
791 # ifdef _SC_PAGE_SIZE
792 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
794 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
795 extern size_t getpagesize();
796 # define malloc_getpagesize getpagesize()
798 # ifdef WIN32 /* use supplied emulation of getpagesize */
799 # define malloc_getpagesize getpagesize()
801 # ifndef LACKS_SYS_PARAM_H
802 # include <sys/param.h>
804 # ifdef EXEC_PAGESIZE
805 # define malloc_getpagesize EXEC_PAGESIZE
809 # define malloc_getpagesize NBPG
811 # define malloc_getpagesize (NBPG * CLSIZE)
815 # define malloc_getpagesize NBPC
818 # define malloc_getpagesize PAGESIZE
819 # else /* just guess */
820 # define malloc_getpagesize (4096)
831 This version of malloc supports the standard SVID/XPG mallinfo
832 routine that returns a struct containing usage properties and
833 statistics. It should work on any SVID/XPG compliant system that has
834 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
835 install such a thing yourself, cut out the preliminary declarations
836 as described above and below and save them in a malloc.h file. But
837 there's no compelling reason to bother to do this.)
839 The main declaration needed is the mallinfo struct that is returned
840 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
841 bunch of fields that are not even meaningful in this version of
842 malloc. These fields are instead filled by mallinfo() with
843 other numbers that might be of interest.
845 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
846 /usr/include/malloc.h file that includes a declaration of struct
847 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
848 version is declared below. These must be precisely the same for
849 mallinfo() to work. The original SVID version of this struct,
850 defined on most systems with mallinfo, declares all fields as
851 ints. But some others define as unsigned long. If your system
852 defines the fields using a type of different width than listed here,
853 you must #include your system version and #define
854 HAVE_USR_INCLUDE_MALLOC_H.
857 /* #define HAVE_USR_INCLUDE_MALLOC_H */
859 #ifdef HAVE_USR_INCLUDE_MALLOC_H
860 #include "/usr/include/malloc.h"
864 /* ---------- description of public routines ------------ */
868 Returns a pointer to a newly allocated chunk of at least n bytes, or null
869 if no space is available. Additionally, on failure, errno is
870 set to ENOMEM on ANSI C systems.
872 If n is zero, malloc returns a minimum-sized chunk. (The minimum
873 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
874 systems.) On most systems, size_t is an unsigned type, so calls
875 with negative arguments are interpreted as requests for huge amounts
876 of space, which will often fail. The maximum supported value of n
877 differs across systems, but is in all cases less than the maximum
878 representable value of a size_t.
881 Void_t*  public_mALLOc(size_t);
883 Void_t*  public_mALLOc();
885 #ifdef libc_hidden_proto
886 libc_hidden_proto (public_mALLOc)
891 Releases the chunk of memory pointed to by p, that had been previously
892 allocated using malloc or a related routine such as realloc.
893 It has no effect if p is null. It can have arbitrary (i.e., bad!)
894 effects if p has already been freed.
896 Unless disabled (using mallopt), freeing very large spaces will,
897 when possible, automatically trigger operations that give
898 back unused memory to the system, thus reducing program footprint.
901 void     public_fREe(Void_t*);
905 #ifdef libc_hidden_proto
906 libc_hidden_proto (public_fREe)
910 calloc(size_t n_elements, size_t element_size);
911 Returns a pointer to n_elements * element_size bytes, with all locations set to zero.
915 Void_t*  public_cALLOc(size_t, size_t);
917 Void_t*  public_cALLOc();
921 realloc(Void_t* p, size_t n)
922 Returns a pointer to a chunk of size n that contains the same data
923 as does chunk p up to the minimum of (n, p's size) bytes, or null
924 if no space is available.
926 The returned pointer may or may not be the same as p. The algorithm
927 prefers extending p when possible, otherwise it employs the
928 equivalent of a malloc-copy-free sequence.
930 If p is null, realloc is equivalent to malloc.
932 If space is not available, realloc returns null, errno is set (if on
933 ANSI) and p is NOT freed.
935 if n is for fewer bytes than already held by p, the newly unused
936 space is lopped off and freed if possible. Unless the #define
937 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
938 zero (re)allocates a minimum-sized chunk.
940 Large chunks that were internally obtained via mmap will always
941 be reallocated using malloc-copy-free sequences unless
942 the system supports MREMAP (currently only linux).
944 The old unix realloc convention of allowing the last-free'd chunk
945 to be used as an argument to realloc is not supported.
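
  As an illustrative sketch (not part of the original description), a caller
  that must not leak the old block when realloc fails can hold the result in
  a temporary first (handle_out_of_memory is a hypothetical error handler):

    void* tmp = realloc(p, n);
    if (tmp == 0)
      handle_out_of_memory();  // p is still valid and must eventually be freed
    else
      p = tmp;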
948 Void_t*  public_rEALLOc(Void_t*, size_t);
950 Void_t*  public_rEALLOc();
952 #ifdef libc_hidden_proto
953 libc_hidden_proto (public_rEALLOc)
957 memalign(size_t alignment, size_t n);
958 Returns a pointer to a newly allocated chunk of n bytes, aligned
959 in accord with the alignment argument.
961 The alignment argument should be a power of two. If the argument is
962 not a power of two, the nearest greater power is used.
963 8-byte alignment is guaranteed by normal malloc calls, so don't
964 bother calling memalign with an argument of 8 or less.
966 Overreliance on memalign is a sure way to fragment space.
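
  For illustration only (the sizes below are arbitrary examples): requesting
  a 64-byte-aligned block and releasing it again might look like

    void* buf = memalign(64, 1024);   // 64 is a power of two
    if (buf != 0)
      free(buf);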
969 Void_t*  public_mEMALIGn(size_t, size_t);
971 Void_t*  public_mEMALIGn();
973 #ifdef libc_hidden_proto
974 libc_hidden_proto (public_mEMALIGn)
979 Equivalent to memalign(pagesize, n), where pagesize is the page
980 size of the system. If the pagesize is unknown, 4096 is used.
983 Void_t*  public_vALLOc(size_t);
985 Void_t*  public_vALLOc();
991 mallopt(int parameter_number, int parameter_value)
992 Sets tunable parameters. The format is to provide a
993 (parameter-number, parameter-value) pair. mallopt then sets the
994 corresponding parameter to the argument value if it can (i.e., so
995 long as the value is meaningful), and returns 1 if successful else
996 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
997 normally defined in malloc.h. Only one of these (M_MXFAST) is used
998 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
999 so setting them has no effect. But this malloc also supports four
1000 other options in mallopt. See below for details. Briefly, supported
1001 parameters are as follows (listed defaults are for "typical"
1004 Symbol            param #   default    allowed param values
1005 M_MXFAST          1         64         0-80  (0 disables fastbins)
1006 M_TRIM_THRESHOLD  -1        128*1024   any   (-1U disables trimming)
1007 M_TOP_PAD         -2        0          any
1008 M_MMAP_THRESHOLD  -3        128*1024   any   (or 0 if no MMAP support)
1009 M_MMAP_MAX        -4        65536      any   (0 disables use of mmap)
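
  As a sketch of typical usage (the parameter values here are examples only,
  not recommendations); mallopt returns 1 on success and 0 if it rejects the
  pair:

    if (mallopt(M_MXFAST, 32) == 0)     // shrink the fastbin request limit
      fprintf(stderr, "mallopt(M_MXFAST) failed\n");
    if (mallopt(M_MMAP_MAX, 0) == 0)    // disable use of mmap entirely
      fprintf(stderr, "mallopt(M_MMAP_MAX) failed\n");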
1012 int public_mALLOPt(int, int);
1014 int public_mALLOPt();
1020 Returns (by copy) a struct containing various summary statistics:
1022 arena: current total non-mmapped bytes allocated from system
1023 ordblks: the number of free chunks
1024 smblks: the number of fastbin blocks (i.e., small chunks that
1025 have been freed but not reused or consolidated)
1026 hblks: current number of mmapped regions
1027 hblkhd: total bytes held in mmapped regions
1028 usmblks: the maximum total allocated space. This will be greater
1029 than current total if trimming has occurred.
1030 fsmblks: total bytes held in fastbin blocks
1031 uordblks: current total allocated space (normal or mmapped)
1032 fordblks: total free space
1033 keepcost: the maximum number of bytes that could ideally be released
1034 back to system via malloc_trim. ("ideally" means that
1035 it ignores page restrictions etc.)
1037 Because these fields are ints, but internal bookkeeping may
1038 be kept as longs, the reported values may wrap around zero and thus be inaccurate.
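
  A minimal usage sketch (field meanings as listed above; fprintf is used
  only for illustration):

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "allocated: %d  free: %d  mmapped: %d  trimmable: %d\n",
            mi.uordblks, mi.fordblks, mi.hblkhd, mi.keepcost);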
1042 struct mallinfo public_mALLINFo(void);
1044 struct mallinfo public_mALLINFo();
1049 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1051 independent_calloc is similar to calloc, but instead of returning a
1052 single cleared space, it returns an array of pointers to n_elements
1053 independent elements that can hold contents of size elem_size, each
1054 of which starts out cleared, and can be independently freed,
1055 realloc'ed etc. The elements are guaranteed to be adjacently
1056 allocated (this is not guaranteed to occur with multiple callocs or
1057 mallocs), which may also improve cache locality in some applications.
1060 The "chunks" argument is optional (i.e., may be null, which is
1061 probably the most typical usage). If it is null, the returned array
1062 is itself dynamically allocated and should also be freed when it is
1063 no longer needed. Otherwise, the chunks array must be of at least
1064 n_elements in length. It is filled in with the pointers to the chunks.
1067 In either case, independent_calloc returns this pointer array, or
1068 null if the allocation failed. If n_elements is zero and "chunks"
1069 is null, it returns a chunk representing an array with zero elements
1070 (which should be freed if not wanted).
1072 Each element must be individually freed when it is no longer
1073 needed. If you'd like to instead be able to free all at once, you
1074 should instead use regular calloc and assign pointers into this
1075 space to represent elements. (In this case though, you cannot
1076 independently free elements.)
1078 independent_calloc simplifies and speeds up implementations of many
1079 kinds of pools. It may also be useful when constructing large data
1080 structures that initially have a fixed number of fixed-sized nodes,
1081 but the number is not known at compile time, and some of the nodes
1082 may later need to be freed. For example:
1084 struct Node { int item; struct Node* next; };
1086 struct Node* build_list() {
1088 int n = read_number_of_nodes_needed();
1089 if (n <= 0) return 0;
1090 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1091 if (pool == 0) die();
1092 // organize into a linked list...
1093 struct Node* first = pool[0];
1094 for (i = 0; i < n-1; ++i)
1095 pool[i]->next = pool[i+1];
1096 free(pool); // Can now free the array (or not, if it is needed later)
1101 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1103 Void_t** public_iCALLOc();
1107 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1109 independent_comalloc allocates, all at once, a set of n_elements
1110 chunks with sizes indicated in the "sizes" array. It returns
1111 an array of pointers to these elements, each of which can be
1112 independently freed, realloc'ed etc. The elements are guaranteed to
1113 be adjacently allocated (this is not guaranteed to occur with
1114 multiple callocs or mallocs), which may also improve cache locality
1115 in some applications.
1117 The "chunks" argument is optional (i.e., may be null). If it is null
1118 the returned array is itself dynamically allocated and should also
1119 be freed when it is no longer needed. Otherwise, the chunks array
1120 must be of at least n_elements in length. It is filled in with the
1121 pointers to the chunks.
1123 In either case, independent_comalloc returns this pointer array, or
1124 null if the allocation failed. If n_elements is zero and chunks is
1125 null, it returns a chunk representing an array with zero elements
1126 (which should be freed if not wanted).
1128 Each element must be individually freed when it is no longer
1129 needed. If you'd like to instead be able to free all at once, you
1130 should instead use a single regular malloc, and assign pointers at
1131 particular offsets in the aggregate space. (In this case though, you
1132 cannot independently free elements.)
1134 independent_comalloc differs from independent_calloc in that each
1135 element may have a different size, and also that it does not
1136 automatically clear elements.
1138 independent_comalloc can be used to speed up allocation in cases
1139 where several structs or objects must always be allocated at the
1140 same time. For example:
1145 void send_message(char* msg) {
1146 int msglen = strlen(msg);
1147 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1149 if (independent_comalloc(3, sizes, chunks) == 0)
1151 struct Head* head = (struct Head*)(chunks[0]);
1152 char* body = (char*)(chunks[1]);
1153 struct Foot* foot = (struct Foot*)(chunks[2]);
1157 In general though, independent_comalloc is worth using only for
1158 larger values of n_elements. For small values, you probably won't
1159 detect enough difference from series of malloc calls to bother.
1161 Overuse of independent_comalloc can increase overall memory usage,
1162 since it cannot reuse existing noncontiguous small chunks that
1163 might be available for some of the elements.
1166 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1168 Void_t** public_iCOMALLOc();
1176 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1177 round up n to nearest pagesize.
1180 Void_t*  public_pVALLOc(size_t);
1182 Void_t*  public_pVALLOc();
1187 Equivalent to free(p).
1189 cfree is needed/defined on some systems that pair it with calloc,
1190 for odd historical reasons (such as: cfree is used in example
1191 code in the first edition of K&R).
1194 void     public_cFREe(Void_t*);
1196 void     public_cFREe();
1200 malloc_trim(size_t pad);
1202 If possible, gives memory back to the system (via negative
1203 arguments to sbrk) if there is unused memory at the `high' end of
1204 the malloc pool. You can call this after freeing large blocks of
1205 memory to potentially reduce the system-level memory requirements
1206 of a program. However, it cannot guarantee to reduce memory. Under
1207 some allocation patterns, some large free blocks of memory will be
1208 locked between two used chunks, so they cannot be given back to the system.
1211 The `pad' argument to malloc_trim represents the amount of free
1212 trailing space to leave untrimmed. If this argument is zero,
1213 only the minimum amount of memory to maintain internal data
1214 structures will be left (one page or less). Non-zero arguments
1215 can be supplied to maintain enough trailing space to service
1216 future expected allocations without having to re-obtain memory from the system.
1219 Malloc_trim returns 1 if it actually released any memory, else 0.
1220 On systems that do not support "negative sbrks", it will always return 0.
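
  A usage sketch (the 64K pad below is an arbitrary example): after freeing
  many large blocks, a program might attempt to shrink its footprint with

    int released = malloc_trim(64 * 1024);  // keep ~64K of trailing slack
    // released is 1 if any memory was actually given back, else 0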
1224 int public_mTRIm(size_t);
1230 malloc_usable_size(Void_t* p);
1232 Returns the number of bytes you can actually use in
1233 an allocated chunk, which may be more than you requested (although
1234 often not) due to alignment and minimum size constraints.
1235 You can use this many bytes without worrying about
1236 overwriting other allocated objects. This is not a particularly great
1237 programming practice. malloc_usable_size can be more useful in
1238 debugging and assertions, for example:
1241 assert(malloc_usable_size(p) >= 256);
1245 size_t   public_mUSABLe(Void_t*);
1247 size_t   public_mUSABLe();
1252 Prints on stderr the amount of space obtained from the system (both
1253 via sbrk and mmap), the maximum amount (which may be more than
1254 current if malloc_trim and/or munmap got called), and the current
1255 number of bytes allocated via malloc (or realloc, etc) but not yet
1256 freed. Note that this is the number of bytes allocated, not the
1257 number requested. It will be larger than the number requested
1258 because of alignment and bookkeeping overhead. Because it includes
1259 alignment wastage as being in use, this figure may be greater than
1260 zero even when no user-level chunks are allocated.
1262 The reported current and maximum system memory can be inaccurate if
1263 a program makes other calls to system memory allocation functions
1264 (normally sbrk) outside of malloc.
1266 malloc_stats prints only the most commonly interesting statistics.
1267 More information can be obtained by calling mallinfo.
1271 void public_mSTATs(void);
1273 void public_mSTATs();
1277 malloc_get_state(void);
1279 Returns the state of all malloc variables in an opaque data structure.
1283 Void_t*  public_gET_STATe(void);
1285 Void_t*  public_gET_STATe();
1289 malloc_set_state(Void_t* state);
1291 Restore the state of all malloc variables from data obtained with malloc_get_state().
1295 int      public_sET_STATe(Void_t*);
1297 int      public_sET_STATe();
1302 posix_memalign(void **memptr, size_t alignment, size_t size);
1304 POSIX wrapper like memalign(), checking for validity of size.
1306 int __posix_memalign(void **, size_t, size_t);
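/* Usage sketch (illustrative values): unlike memalign, the result is returned
   through memptr and the return value is 0 on success or an error number.
   The alignment must be a power of two and a multiple of sizeof(void*).

     void* mem;
     if (posix_memalign(&mem, 4096, 8192) == 0)
       free(mem);
*/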
1309 /* mallopt tuning options */
1312 M_MXFAST is the maximum request size used for "fastbins", special bins
1313 that hold returned chunks without consolidating their spaces. This
1314 enables future requests for chunks of the same size to be handled
1315 very quickly, but can increase fragmentation, and thus increase the
1316 overall memory footprint of a program.
1318 This malloc manages fastbins very conservatively yet still
1319 efficiently, so fragmentation is rarely a problem for values less
1320 than or equal to the default. The maximum supported value of MXFAST
1321 is 80. You wouldn't want it any higher than this anyway. Fastbins
1322 are designed especially for use with many small structs, objects or
1323 strings -- the default handles structs/objects/arrays with sizes up
1324 to 8 4byte fields, or small strings representing words, tokens,
1325 etc. Using fastbins for larger objects normally worsens
1326 fragmentation without improving speed.
1328 M_MXFAST is set in REQUEST size units. It is internally used in
1329 chunksize units, which adds padding and alignment. You can reduce
1330 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1331 algorithm to be a closer approximation of fifo-best-fit in all cases,
1332 not just for larger requests, but will generally cause it to be slower.
1337 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1342 #ifndef DEFAULT_MXFAST
1343 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
1348 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1349 to keep before releasing via malloc_trim in free().
1351 Automatic trimming is mainly useful in long-lived programs.
1352 Because trimming via sbrk can be slow on some systems, and can
1353 sometimes be wasteful (in cases where programs immediately
1354 afterward allocate more large chunks) the value should be high
1355 enough so that your overall system performance would improve by
1356 releasing this much memory.
1358 The trim threshold and the mmap control parameters (see below)
1359 can be traded off with one another. Trimming and mmapping are
1360 two different ways of releasing unused memory back to the
1361 system. Between these two, it is often possible to keep
1362 system-level demands of a long-lived program down to a bare
1363 minimum. For example, in one test suite of sessions measuring
1364 the XF86 X server on Linux, using a trim threshold of 128K and a
1365 mmap threshold of 192K led to near-minimal long term resource consumption.
1368 If you are using this malloc in a long-lived program, it should
1369 pay to experiment with these values. As a rough guide, you
1370 might set to a value close to the average size of a process
1371 (program) running on your system. Releasing this much memory
1372 would allow such a process to run in memory. Generally, it's
1373 worth it to tune for trimming rather than memory mapping when a
1374 program undergoes phases where several large chunks are
1375 allocated and released in ways that can reuse each other's
1376 storage, perhaps mixed with phases where there are no such
1377 chunks at all. And in well-behaved long-lived programs,
1378 controlling release of large blocks via trimming versus mapping
1381 However, in most programs, these parameters serve mainly as
1382 protection against the system-level effects of carrying around
1383 massive amounts of unneeded memory. Since frequent calls to
1384 sbrk, mmap, and munmap otherwise degrade performance, the default
1385 parameters are set to relatively high values that serve only as
1388 The trim value must be greater than page size to have any useful
1389 effect. To disable trimming completely, you can set to (unsigned long)(-1).
1392 Trim settings interact with fastbin (MXFAST) settings: Unless
1393 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1394 freeing a chunk with size less than or equal to MXFAST. Trimming is
1395 instead delayed until subsequent freeing of larger chunks. However,
1396 you can still force an attempted trim by calling malloc_trim.
1398 Also, trimming is not generally possible in cases where
1399 the main arena is obtained via mmap.
1401 Note that the trick some people use of mallocing a huge space and
1402 then freeing it at program startup, in an attempt to reserve system
1403 memory, doesn't have the intended effect under automatic trimming,
1404 since that memory will immediately be returned to the system.
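
  A sketch of adjusting this at run time (the 1MB figure is only an example):

    mallopt(M_TRIM_THRESHOLD, 1024 * 1024);  // trim only above ~1MB of slack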
1407 #define M_TRIM_THRESHOLD -1
1409 #ifndef DEFAULT_TRIM_THRESHOLD
1410 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1414 M_TOP_PAD is the amount of extra `padding' space to allocate or
1415 retain whenever sbrk is called. It is used in two ways internally:
1417 * When sbrk is called to extend the top of the arena to satisfy
1418 a new malloc request, this much padding is added to the sbrk request.
1421 * When malloc_trim is called automatically from free(),
1422 it is used as the `pad' argument.
1424 In both cases, the actual amount of padding is rounded
1425 so that the end of the arena is always a system page boundary.
1427 The main reason for using padding is to avoid calling sbrk so
1428 often. Having even a small pad greatly reduces the likelihood
1429 that nearly every malloc request during program start-up (or
1430 after trimming) will invoke sbrk, which needlessly wastes time.
1433 Automatic rounding-up to page-size units is normally sufficient
1434 to avoid measurable overhead, so the default is 0. However, in
1435 systems where sbrk is relatively slow, it can pay to increase
1436 this value, at the expense of carrying around more memory than is actually needed.
1440 #define M_TOP_PAD -2
1442 #ifndef DEFAULT_TOP_PAD
1443 #define DEFAULT_TOP_PAD (0)
1447 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
1448 adjusted MMAP_THRESHOLD.
1451 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
1452 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
1455 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
1456 /* For 32-bit platforms we cannot increase the maximum mmap
1457 threshold much because it is also the minimum value for the
1458 maximum heap size and its alignment. Going above 512k (i.e., 1M
1459 for new heaps) wastes too much address space. */
1460 # if __WORDSIZE == 32
1461 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
1463 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
1468 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1469 to service a request. Requests of at least this size that cannot
1470 be allocated using already-existing space will be serviced via mmap.
1471 (If enough normal freed space already exists it is used instead.)
1473 Using mmap segregates relatively large chunks of memory so that
1474 they can be individually obtained and released from the host
1475 system. A request serviced through mmap is never reused by any
1476 other request (at least not directly; the system may just so
1477 happen to remap successive requests to the same locations).
1479 Segregating space in this way has the benefits that:
1481 1. Mmapped space can ALWAYS be individually released back
1482 to the system, which helps keep the system level memory
1483 demands of a long-lived program low.
1484 2. Mapped memory can never become `locked' between
1485 other chunks, as can happen with normally allocated chunks, which
1486 means that even trimming via malloc_trim would not release them.
1487 3. On some systems with "holes" in address spaces, mmap can obtain
1488 memory that sbrk cannot.
1490 However, it has the disadvantages that:
1492 1. The space cannot be reclaimed, consolidated, and then
1493 used to service later requests, as happens with normal chunks.
1494 2. It can lead to more wastage because of mmap page alignment requirements.
1496 3. It causes malloc performance to be more dependent on host
1497 system memory management support routines which may vary in
1498 implementation quality and may impose arbitrary
1499 limitations. Generally, servicing a request via normal
1500 malloc steps is faster than going through a system's mmap.
1502 The advantages of mmap nearly always outweigh disadvantages for
1503 "large" chunks, but the value of "large" varies across systems. The
1504 default is an empirically derived value that works well in most systems.
1509 The above was written in 2001. Since then the world has changed a lot.
1510 Memory got bigger. Applications got bigger. The virtual address space
1511 layout in 32 bit linux changed.
1513 In the new situation, brk() and mmap space is shared and there are no
1514 artificial limits on brk size imposed by the kernel. What is more,
1515 applications have started using transient allocations larger than the
1516 128Kb as was imagined in 2001.
1518 The price for mmap is also high now; each time glibc mmaps from the
1519 kernel, the kernel is forced to zero out the memory it gives to the
1520 application. Zeroing memory is expensive and eats a lot of cache and
1521 memory bandwidth. This has nothing to do with the efficiency of the
1522 virtual memory system, by doing mmap the kernel just has no choice but
1525 In 2001, the kernel had a maximum size for brk() which was about 800
1526 megabytes on 32 bit x86, at that point brk() would hit the first
1527 mmaped shared libraries and couldn't expand anymore. With current 2.6
1528 kernels, the VA space layout is different and brk() and mmap
1529 both can span the entire heap at will.
1531 Rather than using a static threshold for the brk/mmap tradeoff,
1532 we are now using a simple dynamic one. The goal is still to avoid
1533 fragmentation. The old goals we kept are
1534 1) try to get the long lived large allocations to use mmap()
1535 2) really large allocations should always use mmap()
1536 and we're adding now:
1537 3) transient allocations should use brk() to avoid forcing the kernel
1538 to zero memory over and over again
1540 The implementation works with a sliding threshold, which is by default
1541 limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
1542 out at 128Kb as per the 2001 default.
1544 This allows us to satisfy requirement 1) under the assumption that long
1545 lived allocations are made early in the process' lifespan, before it has
1546 started doing dynamic allocations of the same size (which will
1547 increase the threshold).
1549 The upperbound on the threshold satisfies requirement 2)
1551 The threshold goes up in value when the application frees memory that was
1552 allocated with the mmap allocator. The idea is that once the application
1553 starts freeing memory of a certain size, it's highly probable that this is
1554 a size the application uses for transient allocations. This estimator
1555 is there to satisfy the new third requirement.
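
  A sketch of overriding the default threshold at run time (the 1MB figure is
  only an example):

    mallopt(M_MMAP_THRESHOLD, 1024 * 1024);  // serve requests >= 1MB via mmap
                                             // when no existing space fits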
1559 #define M_MMAP_THRESHOLD -3
1561 #ifndef DEFAULT_MMAP_THRESHOLD
1562 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1566 M_MMAP_MAX is the maximum number of requests to simultaneously
1567 service using mmap. This parameter exists because
1568 some systems have a limited number of internal tables for
1569 use by mmap, and using more than a few of them may degrade performance.
1572 The default is set to a value that serves only as a safeguard.
1573 Setting to 0 disables use of mmap for servicing large requests. If
1574 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1575 to non-zero values in mallopt will fail.
1578 #define M_MMAP_MAX -4
1580 #ifndef DEFAULT_MMAP_MAX
1582 #define DEFAULT_MMAP_MAX (65536)
1584 #define DEFAULT_MMAP_MAX (0)
1589 } /* end of extern "C" */
1595 #define BOUNDED_N(ptr, sz) (ptr)
1597 #ifndef RETURN_ADDRESS
1598 #define RETURN_ADDRESS(X_) (NULL)
1601 /* On some platforms we can compile internal, not exported functions better.
1602 Let the environment provide a macro and define it to be empty if it
1603 is not available. */
1604 #ifndef internal_function
1605 # define internal_function
1608 /* Forward declarations. */
1609 struct malloc_chunk;
1610 typedef struct malloc_chunk* mchunkptr;
1612 /* Internal routines. */
1616 static Void_t*  _int_malloc(mstate, size_t);
1617 #ifdef ATOMIC_FASTBINS
1618 static void     _int_free(mstate, mchunkptr, int);
1620 static void     _int_free(mstate, mchunkptr);
1622 static Void_t*  _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T, INTERNAL_SIZE_T);
1624 static Void_t*  _int_memalign(mstate, size_t, size_t);
1625 static Void_t*  _int_valloc(mstate, size_t);
1626 static Void_t*  _int_pvalloc(mstate, size_t);
1627 /*static Void_t* cALLOc(size_t, size_t);*/
1629 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1630 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1632 static int      mTRIm(mstate, size_t);
1633 static size_t   mUSABLe(Void_t*);
1634 static void     mSTATs(void);
1635 static int      mALLOPt(int, int);
1636 static struct mallinfo mALLINFo(mstate);
1637 static void     malloc_printerr(int action, const char *str, void *ptr);
1639 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1640 static int     internal_function top_check(void);
1641 static void    internal_function munmap_chunk(mchunkptr p);
1643 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1646 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1647 static void    free_check(Void_t* mem, const Void_t *caller);
1648 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1649                              const Void_t *caller);
1650 static Void_t* memalign_check(size_t alignment, size_t bytes,
1651                               const Void_t *caller);
1654 # if USE___THREAD || !defined SHARED
1655 /* These routines are never needed in this configuration. */
1662 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1663 static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1664 static void    free_starter(Void_t* mem, const Void_t *caller);
1666 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1667 static void    free_atfork(Void_t* mem, const Void_t *caller);
1672 static Void_t*  _int_malloc();
1673 static void     _int_free();
1674 static Void_t*  _int_realloc();
1675 static Void_t*  _int_memalign();
1676 static Void_t*  _int_valloc();
1677 static Void_t*  _int_pvalloc();
1678 /*static Void_t* cALLOc();*/
1679 static Void_t** _int_icalloc();
1680 static Void_t** _int_icomalloc();
1682 static size_t   mUSABLe();
1683 static void     mSTATs();
1684 static int      mALLOPt();
1685 static struct mallinfo mALLINFo();
1692 /* ------------- Optional versions of memcopy ---------------- */
1698 Note: memcpy is ONLY invoked with non-overlapping regions,
1699 so the (usually slower) memmove is not needed.
1702 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1703 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1705 #else /* !USE_MEMCPY */
1707 /* Use Duff's device for good zeroing/copying performance. */
1709 #define MALLOC_ZERO(charp, nbytes) \
1711 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1712 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1714 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1716 case 0: for(;;) { *mzp++ = 0; \
1717 case 7: *mzp++ = 0; \
1718 case 6: *mzp++ = 0; \
1719 case 5: *mzp++ = 0; \
1720 case 4: *mzp++ = 0; \
1721 case 3: *mzp++ = 0; \
1722 case 2: *mzp++ = 0; \
1723 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1727 #define MALLOC_COPY(dest,src,nbytes) \
1729 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1730 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1731 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1733 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1735 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1736 case 7: *mcdst++ = *mcsrc++; \
1737 case 6: *mcdst++ = *mcsrc++; \
1738 case 5: *mcdst++ = *mcsrc++; \
1739 case 4: *mcdst++ = *mcsrc++; \
1740 case 3: *mcdst++ = *mcsrc++; \
1741 case 2: *mcdst++ = *mcsrc++; \
1742 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
/* ------------------ MMAP support ------------------ */

#if HAVE_MMAP

#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
# define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
# define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

/*
   Nearly all versions of mmap support MAP_ANONYMOUS,
   so the following is unlikely to be needed, but is
   supplied just in case.
*/

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#endif /* HAVE_MMAP */
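/*
  Illustrative sketch (not part of the original sources; kept out of the
  build with #if 0): how the MMAP() wrapper above is typically used
  further down in sYSMALLOc, i.e. one anonymous, private, read/write
  mapping that is later released with munmap().  It assumes the
  HAVE_MMAP configuration; the function name is hypothetical.
*/
#if 0
static void example_mmap_page(size_t pagesize)
{
  char *mm = (char*) MMAP(0, pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if (mm != MAP_FAILED)
    munmap(mm, pagesize);   /* release the whole region again */
}
#endif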
/*
  -----------------------  Chunk representations -----------------------
*/

/*
  This struct declaration is misleading (but accurate and necessary).
  It declares a "view" into memory allowing access to necessary
  fields at known offsets from a given base. See explanation below.
*/

struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;   /* Size of previous chunk (if free).  */
  INTERNAL_SIZE_T      size;        /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;          /* double links -- used only if free. */
  struct malloc_chunk* bk;

  /* Only used for large blocks: pointer to next larger size.  */
  struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
  struct malloc_chunk* bk_nextsize;
};
/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                       |M|P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_size() bytes)                      .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus at least double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory. If
    prev_inuse is set for any given chunk, then you CANNOT determine
    the size of the previous chunk, and might even get a memory
    addressing fault when trying to do so.

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. This makes it easier to
    deal with alignments etc but can be very confusing when trying
    to extend or adapt this code.

    The two exceptions to all this are

     1. The special chunk `top' doesn't bother using the
        trailing size field since there is no next contiguous chunk
        that would have to index off it. After initialization, `top'
        is forced to always exist.  If it would become less than
        MINSIZE bytes long, it is replenished.

     2. Chunks allocated via mmap, which have the second-lowest-order
        bit M (IS_MMAPPED) set in their size fields.  Because they are
        allocated one-by-one, each must contain its own trailing size field.
*/
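/*
  Illustrative sketch (not part of the original sources; kept out of the
  build with #if 0): the boundary-tag arithmetic described above,
  spelled out with raw pointer math instead of the chunk2mem/mem2chunk
  and flag macros defined in the next sections.  The low size bit (0x1)
  is the PREV_INUSE flag discussed in the text; the function name is
  hypothetical.
*/
#if 0
static void example_walk_one_chunk(Void_t* mem)
{
  /* step back from the user pointer to the chunk header */
  mchunkptr p = (mchunkptr)((char*)mem - 2*SIZE_SZ);

  INTERNAL_SIZE_T sz = p->size & ~(INTERNAL_SIZE_T)0x7;  /* strip flag bits */

  /* the next physical chunk begins sz bytes after this header ... */
  mchunkptr next = (mchunkptr)((char*)p + sz);

  /* ... and its prev_size doubles as this chunk's footer while this
     chunk is free (i.e. while next's PREV_INUSE bit is clear) */
  if (!(next->size & 0x1))
    assert(next->prev_size == sz);
}
#endif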
/*
  ---------- Size and alignment checks and conversions ----------
*/

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE        (offsetof(struct malloc_chunk, fd_nextsize))

/* The smallest size we can malloc is an aligned minimal chunk */

#define MINSIZE  \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)

#define misaligned_chunk(p) \
  ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
   & MALLOC_ALIGN_MASK)


/*
   Check if a request is so large that it would wrap around zero when
   padded and aligned. To simplify some other code, the bound is made
   low enough so that adding MINSIZE will also not wrap around zero.
*/

#define REQUEST_OUT_OF_RANGE(req)                                 \
  ((unsigned long)(req) >=                                        \
   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */

#define request2size(req)                                         \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE)  ?             \
   MINSIZE :                                                      \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/*  Same, except also perform argument check */

#define checked_request2size(req, sz)                             \
  if (REQUEST_OUT_OF_RANGE(req)) {                                \
    MALLOC_FAILURE_ACTION;                                        \
    return 0;                                                     \
  }                                                               \
  (sz) = request2size(req);
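/*
  Worked example (not part of the original sources; kept out of the
  build with #if 0).  It assumes a 64-bit layout with SIZE_SZ == 8 and
  MALLOC_ALIGNMENT == 16, so MALLOC_ALIGN_MASK == 15 and MINSIZE == 32.
  Under those assumptions a 100-byte request is padded to
  (100 + 8 + 15) & ~15 == 112 bytes of chunk, while a 1-byte request
  cannot go below the smallest aligned chunk and becomes MINSIZE.
*/
#if 0
static void example_request2size(void)
{
  assert(request2size(100) == 112);
  assert(request2size(1)   == MINSIZE);
}
#endif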
/*
  --------------- Physical chunk operations ---------------
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p)       ((p)->size & PREV_INUSE)


/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)


/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
   from a non-main arena.  This is only set immediately before handing
   the chunk to the user, if necessary.  */
#define NON_MAIN_ARENA 0x4

/* check for chunk from non-main arena */
#define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)


/*
  Bits to mask off when extracting size

  Note: IS_MMAPPED is intentionally not masked off from size field in
  macros for which mmapped chunks should never be seen. This should
  cause helpful core dumps to occur if it is tried by accident by
  people extending or adapting this malloc.
*/
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)

/* Get size, ignoring use bits */
#define chunksize(p)         ((p)->size & ~(SIZE_BITS))


/* Ptr to next physical malloc_chunk. */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit */
#define inuse(p)\
  ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
  ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE

#define clear_inuse(p)\
  ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)


/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
  (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s)  ((p)->size = (((p)->size & SIZE_BITS) | (s)))

/* Set size/use field */
#define set_head(p, s)       ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s)       (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
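/*
  Illustrative sketch (not part of the original sources; kept out of the
  build with #if 0): splitting a free chunk of size `sz' into a leading
  piece of `nb' bytes and a trailing remainder, using only the macros
  above.  It assumes both sizes are already aligned, nb + MINSIZE <= sz,
  and that the chunk before p is in use (so PREV_INUSE may simply be
  set); the function name is hypothetical.
*/
#if 0
static mchunkptr example_split(mchunkptr p, INTERNAL_SIZE_T nb,
                               INTERNAL_SIZE_T sz)
{
  mchunkptr remainder = chunk_at_offset(p, nb);
  set_head(p, nb | PREV_INUSE);                 /* leading piece, now in use */
  set_head(remainder, (sz - nb) | PREV_INUSE);  /* remainder header ...      */
  set_foot(remainder, sz - nb);                 /* ... and matching footer   */
  return remainder;
}
#endif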
/*
  -------------------- Internal data structures --------------------

   All internal state is held in an instance of malloc_state defined
   below. There are no other static variables, except in two optional
   cases:
   * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
   * If HAVE_MMAP is true, but mmap doesn't support
     MAP_ANONYMOUS, a dummy file descriptor for mmap.

   Beware of lots of tricks that minimize the total bookkeeping space
   requirements. The result is a little over 1K bytes (for 4byte
   pointers and size_t.)
*/

/*
  Bins

    An array of bin headers for free chunks. Each bin is doubly
    linked.  The bins are approximately proportionally (log) spaced.
    There are a lot of these bins (128). This may look excessive, but
    works very well in practice.  Most bins hold sizes that are
    unusual as malloc request sizes, but are more usual for fragments
    and consolidated sets of chunks, which is what these bins hold, so
    they can be found quickly.  All procedures maintain the invariant
    that no consolidated chunk physically borders another one, so each
    chunk in a list is known to be preceded and followed by either
    inuse chunks or the ends of memory.

    Chunks in bins are kept in size order, with ties going to the
    approximately least recently used chunk. Ordering isn't needed
    for the small bins, which all contain the same-sized chunks, but
    facilitates best-fit allocation for larger chunks. These lists
    are just sequential. Keeping them in order almost never requires
    enough traversal to warrant using fancier ordered data
    structures.

    Chunks of the same size are linked with the most
    recently freed at the front, and allocations are taken from the
    back.  This results in LRU (FIFO) allocation order, which tends
    to give each chunk an equal opportunity to be consolidated with
    adjacent freed chunks, resulting in larger free chunks and less
    fragmentation.

    To simplify use in double-linked lists, each bin header acts
    as a malloc_chunk. This avoids special-casing for headers.
    But to conserve space and improve locality, we allocate
    only the fd/bk pointers of bins, and then use repositioning tricks
    to treat these as the fields of a malloc_chunk*.
*/

typedef struct malloc_chunk* mbinptr;
/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) \
  (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2]))                    \
             - offsetof (struct malloc_chunk, fd))

/* analog of ++bin */
#define next_bin(b)  ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

/* Reminders about list directionality within bins */
#define first(b)     ((b)->fd)
#define last(b)      ((b)->bk)

/* Take a chunk off a bin list */
#define unlink(P, BK, FD) {                                            \
  FD = P->fd;                                                          \
  BK = P->bk;                                                          \
  if (__builtin_expect (FD->bk != P || BK->fd != P, 0))                \
    malloc_printerr (check_action, "corrupted double-linked list", P); \
  else {                                                               \
    FD->bk = BK;                                                       \
    BK->fd = FD;                                                       \
    if (!in_smallbin_range (P->size)                                   \
        && __builtin_expect (P->fd_nextsize != NULL, 0)) {             \
      assert (P->fd_nextsize->bk_nextsize == P);                       \
      assert (P->bk_nextsize->fd_nextsize == P);                       \
      if (FD->fd_nextsize == NULL) {                                   \
        if (P->fd_nextsize == P)                                       \
          FD->fd_nextsize = FD->bk_nextsize = FD;                      \
        else {                                                         \
          FD->fd_nextsize = P->fd_nextsize;                            \
          FD->bk_nextsize = P->bk_nextsize;                            \
          P->fd_nextsize->bk_nextsize = FD;                            \
          P->bk_nextsize->fd_nextsize = FD;                            \
        }                                                              \
      } else {                                                         \
        P->fd_nextsize->bk_nextsize = P->bk_nextsize;                  \
        P->bk_nextsize->fd_nextsize = P->fd_nextsize;                  \
      }                                                                \
    }                                                                  \
  }                                                                    \
}
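/*
  Illustrative sketch (not part of the original sources; kept out of the
  build with #if 0): what the bin_at() repositioning trick described
  above buys us.  Only the fd/bk slots of each bin header are backed by
  real storage (two consecutive entries in av->bins), yet the header can
  be handled like any other chunk on a circular doubly-linked list.
  This forward-references the malloc_state definition further below;
  the function name is hypothetical.
*/
#if 0
static void example_empty_bin(mstate av, int idx)
{
  mbinptr b = bin_at(av, idx);
  /* An empty bin's header points at itself in both directions, exactly
     like an empty circular list.  Its prev_size/size "fields" overlap
     the previous bin's storage and are never touched.                 */
  assert(first(b) == b && last(b) == b);
}
#endif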
/*
  Indexing

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically spaced:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The bins top out around 1MB because we expect to service large
    requests via mmap.
*/

#define NBINS             128
#define NSMALLBINS         64
#define SMALLBIN_WIDTH    MALLOC_ALIGNMENT
#define MIN_LARGE_SIZE    (NSMALLBINS * SMALLBIN_WIDTH)

#define in_smallbin_range(sz)  \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) \
  (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))

#define largebin_index_32(sz)                                                \
(((((unsigned long)(sz)) >>  6) <= 38)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

// XXX It remains to be seen whether it is good to keep the widths of
// XXX the buckets the same or whether it should be scaled by a factor
// XXX of two as well.
#define largebin_index_64(sz)                                                \
(((((unsigned long)(sz)) >>  6) <= 48)?  48 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
                                        126)

#define largebin_index(sz) \
  (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))

#define bin_index(sz) \
 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
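/*
  Worked example (not part of the original sources; kept out of the
  build with #if 0).  Assuming SIZE_SZ == 8, SMALLBIN_WIDTH is 16 and
  MIN_LARGE_SIZE is 64 * 16 == 1024.  A 512-byte chunk is therefore
  "small" and lands in smallbin_index(512) == 512 >> 4 == 32, while a
  2000-byte chunk is "large" and lands in largebin_index_64(2000)
  == 48 + (2000 >> 6) == 48 + 31 == 79.
*/
#if 0
static void example_bin_index(void)
{
  assert(in_smallbin_range(512)   && bin_index(512)  == 32);
  assert(!in_smallbin_range(2000) && bin_index(2000) == 79);
}
#endif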
/*
  Unsorted chunks

    All remainders from chunk splits, as well as all returned chunks,
    are first placed in the "unsorted" bin. They are then placed
    in regular bins after malloc gives them ONE chance to be used before
    binning. So, basically, the unsorted_chunks list acts as a queue,
    with chunks being placed on it in free (and malloc_consolidate),
    and taken off (to be either used or placed in bins) in malloc.

    The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
    does not have to be taken into account in size comparisons.
*/

/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
#define unsorted_chunks(M)          (bin_at(M, 1))
/*
  Top

    The top-most available chunk (i.e., the one bordering the end of
    available memory) is treated specially. It is never included in
    any bin, is used only if no other chunk is available, and is
    released back to the system if it is very large (see
    M_TRIM_THRESHOLD).  Because top initially
    points to its own bin with initial zero size, thus forcing
    extension on the first malloc request, we avoid having any special
    code in malloc to check whether it even exists yet. But we still
    need to do so when getting memory from system, so we make
    initial_top treat the bin as a legal but unusable chunk during the
    interval between initialization and the first call to
    sYSMALLOc. (This is somewhat delicate, since it relies on
    the 2 preceding words to be zero during this interval as well.)
*/

/* Conveniently, the unsorted bin can be used as dummy top on first call */
#define initial_top(M)              (unsorted_chunks(M))
/*
  Binmap

    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binmap' is a
    bitvector recording whether bins are definitely empty so they can
    be skipped over during traversals.  The bits are NOT always
    cleared as soon as bins are empty, but instead only
    when they are noticed to be empty during traversal in malloc.
*/

/* Conservatively use 32 bits per map word, even if on 64bit system */
#define BINMAPSHIFT      5
#define BITSPERMAP       (1U << BINMAPSHIFT)
#define BINMAPSIZE       (NBINS / BITSPERMAP)

#define idx2block(i)     ((i) >> BINMAPSHIFT)
#define idx2bit(i)       ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)    ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i)  ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i)  ((m)->binmap[idx2block(i)] &   idx2bit(i))
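/*
  Worked example (not part of the original sources; kept out of the
  build with #if 0): bin 70 lives in binmap word idx2block(70) == 70 >> 5
  == 2, at bit idx2bit(70) == 1 << (70 & 31) == 1 << 6.  mark_bin() and
  unmark_bin() just OR that bit in or AND it out; the function name is
  hypothetical and forward-references malloc_state below.
*/
#if 0
static void example_binmap(mstate av)
{
  mark_bin(av, 70);
  assert(get_binmap(av, 70) == (1U << 6));
  unmark_bin(av, 70);
  assert(get_binmap(av, 70) == 0);
}
#endif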
/*
  Fastbins

    An array of lists holding recently freed small chunks.  Fastbins
    are not doubly linked.  It is faster to single-link them, and
    since chunks are never removed from the middles of these lists,
    double linking is not necessary. Also, unlike regular bins, they
    are not even processed in FIFO order (they use faster LIFO) since
    ordering doesn't much matter in the transient contexts in which
    fastbins are normally used.

    Chunks in fastbins keep their inuse bit set, so they cannot
    be consolidated with other free chunks. malloc_consolidate
    releases all chunks in fastbins and consolidates them with
    other free chunks.
*/

typedef struct malloc_chunk* mfastbinptr;
#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) \
  ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)


/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE     (80 * SIZE_SZ / 4)

#define NFASTBINS  (fastbin_index(request2size(MAX_FAST_SIZE))+1)
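/*
  Worked example (not part of the original sources; kept out of the
  build with #if 0).  Assuming SIZE_SZ == 8, the smallest possible
  chunk (32 bytes) indexes fastbin (32 >> 4) - 2 == 0, and fastbin
  sizes then grow by 16 bytes per bin, so a 64-byte chunk uses index 2.
*/
#if 0
static void example_fastbin_index(void)
{
  assert(fastbin_index(32) == 0);
  assert(fastbin_index(64) == 2);
}
#endif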
/*
  FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
  that triggers automatic consolidation of possibly-surrounding
  fastbin chunks. This is a heuristic, so the exact value should not
  matter too much. It is defined at half the default trim threshold as a
  compromise heuristic to only attempt consolidation if it is likely
  to lead to trimming. However, it is not dynamically tunable, since
  consolidation reduces fragmentation surrounding large chunks even
  if trimming is not used.
*/

#define FASTBIN_CONSOLIDATION_THRESHOLD  (65536UL)
/*
  Since the lowest 2 bits in max_fast don't matter in size comparisons,
  they are used as flags.
*/

/*
  FASTCHUNKS_BIT held in max_fast indicates that there are probably
  some fastbin chunks. It is set true on entering a chunk into any
  fastbin, and cleared only in malloc_consolidate.

  The truth value is inverted so that have_fastchunks will be true
  upon startup (since statics are zero-filled), simplifying
  initialization checks.
*/

#define FASTCHUNKS_BIT        (1U)

#define have_fastchunks(M)     (((M)->flags &  FASTCHUNKS_BIT) == 0)
#ifdef ATOMIC_FASTBINS
#define clear_fastchunks(M)    catomic_or (&(M)->flags, FASTCHUNKS_BIT)
#define set_fastchunks(M)      catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
#else
#define clear_fastchunks(M)    ((M)->flags |=  FASTCHUNKS_BIT)
#define set_fastchunks(M)      ((M)->flags &= ~FASTCHUNKS_BIT)
#endif
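/*
  Illustrative sketch (not part of the original sources; kept out of the
  build with #if 0): the inverted truth value in action.  A freshly
  zero-filled arena reports have_fastchunks() as true, which is what
  forces the first malloc_consolidate() call that completes
  initialization; the function name is hypothetical.
*/
#if 0
static void example_fastchunk_flag(mstate av)
{
  /* av->flags is 0 right after zero-fill */
  assert(have_fastchunks(av));   /* bit clear -> "may have fastchunks" */
  clear_fastchunks(av);          /* sets the bit                       */
  assert(!have_fastchunks(av));
  set_fastchunks(av);            /* clears the bit again               */
  assert(have_fastchunks(av));
}
#endif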
/*
  NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
  regions.  Otherwise, contiguity is exploited in merging together,
  when possible, results from consecutive MORECORE calls.

  The initial value comes from MORECORE_CONTIGUOUS, but is
  changed dynamically if mmap is ever used as an sbrk substitute.
*/

#define NONCONTIGUOUS_BIT     (2U)

#define contiguous(M)          (((M)->flags &  NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M)       (((M)->flags &  NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M)   ((M)->flags |=  NONCONTIGUOUS_BIT)
#define set_contiguous(M)      ((M)->flags &= ~NONCONTIGUOUS_BIT)
/*
   Set value of max_fast.
   Use impossibly small value if 0.
   Precondition: there are no existing fastbin chunks.
   Setting the value clears fastchunk bit but preserves noncontiguous bit.
*/

#define set_max_fast(s) \
  global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
#define get_max_fast() global_max_fast
/*
   ----------- Internal state representation and initialization -----------
*/

struct malloc_state {
  /* Serialize access.  */
  mutex_t mutex;

  /* Flags (formerly in max_fast).  */
  int flags;

#if THREAD_STATS
  /* Statistics for locking.  Only used if THREAD_STATS is defined. */
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
#endif

  /* Fastbins */
  mfastbinptr      fastbinsY[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr        top;

  /* The remainder from the most recent split of a small request */
  mchunkptr        last_remainder;

  /* Normal bins packed as described above */
  mchunkptr        bins[NBINS * 2 - 2];

  /* Bitmap of bins */
  unsigned int     binmap[BINMAPSIZE];

  /* Linked list */
  struct malloc_state *next;

  /* Linked list for free arenas.  */
  struct malloc_state *next_free;

  /* Memory allocated from the system in this arena.  */
  INTERNAL_SIZE_T system_mem;
  INTERNAL_SIZE_T max_system_mem;
};

struct malloc_par {
  /* Tunable parameters */
  unsigned long    trim_threshold;
  INTERNAL_SIZE_T  top_pad;
  INTERNAL_SIZE_T  mmap_threshold;
  INTERNAL_SIZE_T  arena_test;
  INTERNAL_SIZE_T  arena_max;

  /* Memory map support */
  int              n_mmaps;
  int              n_mmaps_max;
  int              max_n_mmaps;
  /* the mmap_threshold is dynamic, until the user sets
     it manually, at which point we need to disable any
     dynamic behavior. */
  int              no_dyn_threshold;

  /* Cache malloc_getpagesize */
  unsigned int     pagesize;

  /* Statistics */
  INTERNAL_SIZE_T  mmapped_mem;
  /*INTERNAL_SIZE_T  sbrked_mem;*/
  /*INTERNAL_SIZE_T  max_sbrked_mem;*/
  INTERNAL_SIZE_T  max_mmapped_mem;
  INTERNAL_SIZE_T  max_total_mem; /* only kept for NO_THREADS */

  /* First address handed out by MORECORE/sbrk.  */
  char*            sbrk_base;
};

/* There are several instances of this struct ("arenas") in this
   malloc.  If you are adapting this malloc in a way that does NOT use
   a static or mmapped malloc_state, you MUST explicitly zero-fill it
   before using. This malloc relies on the property that malloc_state
   is initialized to all zeroes (as is true of C statics).  */

static struct malloc_state main_arena;

/* There is only one instance of the malloc parameters.  */

static struct malloc_par mp_;


/*  Non public mallopt parameters.  */
#define M_ARENA_TEST -7
#define M_ARENA_MAX  -8


/* Maximum size of memory handled in fastbins.  */
static INTERNAL_SIZE_T global_max_fast;
/*
  Initialize a malloc_state struct.

  This is called only from within malloc_consolidate, which needs
  be called in the same contexts anyway.  It is never called directly
  outside of malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in malloc_consolidate is fine though.)
*/

#if __STD_C
static void malloc_init_state(mstate av)
#else
static void malloc_init_state(av) mstate av;
#endif
{
  int     i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

#if MORECORE_CONTIGUOUS
  if (av != &main_arena)
#endif
    set_noncontiguous(av);
  if (av == &main_arena)
    set_max_fast(DEFAULT_MXFAST);
  av->flags |= FASTCHUNKS_BIT;

  av->top = initial_top(av);
}
/*
   Other internal utilities operating on mstates
*/

#if __STD_C
static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
static int      sYSTRIm(size_t, mstate);
static void     malloc_consolidate(mstate);
static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
#else
static Void_t*  sYSMALLOc();
static int      sYSTRIm();
static void     malloc_consolidate();
static Void_t** iALLOc();
#endif
/* -------------- Early definitions for debugging hooks ---------------- */

/* Define and initialize the hook variables.  These weak definitions must
   appear before any use of the variables in a function (arena.c uses one).  */
#ifndef weak_variable
#ifndef _LIBC
#define weak_variable /**/
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs.  */
#define weak_variable weak_function
#endif
#endif

/* Forward declarations.  */
static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
					     const __malloc_ptr_t caller));
static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
					      const __malloc_ptr_t caller));
static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
					       const __malloc_ptr_t caller));

void weak_variable (*__malloc_initialize_hook) (void) = NULL;
void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
				   const __malloc_ptr_t) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
     (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
     (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
     = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
     (size_t __alignment, size_t __size, const __malloc_ptr_t)
     = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) (void) = NULL;
/* ---------------- Error behavior ------------------------------------ */

#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 3
#endif

static int check_action = DEFAULT_CHECK_ACTION;


/* ------------------ Testing support ----------------------------------*/

static int perturb_byte;

#define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
#define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
/* ------------------- Support for multiple arenas -------------------- */
#include "arena.c"

/*
  Debugging support

  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if ! MALLOC_DEBUG

#define check_chunk(A,P)
#define check_free_chunk(A,P)
#define check_inuse_chunk(A,P)
#define check_remalloced_chunk(A,P,N)
#define check_malloced_chunk(A,P,N)
#define check_malloc_state(A)

#else

#define check_chunk(A,P)              do_check_chunk(A,P)
#define check_free_chunk(A,P)         do_check_free_chunk(A,P)
#define check_inuse_chunk(A,P)        do_check_inuse_chunk(A,P)
#define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
#define check_malloced_chunk(A,P,N)   do_check_malloced_chunk(A,P,N)
#define check_malloc_state(A)         do_check_malloc_state(A)
/*
  Properties of all chunks
*/

#if __STD_C
static void do_check_chunk(mstate av, mchunkptr p)
#else
static void do_check_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  unsigned long sz = chunksize(p);
  /* min and max possible addresses assuming contiguous allocation */
  char* max_address = (char*)(av->top) + chunksize(av->top);
  char* min_address = max_address - av->system_mem;

  if (!chunk_is_mmapped(p)) {

    /* Has legal address ... */
    if (p != av->top) {
      if (contiguous(av)) {
        assert(((char*)p) >= min_address);
        assert(((char*)p + sz) <= ((char*)(av->top)));
      }
    }
    else {
      /* top size is always at least MINSIZE */
      assert((unsigned long)(sz) >= MINSIZE);
      /* top predecessor always marked inuse */
      assert(prev_inuse(p));
    }

  }
  else {
#if HAVE_MMAP
    /* address is outside main heap  */
    if (contiguous(av) && av->top != initial_top(av)) {
      assert(((char*)p) < min_address || ((char*)p) >= max_address);
    }
    /* chunk is page-aligned */
    assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
    /* mem is aligned */
    assert(aligned_OK(chunk2mem(p)));
#else
    /* force an appropriate assert violation if debug set */
    assert(!chunk_is_mmapped(p));
#endif
  }
}
/*
  Properties of free chunks
*/

#if __STD_C
static void do_check_free_chunk(mstate av, mchunkptr p)
#else
static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(av, p);

  /* Chunk must claim to be free ... */
  assert(!inuse(p));
  assert (!chunk_is_mmapped(p));

  /* Unless a special marker, must have OK fields */
  if ((unsigned long)(sz) >= MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert (next == av->top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}
/*
  Properties of inuse chunks
*/

#if __STD_C
static void do_check_inuse_chunk(mstate av, mchunkptr p)
#else
static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
#endif
{
  mchunkptr next;

  do_check_chunk(av, p);

  if (chunk_is_mmapped(p))
    return; /* mmapped chunks have no next/prev */

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  next = next_chunk(p);

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))  {
    /* Note that we cannot even look at prev unless it is not inuse */
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(av, prv);
  }

  if (next == av->top) {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(av, next);
}
/*
  Properties of chunks recycled from fastbins
*/

#if __STD_C
static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_remalloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);

  if (!chunk_is_mmapped(p)) {
    assert(av == arena_for_chunk(p));
    if (chunk_non_main_arena(p))
      assert(av != &main_arena);
    else
      assert(av == &main_arena);
  }

  do_check_inuse_chunk(av, p);

  /* Legal size ... */
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert((unsigned long)(sz) >= MINSIZE);
  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));
  /* chunk is less than MINSIZE more than request */
  assert((long)(sz) - (long)(s) >= 0);
  assert((long)(sz) - (long)(s + MINSIZE) < 0);
}
/*
  Properties of nonrecycled chunks at the point they are malloced
*/

#if __STD_C
static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(av, p, s)
mstate av; mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  /* same as recycled case ... */
  do_check_remalloced_chunk(av, p, s);

  /*
    ... plus,  must obey implementation invariant that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use
    chunk, or the base of its memory arena. This is ensured
    by making all allocations from the `lowest' part of any found
    chunk.  This does not necessarily hold however for chunks
    recycled via fastbins.
  */

  assert(prev_inuse(p));
}
/*
  Properties of malloc_state.

  This may be useful for debugging malloc, as well as detecting user
  programmer errors that somehow write into malloc_state.

  If you are extending or experimenting with this malloc, you can
  probably figure out how to hack this routine to print out or
  display chunk addresses, sizes, bins, and other instrumentation.
*/

static void do_check_malloc_state(mstate av)
{
  int i;
  mchunkptr p;
  mchunkptr q;
  mbinptr b;
  unsigned int idx;
  INTERNAL_SIZE_T size;
  unsigned long total = 0;
  int max_fast_bin;

  /* internal size_t must be no wider than pointer type */
  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));

  /* alignment is a power of 2 */
  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

  /* cannot run remaining checks until fully initialized */
  if (av->top == 0 || av->top == initial_top(av))
    return;

  /* pagesize is a power of 2 */
  assert((mp_.pagesize & (mp_.pagesize-1)) == 0);

  /* A contiguous main_arena is consistent with sbrk_base.  */
  if (av == &main_arena && contiguous(av))
    assert((char*)mp_.sbrk_base + av->system_mem ==
	   (char*)av->top + chunksize(av->top));

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));

  max_fast_bin = fastbin_index(get_max_fast ());

  for (i = 0; i < NFASTBINS; ++i) {
    p = fastbin (av, i);

    /* The following test can only be performed for the main arena.
       While mallopt calls malloc_consolidate to get rid of all fast
       bins (especially those larger than the new maximum) this does
       only happen for the main arena.  Trying to do this for any
       other arena would mean those arenas have to be locked and
       malloc_consolidate be called for them.  This is excessive.  And
       even if this is acceptable to somebody it still cannot solve
       the problem completely since if the arena is locked a
       concurrent malloc call might create a new arena which then
       could use the newly invalid fast bins.  */

    /* all bins past max_fast are empty */
    if (av == &main_arena && i > max_fast_bin)
      assert(p == 0);

    while (p != 0) {
      /* each chunk claims to be inuse */
      do_check_inuse_chunk(av, p);
      total += chunksize(p);
      /* chunk belongs in this bin */
      assert(fastbin_index(chunksize(p)) == i);
      p = p->fd;
    }
  }

  if (total != 0)
    assert(have_fastchunks(av));
  else if (!have_fastchunks(av))
    assert(total == 0);

  /* check normal bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av,i);

    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
    if (i >= 2) {
      unsigned int binbit = get_binmap(av,i);
      int empty = last(b) == b;
      if (!binbit)
	assert(empty);
      else if (!empty)
	assert(binbit);
    }

    for (p = last(b); p != b; p = p->bk) {
      /* each chunk claims to be free */
      do_check_free_chunk(av, p);
      size = chunksize(p);
      total += size;
      if (i >= 2) {
	/* chunk belongs in bin */
	idx = bin_index(size);
	assert(idx == i);
	/* lists are sorted */
	assert(p->bk == b ||
	       (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));

	if (!in_smallbin_range(size)) {
	  if (p->fd_nextsize != NULL) {
	    if (p->fd_nextsize == p)
	      assert (p->bk_nextsize == p);
	    else {
	      if (p->fd_nextsize == first (b))
		assert (chunksize (p) < chunksize (p->fd_nextsize));
	      else
		assert (chunksize (p) > chunksize (p->fd_nextsize));

	      if (p == first (b))
		assert (chunksize (p) > chunksize (p->bk_nextsize));
	      else
		assert (chunksize (p) < chunksize (p->bk_nextsize));
	    }
	  }
	  else
	    assert (p->bk_nextsize == NULL);
	}
      } else if (!in_smallbin_range(size))
	assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
      /* chunk is followed by a legal chain of inuse chunks */
      for (q = next_chunk(p);
	   (q != av->top && inuse(q) &&
	     (unsigned long)(chunksize(q)) >= MINSIZE);
	   q = next_chunk(q))
	do_check_inuse_chunk(av, q);
    }
  }

  /* top chunk is OK */
  check_chunk(av, av->top);

  /* sanity checks for statistics */

#ifdef NO_THREADS
  assert(total <= (unsigned long)(mp_.max_total_mem));
  assert(mp_.n_mmaps >= 0);
#endif
  assert(mp_.n_mmaps <= mp_.max_n_mmaps);

  assert((unsigned long)(av->system_mem) <=
	 (unsigned long)(av->max_system_mem));

  assert((unsigned long)(mp_.mmapped_mem) <=
	 (unsigned long)(mp_.max_mmapped_mem));

#ifdef NO_THREADS
  assert((unsigned long)(mp_.max_total_mem) >=
	 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
#endif
}
#endif /* MALLOC_DEBUG */
/* ----------------- Support for debugging hooks -------------------- */
#include "hooks.c"


/* ----------- Routines dealing with system allocation -------------- */

/*
  sysmalloc handles malloc cases requiring more memory from the system.
  On entry, it is assumed that av->top does not have enough
  space to service request for nb bytes, thus requiring that av->top
  be extended or replaced.
*/
#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  unsigned long   remainder_size; /* its size */

  unsigned long   sum;            /* for updating stats */

  size_t          pagemask  = mp_.pagesize - 1;
  bool            tried_mmap = false;
#if HAVE_MMAP

  /*
    If have mmap, and the request size meets the mmap threshold, and
    the system supports mmap, and there are few enough currently
    allocated mmapped regions, try to directly map this request
    rather than expanding top.
  */

  if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
      (mp_.n_mmaps < mp_.n_mmaps_max)) {

    char* mm;             /* return value from mmap call*/

  try_mmap:
    /*
      Round up size to nearest page.  For mmapped chunks, the overhead
      is one SIZE_SZ unit larger than for normal chunks, because there
      is no following chunk whose prev_size field could be used.
    */
#if MALLOC_ALIGNMENT == 2 * SIZE_SZ
    /* See the front_misalign handling below, for glibc there is no
       need for further alignments.  */
    size = (nb + SIZE_SZ + pagemask) & ~pagemask;
#else
    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
#endif
    tried_mmap = true;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mm != MAP_FAILED) {

	/*
	  The offset to the start of the mmapped region is stored
	  in the prev_size field of the chunk. This allows us to adjust
	  returned start address to meet alignment requirements here
	  and in memalign(), and still be able to compute proper
	  address argument for later munmap in free() and realloc().
	*/

#if MALLOC_ALIGNMENT == 2 * SIZE_SZ
	/* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
	   MALLOC_ALIGN_MASK is 2*SIZE_SZ-1.  Each mmap'ed area is page
	   aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
	assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
	p = (mchunkptr)mm;
	set_head(p, size|IS_MMAPPED);
#else
	front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
	if (front_misalign > 0) {
	  correction = MALLOC_ALIGNMENT - front_misalign;
	  p = (mchunkptr)(mm + correction);
	  p->prev_size = correction;
	  set_head(p, (size - correction) |IS_MMAPPED);
	}
	else {
	  p = (mchunkptr)mm;
	  set_head(p, size|IS_MMAPPED);
	}
#endif

	/* update statistics */

	if (++mp_.n_mmaps > mp_.max_n_mmaps)
	  mp_.max_n_mmaps = mp_.n_mmaps;

	sum = mp_.mmapped_mem += size;
	if (sum > (unsigned long)(mp_.max_mmapped_mem))
	  mp_.max_mmapped_mem = sum;
#ifdef NO_THREADS
	sum += av->system_mem;
	if (sum > (unsigned long)(mp_.max_total_mem))
	  mp_.max_total_mem = sum;
#endif

	check_chunk(av, p);

	return chunk2mem(p);
      }
    }
  }
#endif /* HAVE_MMAP */
  /* Record incoming configuration of top */

  old_top  = av->top;
  old_size = chunksize(old_top);
  old_end  = (char*)(chunk_at_offset(old_top, old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
  */

  assert((old_top == initial_top(av) && old_size == 0) ||
	 ((unsigned long) (old_size) >= MINSIZE &&
	  prev_inuse(old_top) &&
	  ((unsigned long)old_end & pagemask) == 0));

  /* Precondition: not enough current space to satisfy nb request */
  assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));

#ifndef ATOMIC_FASTBINS
  /* Precondition: all fastbins are consolidated */
  assert(!have_fastchunks(av));
#endif


  if (av != &main_arena) {

    heap_info *old_heap, *heap;
    size_t old_heap_size;

    /* First try to extend the current heap. */
    old_heap = heap_for_ptr(old_top);
    old_heap_size = old_heap->size;
    if ((long) (MINSIZE + nb - old_size) > 0
	&& grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
      av->system_mem += old_heap->size - old_heap_size;
      arena_mem += old_heap->size - old_heap_size;
#if 0
      if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
	max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
	       | PREV_INUSE);
    }
    else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
      /* Use a newly allocated heap.  */
      heap->ar_ptr = av;
      heap->prev = old_heap;
      av->system_mem += heap->size;
      arena_mem += heap->size;
#if 0
      if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
	max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      /* Set up the new top.  */
      top(av) = chunk_at_offset(heap, sizeof(*heap));
      set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);

      /* Setup fencepost and free the old top chunk. */
      /* The fencepost takes at least MINSIZE bytes, because it might
	 become the top chunk again later.  Note that a footer is set
	 up, too, although the chunk is marked in use. */
      old_size -= MINSIZE;
      set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
      if (old_size >= MINSIZE) {
	set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
	set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
	set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
#ifdef ATOMIC_FASTBINS
	_int_free(av, old_top, 1);
#else
	_int_free(av, old_top);
#endif
      } else {
	set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
	set_foot(old_top, (old_size + 2*SIZE_SZ));
      }
    }
    else if (!tried_mmap)
      /* We can at least try to use mmap memory.  */
      goto try_mmap;
  } else { /* av == main_arena */


  /* Request enough space for nb + pad + overhead */

  size = nb + mp_.top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if
    we don't actually get contiguous space.
  */

  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size.
    If MORECORE is not contiguous, this ensures that we only call it
    with whole-page arguments.  And if MORECORE is contiguous and
    this is not first time through, this preserves page-alignment of
    previous calls. Otherwise, we correct to page-align below.
  */

  size = (size + pagemask) & ~pagemask;

  /*
    Don't try to call MORECORE if argument is so big as to appear
    negative. Note that since mmap takes size_t arg, it may succeed
    below even if we cannot call MORECORE.
  */

  if (size > 0)
    brk = (char*)(MORECORE(size));

  if (brk != (char*)(MORECORE_FAILURE)) {
    /* Call the `morecore' hook if necessary.  */
    void (*hook) (void) = force_reg (__after_morecore_hook);
    if (__builtin_expect (hook != NULL, 0))
      (*hook) ();
  } else {
  /*
    If have mmap, try using it as a backup when MORECORE fails or
    cannot be used. This is worth doing on systems that have "holes" in
    address space, so sbrk cannot extend to give contiguous space, but
    space is available elsewhere.  Note that we ignore mmap max count
    and threshold limits, since the space will not be used as a
    segregated mmap region.
  */

#if HAVE_MMAP
    /* Cannot merge with old top, so add its size back in */
    if (contiguous(av))
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */
    if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
      size = MMAP_AS_MORECORE_SIZE;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

      char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));

      if (mbrk != MAP_FAILED) {

	/* We do not need, and cannot use, another sbrk call to find end */
	brk = mbrk;
	snd_brk = brk + size;

	/*
	   Record that we no longer have a contiguous sbrk region.
	   After the first time mmap is used as backup, we do not
	   ever rely on contiguous space since this could incorrectly
	   bridge regions.
	*/
	set_noncontiguous(av);
      }
    }
#endif
  }

  if (brk != (char*)(MORECORE_FAILURE)) {
    if (mp_.sbrk_base == 0)
      mp_.sbrk_base = brk;
    av->system_mem += size;

    /*
      If MORECORE extends previous space, we can likewise extend top size.
    */

    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
      set_head(old_top, (size + old_size) | PREV_INUSE);

    else if (contiguous(av) && old_size && brk < old_end) {
      /* Oops!  Someone else killed our space..  Can't touch anything.  */
      malloc_printerr (3, "break adjusted to free malloc space", brk);
    }
    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call sbrk
	just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will meet
	MALLOC_ALIGNMENT

      * If there was an intervening foreign sbrk, we need to adjust sbrk
	request size to account for fact that we will not be able to
	combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time, in
	which case we might as well use the whole last page of request.
	So we allocate enough more memory to hit a page boundary now,
	which in turn causes future contiguous calls to page-align.
    */

    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /* handle contiguous cases */
      if (contiguous(av)) {

	/* Count foreign sbrk as system_mem.  */
	if (old_size)
	  av->system_mem += brk - old_end;

	/* Guarantee alignment of first new chunk made from this space */

	front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
	if (front_misalign > 0) {

	  /*
	    Skip over some bytes to arrive at an aligned position.
	    We don't need to specially mark these wasted front bytes.
	    They will never be accessed anyway because
	    prev_inuse of av->top (and any chunk created from its start)
	    is always true after initialization.
	  */

	  correction = MALLOC_ALIGNMENT - front_misalign;
	  aligned_brk += correction;
	}

	/*
	  If this isn't adjacent to existing space, then we will not
	  be able to merge with old_top space, so must add to 2nd request.
	*/

	correction += old_size;

	/* Extend the end address to hit a page boundary */
	end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
	correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

	assert(correction >= 0);
	snd_brk = (char*)(MORECORE(correction));

	/*
	  If can't allocate correction, try to at least find out current
	  brk.  It might be enough to proceed without failing.

	  Note that if second sbrk did NOT fail, we assume that space
	  is contiguous with first sbrk. This is a safe assumption unless
	  program is multithreaded but doesn't use locks and a foreign sbrk
	  occurred between our first and second calls.
	*/

	if (snd_brk == (char*)(MORECORE_FAILURE)) {
	  correction = 0;
	  snd_brk = (char*)(MORECORE(0));
	} else {
	  /* Call the `morecore' hook if necessary.  */
	  void (*hook) (void) = force_reg (__after_morecore_hook);
	  if (__builtin_expect (hook != NULL, 0))
	    (*hook) ();
	}
      }
      /* handle non-contiguous cases */
      else {
	/* MORECORE/mmap must correctly align */
	assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);

	/* Find out current end of memory */
	if (snd_brk == (char*)(MORECORE_FAILURE)) {
	  snd_brk = (char*)(MORECORE(0));
	}
      }

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
	av->top = (mchunkptr)aligned_brk;
	set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
	av->system_mem += correction;

	/*
	  If not the first time through, we either have a
	  gap due to foreign sbrk or a non-contiguous region.  Insert a
	  double fencepost at old_top to prevent consolidation with space
	  we don't own. These fenceposts are artificial chunks that are
	  marked as inuse and are in any case too small to use.  We need
	  two to make sizes and alignments work out.
	*/
	if (old_size != 0) {
	  /*
	     Shrink old_top to insert fenceposts, keeping size a
	     multiple of MALLOC_ALIGNMENT. We know there is at least
	     enough space in old_top to do this.
	  */
	  old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
	  set_head(old_top, old_size | PREV_INUSE);

	  /*
	    Note that the following assignments completely overwrite
	    old_top when old_size was previously MINSIZE.  This is
	    intentional. We need the fencepost, even if old_top otherwise gets
	    lost.
	  */
	  chunk_at_offset(old_top, old_size            )->size =
	    (2*SIZE_SZ)|PREV_INUSE;

	  chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
	    (2*SIZE_SZ)|PREV_INUSE;

	  /* If possible, release the rest. */
	  if (old_size >= MINSIZE) {
#ifdef ATOMIC_FASTBINS
	    _int_free(av, old_top, 1);
#else
	    _int_free(av, old_top);
#endif
	  }
	}
      }
    }
  }

  /* Update statistics */
#ifdef NO_THREADS
  sum = av->system_mem + mp_.mmapped_mem;
  if (sum > (unsigned long)(mp_.max_total_mem))
    mp_.max_total_mem = sum;
#endif

  } /* if (av !=  &main_arena) */
  if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
    av->max_system_mem = av->system_mem;
  check_malloc_state(av);

  /* finally, do the allocation */
  p = av->top;
  size = chunksize(p);

  /* check that one of the above allocation paths succeeded */
  if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
    remainder_size = size - nb;
    remainder = chunk_at_offset(p, nb);
    av->top = remainder;
    set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_head(remainder, remainder_size | PREV_INUSE);
    check_malloced_chunk(av, p, nb);
    return chunk2mem(p);
  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}
/*
  sYSTRIm is an inverse of sorts to sYSMALLOc.  It gives memory back
  to the system (via negative arguments to sbrk) if there is unused
  memory at the `high' end of the malloc pool. It is called
  automatically by free() when top space exceeds the trim
  threshold. It is also called by the public malloc_trim routine.  It
  returns 1 if it actually released any memory, else 0.
*/

#if __STD_C
static int sYSTRIm(size_t pad, mstate av)
#else
static int sYSTRIm(pad, av) size_t pad; mstate av;
#endif
{
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  long  released;        /* Amount actually released */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by post-check sbrk call */
  size_t pagesz;

  pagesz = mp_.pagesize;
  top_size = chunksize(av->top);

  /* Release in pagesize units, keeping at least one page */
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra > 0) {

    /*
      Only proceed if end of memory is where we last set it.
      This avoids problems if there were foreign sbrk calls.
    */
    current_brk = (char*)(MORECORE(0));
    if (current_brk == (char*)(av->top) + top_size) {

      /*
	Attempt to release memory. We ignore MORECORE return value,
	and instead call again to find out where new end of memory is.
	This avoids problems if first call releases less than we asked,
	or if failure somehow altered brk value. (We could still
	encounter problems if it altered brk in some very bad way,
	but the only thing we can do is adjust anyway, which will cause
	some downstream failure.)
      */

      MORECORE(-extra);
      /* Call the `morecore' hook if necessary.  */
      void (*hook) (void) = force_reg (__after_morecore_hook);
      if (__builtin_expect (hook != NULL, 0))
	(*hook) ();
      new_brk = (char*)(MORECORE(0));

      if (new_brk != (char*)MORECORE_FAILURE) {
	released = (long)(current_brk - new_brk);

	if (released != 0) {
	  /* Success. Adjust top. */
	  av->system_mem -= released;
	  set_head(av->top, (top_size - released) | PREV_INUSE);
	  check_malloc_state(av);
	  return 1;
	}
      }
    }
  }
  return 0;
}
#ifdef HAVE_MMAP

static void
internal_function
#if __STD_C
munmap_chunk(mchunkptr p)
#else
munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);

  assert (chunk_is_mmapped(p));
#if 0
  assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
  assert((mp_.n_mmaps > 0));
#endif

  uintptr_t block = (uintptr_t) p - p->prev_size;
  size_t total_size = p->prev_size + size;
  /* Unfortunately we have to do the compilers job by hand here.  Normally
     we would test BLOCK and TOTAL-SIZE separately for compliance with the
     page size.  But gcc does not recognize the optimization possibility
     (in the moment at least) so we combine the two values into one before
     the bit test.  */
  if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
    {
      malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
		       chunk2mem (p));
      return;
    }

  mp_.n_mmaps--;
  mp_.mmapped_mem -= total_size;

  int ret __attribute__ ((unused)) = munmap((char *)block, total_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}
#if HAVE_MREMAP

static mchunkptr
internal_function
#if __STD_C
mremap_chunk(mchunkptr p, size_t new_size)
#else
mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = mp_.pagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
#if 0
  assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
  assert((mp_.n_mmaps > 0));
#endif
  assert(((size + offset) & (mp_.pagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  /* No need to remap if the number of pages does not change.  */
  if (size + offset == new_size)
    return p;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size,
		      MREMAP_MAYMOVE);

  if (cp == MAP_FAILED) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mp_.mmapped_mem -= size + offset;
  mp_.mmapped_mem += new_size;
  if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
    mp_.max_mmapped_mem = mp_.mmapped_mem;
#ifdef NO_THREADS
  if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
      mp_.max_total_mem)
    mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
#endif
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */
/*------------------------ Public wrappers. --------------------------------*/

Void_t*
public_mALLOc(size_t bytes)
{
  mstate ar_ptr;
  Void_t *victim;

  __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t)
    = force_reg (__malloc_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(bytes, RETURN_ADDRESS (0));

  arena_lookup(ar_ptr);
#if 0
  // XXX We need double-word CAS and fastbins must be extended to also
  // XXX hold a generation counter for each entry.
  if (ar_ptr) {
    INTERNAL_SIZE_T nb;               /* normalized request size */
    checked_request2size(bytes, nb);
    if (nb <= get_max_fast ()) {
      long int idx = fastbin_index(nb);
      mfastbinptr* fb = &fastbin (ar_ptr, idx);
      mchunkptr pp = *fb;
      mchunkptr v;
      do
	{
	  v = pp;
	  if (v == NULL)
	    break;
	}
      while ((pp = catomic_compare_and_exchange_val_acq (fb, v->fd, v)) != v);
      if (v != 0) {
	if (__builtin_expect (fastbin_index (chunksize (v)) != idx, 0))
	  malloc_printerr (check_action, "malloc(): memory corruption (fast)",
			   chunk2mem (v));
	check_remalloced_chunk(ar_ptr, v, nb);
	void *p = chunk2mem(v);
	if (__builtin_expect (perturb_byte, 0))
	  alloc_perturb (p, bytes);
	return p;
      }
    }
  }
#endif

  arena_lock(ar_ptr, bytes);
  if(!ar_ptr)
    return 0;
  victim = _int_malloc(ar_ptr, bytes);
  if(!victim) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = &main_arena;
      (void)mutex_lock(&ar_ptr->mutex);
      victim = _int_malloc(ar_ptr, bytes);
      (void)mutex_unlock(&ar_ptr->mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
      (void)mutex_unlock(&main_arena.mutex);
      if(ar_ptr) {
	victim = _int_malloc(ar_ptr, bytes);
	(void)mutex_unlock(&ar_ptr->mutex);
      }
    }
  } else
    (void)mutex_unlock(&ar_ptr->mutex);
  assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
	 ar_ptr == arena_for_chunk(mem2chunk(victim)));
  return victim;
}
#ifdef libc_hidden_def
libc_hidden_def(public_mALLOc)
#endif
public_fREe(Void_t* mem)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t)
    = force_reg (__free_hook);
  if (__builtin_expect (hook != NULL, 0)) {
    (*hook)(mem, RETURN_ADDRESS (0));
    return;
  }

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

  if (chunk_is_mmapped(p))                  /* release mmapped memory. */
  {
    /* see if the dynamic brk/mmap threshold needs adjusting */
    if (!mp_.no_dyn_threshold
        && p->size > mp_.mmap_threshold
        && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
      {
        mp_.mmap_threshold = chunksize (p);
        mp_.trim_threshold = 2 * mp_.mmap_threshold;
      }
    munmap_chunk(p);
    return;
  }

  ar_ptr = arena_for_chunk(p);
#ifdef ATOMIC_FASTBINS
  _int_free(ar_ptr, p, 0);
#else
# if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
# else
  (void)mutex_lock(&ar_ptr->mutex);
# endif
  _int_free(ar_ptr, p);
  (void)mutex_unlock(&ar_ptr->mutex);
#endif
}
#ifdef libc_hidden_def
libc_hidden_def (public_fREe)
#endif
public_rEALLOc(Void_t* oldmem, size_t bytes)
{
  mstate ar_ptr;
  INTERNAL_SIZE_T nb;       /* padded request size */

  Void_t* newp;             /* chunk to return */

  __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
    force_reg (__realloc_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return public_mALLOc(bytes);

  /* chunk corresponding to oldmem */
  const mchunkptr oldp = mem2chunk(oldmem);
  /* its size */
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
      || __builtin_expect (misaligned_chunk (oldp), 0))
    {
      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
      return NULL;
    }

  checked_request2size(bytes, nb);

  if (chunk_is_mmapped(oldp))
  {
    Void_t* newmem;

#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp) return chunk2mem(newp);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = public_mALLOc(bytes);
    if (newmem == 0) return 0; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
    munmap_chunk(oldp);
    return newmem;
  }

  ar_ptr = arena_for_chunk(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#if !defined NO_THREADS && !defined PER_THREAD
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (Void_t *)ar_ptr);
#endif

  newp = _int_realloc(ar_ptr, oldp, oldsize, nb);

  (void)mutex_unlock(&ar_ptr->mutex);
  assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));

  if (newp == NULL)
    {
      /* Try harder to allocate memory in other arenas.  */
      newp = public_mALLOc(bytes);
      if (newp != NULL)
        {
          MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
#ifdef ATOMIC_FASTBINS
          _int_free(ar_ptr, oldp, 0);
#else
# if THREAD_STATS
          if(!mutex_trylock(&ar_ptr->mutex))
            ++(ar_ptr->stat_lock_direct);
          else {
            (void)mutex_lock(&ar_ptr->mutex);
            ++(ar_ptr->stat_lock_wait);
          }
# else
          (void)mutex_lock(&ar_ptr->mutex);
# endif
          _int_free(ar_ptr, oldp);
          (void)mutex_unlock(&ar_ptr->mutex);
#endif
        }
    }

  return newp;
}
#ifdef libc_hidden_def
libc_hidden_def (public_rEALLOc)
#endif
public_mEMALIGn(size_t alignment, size_t bytes)
{
  mstate ar_ptr;
  Void_t *p;

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    force_reg (__memalign_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(alignment, bytes, RETURN_ADDRESS (0));

  /* If need less alignment than we give anyway, just relay to malloc */
  if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */
  if (alignment < MINSIZE) alignment = MINSIZE;

  arena_get(ar_ptr, bytes + alignment + MINSIZE);
  if(!ar_ptr)
    return 0;
  p = _int_memalign(ar_ptr, alignment, bytes);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = &main_arena;
      (void)mutex_lock(&ar_ptr->mutex);
      p = _int_memalign(ar_ptr, alignment, bytes);
      (void)mutex_unlock(&ar_ptr->mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      mstate prev = ar_ptr->next ? ar_ptr : 0;
      (void)mutex_unlock(&ar_ptr->mutex);
      ar_ptr = arena_get2(prev, bytes);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, alignment, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
    }
  } else
    (void)mutex_unlock(&ar_ptr->mutex);
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));
  return p;
}
#ifdef libc_hidden_def
libc_hidden_def (public_mEMALIGn)
#endif
public_vALLOc(size_t bytes)
{
  mstate ar_ptr;
  Void_t *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  size_t pagesz = mp_.pagesize;

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    force_reg (__memalign_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + pagesz + MINSIZE);
  if(!ar_ptr)
    return 0;
  p = _int_valloc(ar_ptr, bytes);
  (void)mutex_unlock(&ar_ptr->mutex);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = _int_memalign(&main_arena, pagesz, bytes);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, pagesz, bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
    }
  }
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));

  return p;
}
public_pVALLOc(size_t bytes)
{
  mstate ar_ptr;
  Void_t *p;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  size_t pagesz = mp_.pagesize;
  size_t page_mask = mp_.pagesize - 1;
  size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    force_reg (__memalign_hook);
  if (__builtin_expect (hook != NULL, 0))
    return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));

  arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
  p = _int_pvalloc(ar_ptr, bytes);
  (void)mutex_unlock(&ar_ptr->mutex);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = _int_memalign(&main_arena, pagesz, rounded_bytes);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
                          bytes + 2*pagesz + MINSIZE);
      if(ar_ptr) {
        p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
    }
  }
  assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
         ar_ptr == arena_for_chunk(mem2chunk(p)));

  return p;
}
public_cALLOc(size_t n, size_t elem_size)
{
  mstate av;
  mchunkptr oldtop, p;
  INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
  Void_t* mem;
  unsigned long clearsize;
  unsigned long nclears;
  INTERNAL_SIZE_T* d;

  /* size_t is unsigned so the behavior on overflow is defined.  */
  bytes = n * elem_size;
#define HALF_INTERNAL_SIZE_T \
  (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
  if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
    if (elem_size != 0 && bytes / elem_size != n) {
      MALLOC_FAILURE_ACTION;
      return 0;
    }
  }

  sz = bytes;

  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
    force_reg (__malloc_hook);
  if (__builtin_expect (hook != NULL, 0)) {
    mem = (*hook)(sz, RETURN_ADDRESS (0));
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMCPY
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }

  arena_get(av, sz);
  if(!av)
    return 0;

  /* Check if we hand out the top chunk, in which case there may be no
     need to clear. */
  oldtop = top(av);
  oldtopsize = chunksize(top(av));
#if MORECORE_CLEARS < 2
  /* Only newly allocated memory is guaranteed to be cleared.  */
  if (av == &main_arena &&
      oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
    oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
#endif
  if (av != &main_arena)
    {
      heap_info *heap = heap_for_ptr (oldtop);
      if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
        oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
    }
  mem = _int_malloc(av, sz);

  /* Only clearing follows, so we can unlock early. */
  (void)mutex_unlock(&av->mutex);

  assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
         av == arena_for_chunk(mem2chunk(mem)));

  if (mem == 0) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(av != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      mem = _int_malloc(&main_arena, sz);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      (void)mutex_lock(&main_arena.mutex);
      av = arena_get2(av->next ? av : 0, sz);
      (void)mutex_unlock(&main_arena.mutex);
      if(av) {
        mem = _int_malloc(av, sz);
        (void)mutex_unlock(&av->mutex);
      }
    }
    if (mem == 0) return 0;
  }
  p = mem2chunk(mem);

  /* Two optional cases in which clearing not necessary */
  if (chunk_is_mmapped (p))
    {
      if (__builtin_expect (perturb_byte, 0))
        MALLOC_ZERO (mem, sz);
      return mem;
    }

  csz = chunksize(p);

  if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
    /* clear only the bytes from non-freshly-sbrked memory */
    csz = oldtopsize;
  }

  /* Unroll clear of <= 36 bytes (72 if 8byte sizes).  We know that
     contents have an odd number of INTERNAL_SIZE_T-sized words;
     minimally 3.  */
  d = (INTERNAL_SIZE_T*)mem;
  clearsize = csz - SIZE_SZ;
  nclears = clearsize / sizeof(INTERNAL_SIZE_T);
  assert(nclears >= 3);

  MALLOC_ZERO(d, clearsize);

  return mem;
}
public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
{
  mstate ar_ptr;
  Void_t** m;

  arena_get(ar_ptr, n*elem_size);
  if(!ar_ptr)
    return 0;

  m = _int_icalloc(ar_ptr, n, elem_size, chunks);
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}

public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
{
  mstate ar_ptr;
  Void_t** m;

  arena_get(ar_ptr, 0);
  if(!ar_ptr)
    return 0;

  m = _int_icomalloc(ar_ptr, n, sizes, chunks);
  (void)mutex_unlock(&ar_ptr->mutex);
  return m;
}
public_cFREe(Void_t* m)
{
  public_fREe(m);
}

public_mTRIm(size_t s)
{
  int result = 0;

  if(__malloc_initialized < 0)
    ptmalloc_init ();

  mstate ar_ptr = &main_arena;
  do
    {
      (void) mutex_lock (&ar_ptr->mutex);
      result |= mTRIm (ar_ptr, s);
      (void) mutex_unlock (&ar_ptr->mutex);

      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  return result;
}

public_mUSABLe(Void_t* m)
{
  size_t result;

  result = mUSABLe(m);
  return result;
}

struct mallinfo public_mALLINFo()
{
  struct mallinfo m;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  (void)mutex_lock(&main_arena.mutex);
  m = mALLINFo(&main_arena);
  (void)mutex_unlock(&main_arena.mutex);
  return m;
}

public_mALLOPt(int p, int v)
{
  int result;
  result = mALLOPt(p, v);
  return result;
}
/*
  ------------------------------ malloc ------------------------------
*/

_int_malloc(mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int    idx;              /* associated bin index */
  mbinptr         bin;              /* associated bin */

  mchunkptr       victim;           /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int             victim_index;     /* its bin index */

  mchunkptr       remainder;        /* remainder from a split */
  unsigned long   remainder_size;   /* its size */

  unsigned int    block;            /* bit map traverser */
  unsigned int    bit;              /* bit map traverser */
  unsigned int    map;              /* current word of binmap */

  mchunkptr       fwd;              /* misc temp for linking */
  mchunkptr       bck;              /* misc temp for linking */

  /*
    Convert request size to internal form by adding SIZE_SZ bytes
    overhead plus possibly more to obtain necessary alignment and/or
    to obtain a size of at least MINSIZE, the smallest allocatable
    size. Also, checked_request2size traps (returning 0) request sizes
    that are so large that they wrap around zero when padded and
    aligned.
  */

  checked_request2size(bytes, nb);

  /*
    If the size qualifies as a fastbin, first check corresponding bin.
    This code is safe to execute even if av is not yet initialized, so we
    can try it without checking, which saves some time on this fast path.
  */

  if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
    idx = fastbin_index(nb);
    mfastbinptr* fb = &fastbin (av, idx);
#ifdef ATOMIC_FASTBINS
    while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
           != victim);
#endif
    if (victim != 0) {
      if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
        malloc_printerr (check_action, "malloc(): memory corruption (fast)",
                         chunk2mem (victim));
#ifndef ATOMIC_FASTBINS
      *fb = victim->fd;
#endif
      check_remalloced_chunk(av, victim, nb);
      void *p = chunk2mem(victim);
      if (__builtin_expect (perturb_byte, 0))
        alloc_perturb (p, bytes);
      return p;
    }
  }

  /*
    If a small request, check regular bin.  Since these "smallbins"
    hold one size each, no searching within bins is necessary.
    (For a large request, we need to wait until unsorted chunks are
    processed to find best fit. But for small ones, fits are exact
    anyway, so we can check now, which is faster.)
  */

  if (in_smallbin_range(nb)) {
    idx = smallbin_index(nb);
    bin = bin_at(av,idx);

    if ( (victim = last(bin)) != bin) {
      if (victim == 0) /* initialization check */
        malloc_consolidate(av);
      else {
        set_inuse_bit_at_offset(victim, nb);
        if (av != &main_arena)
          victim->size |= NON_MAIN_ARENA;
        check_malloced_chunk(av, victim, nb);
        void *p = chunk2mem(victim);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }
    }
  }

  /*
    If this is a large request, consolidate fastbins before continuing.
    While it might look excessive to kill all fastbins before
    even seeing if there is space available, this avoids
    fragmentation problems normally associated with fastbins.
    Also, in practice, programs tend to have runs of either small or
    large requests, but less often mixtures, so consolidation is not
    invoked all that often in most programs. And the programs that
    it is called frequently in otherwise tend to fragment.
  */

  else {
    idx = largebin_index(nb);
    if (have_fastchunks(av))
      malloc_consolidate(av);
  }

  /*
    Process recently freed or remaindered chunks, taking one only if
    it is exact fit, or, if this is a small request, the chunk is remainder from
    the most recent non-exact fit.  Place other traversed chunks in
    bins.  Note that this step is the only place in any routine where
    chunks are placed in bins.

    The outer loop here is needed because we might not realize until
    near the end of malloc that we should have consolidated, so must
    do so and retry. This happens at most once, and only when we would
    otherwise need to expand memory to service a "small" request.
  */

  for(;;) {

    int iters = 0;
    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
      bck = victim->bk;
      if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
          || __builtin_expect (victim->size > av->system_mem, 0))
        malloc_printerr (check_action, "malloc(): memory corruption",
                         chunk2mem (victim));
      size = chunksize(victim);

      /*
        If a small request, try to use last remainder if it is the
        only chunk in unsorted bin.  This helps promote locality for
        runs of consecutive small requests. This is the only
        exception to best-fit, and applies only when there is
        no exact fit for a small chunk.
      */

      if (in_smallbin_range(nb) &&
          bck == unsorted_chunks(av) &&
          victim == av->last_remainder &&
          (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {

        /* split and reattach remainder */
        remainder_size = size - nb;
        remainder = chunk_at_offset(victim, nb);
        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
        av->last_remainder = remainder;
        remainder->bk = remainder->fd = unsorted_chunks(av);
        if (!in_smallbin_range(remainder_size))
          {
            remainder->fd_nextsize = NULL;
            remainder->bk_nextsize = NULL;
          }

        set_head(victim, nb | PREV_INUSE |
                 (av != &main_arena ? NON_MAIN_ARENA : 0));
        set_head(remainder, remainder_size | PREV_INUSE);
        set_foot(remainder, remainder_size);

        check_malloced_chunk(av, victim, nb);
        void *p = chunk2mem(victim);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }

      /* remove from unsorted list */
      unsorted_chunks(av)->bk = bck;
      bck->fd = unsorted_chunks(av);

      /* Take now instead of binning if exact fit */

      if (size == nb) {
        set_inuse_bit_at_offset(victim, size);
        if (av != &main_arena)
          victim->size |= NON_MAIN_ARENA;
        check_malloced_chunk(av, victim, nb);
        void *p = chunk2mem(victim);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }

      /* place chunk in bin */

      if (in_smallbin_range(size)) {
        victim_index = smallbin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;
      }
      else {
        victim_index = largebin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;

        /* maintain large bins in sorted order */
        if (fwd != bck) {
          /* Or with inuse bit to speed comparisons */
          size |= PREV_INUSE;
          /* if smaller than smallest, bypass loop below */
          assert((bck->bk->size & NON_MAIN_ARENA) == 0);
          if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
            fwd = bck;
            bck = bck->bk;

            victim->fd_nextsize = fwd->fd;
            victim->bk_nextsize = fwd->fd->bk_nextsize;
            fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
          }
          else {
            assert((fwd->size & NON_MAIN_ARENA) == 0);
            while ((unsigned long) size < fwd->size)
              {
                fwd = fwd->fd_nextsize;
                assert((fwd->size & NON_MAIN_ARENA) == 0);
              }

            if ((unsigned long) size == (unsigned long) fwd->size)
              /* Always insert in the second position.  */
              fwd = fwd->fd;
            else
              {
                victim->fd_nextsize = fwd;
                victim->bk_nextsize = fwd->bk_nextsize;
                fwd->bk_nextsize = victim;
                victim->bk_nextsize->fd_nextsize = victim;
              }
            bck = fwd->bk;
          }
        } else
          victim->fd_nextsize = victim->bk_nextsize = victim;
      }

      mark_bin(av, victim_index);
      victim->bk = bck;
      victim->fd = fwd;
      fwd->bk = victim;
      bck->fd = victim;

#define MAX_ITERS       10000
      if (++iters >= MAX_ITERS)
        break;
    }

    /*
      If a large request, scan through the chunks of current bin in
      sorted order to find smallest that fits.  Use the skip list for this.
    */

    if (!in_smallbin_range(nb)) {
      bin = bin_at(av, idx);

      /* skip scan if empty or largest chunk is too small */
      if ((victim = first(bin)) != bin &&
          (unsigned long)(victim->size) >= (unsigned long)(nb)) {

        victim = victim->bk_nextsize;
        while (((unsigned long)(size = chunksize(victim)) <
                (unsigned long)(nb)))
          victim = victim->bk_nextsize;

        /* Avoid removing the first entry for a size so that the skip
           list does not have to be rerouted.  */
        if (victim != last(bin) && victim->size == victim->fd->size)
          victim = victim->fd;

        remainder_size = size - nb;
        unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE)  {
          set_inuse_bit_at_offset(victim, size);
          if (av != &main_arena)
            victim->size |= NON_MAIN_ARENA;
        }
        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);
          /* We cannot assume the unsorted list is empty and therefore
             have to perform a complete insert here.  */
          bck = unsorted_chunks(av);
          fwd = bck->fd;
          remainder->bk = bck;
          remainder->fd = fwd;
          bck->fd = remainder;
          fwd->bk = remainder;
          if (!in_smallbin_range(remainder_size))
            {
              remainder->fd_nextsize = NULL;
              remainder->bk_nextsize = NULL;
            }
          set_head(victim, nb | PREV_INUSE |
                   (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
        }
        check_malloced_chunk(av, victim, nb);
        void *p = chunk2mem(victim);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }
    }

    /*
      Search for a chunk by scanning bins, starting with next largest
      bin. This search is strictly by best-fit; i.e., the smallest
      (with ties going to approximately the least recently used) chunk
      that fits is selected.

      The bitmap avoids needing to check that most blocks are nonempty.
      The particular case of skipping all bins during warm-up phases
      when no chunks have been returned yet is faster than it might look.
    */

    ++idx;
    bin = bin_at(av,idx);
    block = idx2block(idx);
    map = av->binmap[block];
    bit = idx2bit(idx);

    for (;;) {

      /* Skip rest of block if there are no more set bits in this block.  */
      if (bit > map || bit == 0) {
        do {
          if (++block >= BINMAPSIZE)  /* out of bins */
            goto use_top;
        } while ( (map = av->binmap[block]) == 0);

        bin = bin_at(av, (block << BINMAPSHIFT));
        bit = 1;
      }

      /* Advance to bin with set bit. There must be one. */
      while ((bit & map) == 0) {
        bin = next_bin(bin);
        bit <<= 1;
        assert(bit != 0);
      }

      /* Inspect the bin. It is likely to be non-empty */
      victim = last(bin);

      /*  If a false alarm (empty bin), clear the bit. */
      if (victim == bin) {
        av->binmap[block] = map &= ~bit; /* Write through */
        bin = next_bin(bin);
        bit <<= 1;
      }

      else {
        size = chunksize(victim);

        /*  We know the first chunk in this bin is big enough to use. */
        assert((unsigned long)(size) >= (unsigned long)(nb));

        remainder_size = size - nb;

        /* unlink */
        unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE) {
          set_inuse_bit_at_offset(victim, size);
          if (av != &main_arena)
            victim->size |= NON_MAIN_ARENA;
        }

        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);

          /* We cannot assume the unsorted list is empty and therefore
             have to perform a complete insert here.  */
          bck = unsorted_chunks(av);
          fwd = bck->fd;
          remainder->bk = bck;
          remainder->fd = fwd;
          bck->fd = remainder;
          fwd->bk = remainder;

          /* advertise as last remainder */
          if (in_smallbin_range(nb))
            av->last_remainder = remainder;
          if (!in_smallbin_range(remainder_size))
            {
              remainder->fd_nextsize = NULL;
              remainder->bk_nextsize = NULL;
            }
          set_head(victim, nb | PREV_INUSE |
                   (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
        }

        check_malloced_chunk(av, victim, nb);
        void *p = chunk2mem(victim);
        if (__builtin_expect (perturb_byte, 0))
          alloc_perturb (p, bytes);
        return p;
      }
    }

  use_top:
    /*
      If large enough, split off the chunk bordering the end of memory
      (held in av->top). Note that this is in accord with the best-fit
      search rule.  In effect, av->top is treated as larger (and thus
      less well fitting) than any other available chunk since it can
      be extended to be as large as necessary (up to system
      limitations).

      We require that av->top always exists (i.e., has size >=
      MINSIZE) after initialization, so if it would otherwise be
      exhausted by current request, it is replenished. (The main
      reason for ensuring it exists is that we may need MINSIZE space
      to put in fenceposts in sysmalloc.)
    */

    victim = av->top;
    size = chunksize(victim);

    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(victim, nb);
      av->top = remainder;
      set_head(victim, nb | PREV_INUSE |
               (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head(remainder, remainder_size | PREV_INUSE);

      check_malloced_chunk(av, victim, nb);
      void *p = chunk2mem(victim);
      if (__builtin_expect (perturb_byte, 0))
        alloc_perturb (p, bytes);
      return p;
    }

#ifdef ATOMIC_FASTBINS
    /* When we are using atomic ops to free fast chunks we can get
       here for all block sizes.  */
    else if (have_fastchunks(av)) {
      malloc_consolidate(av);
      /* restore original bin index */
      if (in_smallbin_range(nb))
        idx = smallbin_index(nb);
      else
        idx = largebin_index(nb);
    }
#else
    /*
      If there is space available in fastbins, consolidate and retry,
      to possibly avoid expanding memory. This can occur only if nb is
      in smallbin range so we didn't consolidate upon entry.
    */

    else if (have_fastchunks(av)) {
      assert(in_smallbin_range(nb));
      malloc_consolidate(av);
      idx = smallbin_index(nb); /* restore original bin index */
    }
#endif

    /*
      Otherwise, relay to handle system-dependent cases
    */
    else {
      void *p = sYSMALLOc(nb, av);
      if (p != NULL && __builtin_expect (perturb_byte, 0))
        alloc_perturb (p, bytes);
      return p;
    }
  }
}
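/*
  Worked example for the split logic used throughout _int_malloc()
  (illustrative sizes only; assumes MINSIZE of 32).  If the normalized
  request nb is 48 and the selected chunk has size 128, then
  remainder_size = 128 - 48 = 80 >= MINSIZE, so the chunk is split: the
  victim is re-headed as 48 | PREV_INUSE and returned, while an 80-byte
  remainder is created at victim + 48, headed 80 | PREV_INUSE, and
  linked into the unsorted list (or becomes the new av->top when the
  top chunk is split).  Had the best fit been 64 bytes, remainder_size
  = 16 would fall below MINSIZE and the whole 64-byte chunk would be
  handed out with the inuse bit set at the following chunk.
*/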
/*
  ------------------------------ free ------------------------------
*/

#ifdef ATOMIC_FASTBINS
_int_free(mstate av, mchunkptr p, int have_lock)
#else
_int_free(mstate av, mchunkptr p)
#endif
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr*    fb;          /* associated fastbin */
  mchunkptr       nextchunk;   /* next contiguous chunk */
  INTERNAL_SIZE_T nextsize;    /* its size */
  int             nextinuse;   /* true if nextchunk is used */
  INTERNAL_SIZE_T prevsize;    /* size of previous contiguous chunk */
  mchunkptr       bck;         /* misc temp for linking */
  mchunkptr       fwd;         /* misc temp for linking */

  const char *errstr = NULL;
#ifdef ATOMIC_FASTBINS
  int locked = 0;
#endif

  size = chunksize(p);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    {
      errstr = "free(): invalid pointer";
    errout:
#ifdef ATOMIC_FASTBINS
      if (! have_lock && locked)
        (void)mutex_unlock(&av->mutex);
#endif
      malloc_printerr (check_action, errstr, chunk2mem(p));
      return;
    }
  /* We know that each chunk is at least MINSIZE bytes in size.  */
  if (__builtin_expect (size < MINSIZE, 0))
    {
      errstr = "free(): invalid size";
      goto errout;
    }

  check_inuse_chunk(av, p);

  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */

  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {

    if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0)
#ifdef ATOMIC_FASTBINS
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might have led to a false positive.  Redo the test
           after getting the lock.  */
        || ({ assert (locked == 0);
              mutex_lock(&av->mutex);
              locked = 1;
              chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
              || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
            })
#endif
        )
      {
        errstr = "free(): invalid next size (fast)";
        goto errout;
      }
#ifdef ATOMIC_FASTBINS
    if (! have_lock)
      {
        (void)mutex_unlock(&av->mutex);
        locked = 0;
      }
#endif

    if (__builtin_expect (perturb_byte, 0))
      free_perturb (chunk2mem(p), size - SIZE_SZ);

    fb = &fastbin (av, fastbin_index(size));

#ifdef ATOMIC_FASTBINS
    mchunkptr fd;
    mchunkptr old = *fb;
    do
      {
        /* Another simple check: make sure the top of the bin is not the
           record we are going to add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          {
            errstr = "double free or corruption (fasttop)";
            goto errout;
          }
        p->fd = fd = old;
      }
    while ((old = catomic_compare_and_exchange_val_rel (fb, p, fd)) != fd);
#else
    /* Another simple check: make sure the top of the bin is not the
       record we are going to add (i.e., double free).  */
    if (__builtin_expect (*fb == p, 0))
      {
        errstr = "double free or corruption (fasttop)";
        goto errout;
      }
    p->fd = *fb;
    *fb = p;
#endif
  }

  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {
#ifdef ATOMIC_FASTBINS
    if (! have_lock) {
# if THREAD_STATS
      if(!mutex_trylock(&av->mutex))
        ++(av->stat_lock_direct);
      else {
        (void)mutex_lock(&av->mutex);
        ++(av->stat_lock_wait);
      }
# else
      (void)mutex_lock(&av->mutex);
# endif
      locked = 1;
    }
#endif

    nextchunk = chunk_at_offset(p, size);

    /* Lightweight tests: check whether the block is already the
       top block.  */
    if (__builtin_expect (p == av->top, 0))
      {
        errstr = "double free or corruption (top)";
        goto errout;
      }
    /* Or whether the next chunk is beyond the boundaries of the arena.  */
    if (__builtin_expect (contiguous (av)
                          && (char *) nextchunk
                          >= ((char *) av->top + chunksize(av->top)), 0))
      {
        errstr = "double free or corruption (out)";
        goto errout;
      }
    /* Or whether the block is actually not marked used.  */
    if (__builtin_expect (!prev_inuse(nextchunk), 0))
      {
        errstr = "double free or corruption (!prev)";
        goto errout;
      }

    nextsize = chunksize(nextchunk);
    if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      {
        errstr = "free(): invalid next size (normal)";
        goto errout;
      }

    if (__builtin_expect (perturb_byte, 0))
      free_perturb (chunk2mem(p), size - SIZE_SZ);

    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
        unlink(nextchunk, bck, fwd);
        size += nextsize;
      } else
        clear_inuse_bit_at_offset(nextchunk, 0);

      /*
        Place the chunk in unsorted chunk list. Chunks are
        not placed into regular bins until after they have
        been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
        {
          p->fd_nextsize = NULL;
          p->bk_nextsize = NULL;
        }
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }

    /*
      If freeing a large space, consolidate possibly-surrounding
      chunks. Then, if the total unused topmost memory exceeds trim
      threshold, ask malloc_trim to reduce top.

      Unless max_fast is 0, we don't know if there are fastbins
      bordering top, so we cannot tell for sure whether threshold
      has been reached unless fastbins are consolidated.  But we
      don't want to consolidate on each free.  As a compromise,
      consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
      is reached.
    */

    if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
      if (have_fastchunks(av))
        malloc_consolidate(av);

      if (av == &main_arena) {
#ifndef MORECORE_CANNOT_TRIM
        if ((unsigned long)(chunksize(av->top)) >=
            (unsigned long)(mp_.trim_threshold))
          sYSTRIm(mp_.top_pad, av);
#endif
      } else {
        /* Always try heap_trim(), even if the top chunk is not
           large, because the corresponding heap might go away.  */
        heap_info *heap = heap_for_ptr(top(av));

        assert(heap->ar_ptr == av);
        heap_trim(heap, mp_.top_pad);
      }
    }

#ifdef ATOMIC_FASTBINS
    if (! have_lock) {
      assert (locked);
      (void)mutex_unlock(&av->mutex);
    }
#endif
  }
  /*
    If the chunk was allocated via mmap, release via munmap(). Note
    that if HAVE_MMAP is false but chunk_is_mmapped is true, then
    user must have overwritten memory. There's nothing we can do to
    catch this error unless MALLOC_DEBUG is set, in which case
    check_inuse_chunk (above) will have triggered error.
  */

  else {
    munmap_chunk (p);
  }
}
/*
  ------------------------- malloc_consolidate -------------------------

  malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/

#if __STD_C
static void malloc_consolidate(mstate av)
#else
static void malloc_consolidate(av) mstate av;
#endif
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /*
    If max_fast is 0, we know that av hasn't
    yet been initialized, in which case do so below
  */

  if (get_max_fast () != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /*
      Remove each chunk from fast bin and consolidate it, placing it
      then in unsorted bin. Among other reasons for doing this,
      placing in unsorted bin avoids needing to calculate actual bins
      until malloc is sure that chunks aren't immediately going to be
      reused anyway.
    */

#if 0
    /* It is wrong to limit the fast bins to search using get_max_fast
       because, except for the main arena, all the others might have
       blocks in the high fast bins.  It's not worth it anyway, just
       search all bins all the time.  */
    maxfb = &fastbin (av, fastbin_index(get_max_fast ()));
#else
    maxfb = &fastbin (av, NFASTBINS - 1);
#endif
    fb = &fastbin (av, 0);
    do {
#ifdef ATOMIC_FASTBINS
      p = atomic_exchange_acq (fb, 0);
#else
      p = *fb;
#endif
      if (p != 0) {
#ifndef ATOMIC_FASTBINS
        *fb = 0;
#endif
        do {
          check_inuse_chunk(av, p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
          }

          if (nextchunk != av->top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

            if (!nextinuse) {
              size += nextsize;
              unlink(nextchunk, bck, fwd);
            } else
              clear_inuse_bit_at_offset(nextchunk, 0);

            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;

            if (!in_smallbin_range (size)) {
              p->fd_nextsize = NULL;
              p->bk_nextsize = NULL;
            }

            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }

          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
          }

        } while ( (p = nextp) != 0);

      }
    } while (fb++ != maxfb);
  }
  else {
    malloc_init_state(av);
    check_malloc_state(av);
  }
}
/*
  ------------------------------ realloc ------------------------------
*/

_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
             INTERNAL_SIZE_T nb)
{
  mchunkptr        newp;            /* chunk to return */
  INTERNAL_SIZE_T  newsize;         /* its size */
  Void_t*          newmem;          /* corresponding user mem */

  mchunkptr        next;            /* next contiguous chunk after oldp */

  mchunkptr        remainder;       /* extra space at end of newp */
  unsigned long    remainder_size;  /* its size */

  mchunkptr        bck;             /* misc temp for linking */
  mchunkptr        fwd;             /* misc temp for linking */

  unsigned long    copysize;        /* bytes to copy */
  unsigned int     ncopies;         /* INTERNAL_SIZE_T words to copy */
  INTERNAL_SIZE_T* s;               /* copy source */
  INTERNAL_SIZE_T* d;               /* copy destination */

  const char *errstr = NULL;

  /* oldmem size */
  if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
      || __builtin_expect (oldsize >= av->system_mem, 0))
    {
      errstr = "realloc(): invalid old size";
      malloc_printerr (check_action, errstr, chunk2mem(oldp));
      return NULL;
    }

  check_inuse_chunk(av, oldp);

  /* All callers already filter out mmap'ed chunks.  */
#if 0
  if (!chunk_is_mmapped(oldp))
#else
  assert (!chunk_is_mmapped(oldp));
#endif
  {

    next = chunk_at_offset(oldp, oldsize);
    INTERNAL_SIZE_T nextsize = chunksize(next);
    if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
        || __builtin_expect (nextsize >= av->system_mem, 0))
      {
        errstr = "realloc(): invalid next size";
        malloc_printerr (check_action, errstr, chunk2mem(oldp));
        return NULL;
      }

    if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
      /* already big enough; split below */
      newp = oldp;
      newsize = oldsize;
    }

    else {
      /* Try to expand forward into top */
      if (next == av->top &&
          (unsigned long)(newsize = oldsize + nextsize) >=
          (unsigned long)(nb + MINSIZE)) {
        set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
        av->top = chunk_at_offset(oldp, nb);
        set_head(av->top, (newsize - nb) | PREV_INUSE);
        check_inuse_chunk(av, oldp);
        return chunk2mem(oldp);
      }

      /* Try to expand forward into next chunk;  split off remainder below */
      else if (next != av->top &&
               !inuse(next) &&
               (unsigned long)(newsize = oldsize + nextsize) >=
               (unsigned long)(nb)) {
        newp = oldp;
        unlink(next, bck, fwd);
      }

      /* allocate, copy, free */
      else {
        newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
        if (newmem == 0)
          return 0; /* propagate failure */

        newp = mem2chunk(newmem);
        newsize = chunksize(newp);

        /*
          Avoid copy if newp is next chunk after oldp.
        */
        if (newp == next) {
          newsize += oldsize;
          newp = oldp;
        }
        else {
          /*
            Unroll copy of <= 36 bytes (72 if 8byte sizes)
            We know that contents have an odd number of
            INTERNAL_SIZE_T-sized words; minimally 3.
          */

          copysize = oldsize - SIZE_SZ;
          s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
          d = (INTERNAL_SIZE_T*)(newmem);
          ncopies = copysize / sizeof(INTERNAL_SIZE_T);
          assert(ncopies >= 3);

          MALLOC_COPY(d, s, copysize);

#ifdef ATOMIC_FASTBINS
          _int_free(av, oldp, 1);
#else
          _int_free(av, oldp);
#endif
          check_inuse_chunk(av, newp);
          return chunk2mem(newp);
        }
      }
    }

    /* If possible, free extra space in old or extended chunk */

    assert((unsigned long)(newsize) >= (unsigned long)(nb));

    remainder_size = newsize - nb;

    if (remainder_size < MINSIZE) { /* not enough extra to split off */
      set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_inuse_bit_at_offset(newp, newsize);
    }
    else { /* split remainder */
      remainder = chunk_at_offset(newp, nb);
      set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head(remainder, remainder_size | PREV_INUSE |
               (av != &main_arena ? NON_MAIN_ARENA : 0));
      /* Mark remainder as inuse so free() won't complain */
      set_inuse_bit_at_offset(remainder, remainder_size);
#ifdef ATOMIC_FASTBINS
      _int_free(av, remainder, 1);
#else
      _int_free(av, remainder);
#endif
    }

    check_inuse_chunk(av, newp);
    return chunk2mem(newp);
  }

#if 0
  /*
    Handle mmap cases
  */

  else {
#if HAVE_MREMAP
    INTERNAL_SIZE_T offset = oldp->prev_size;
    size_t pagemask  = mp_.pagesize - 1;
    char *cp;
    unsigned long sum;

    /* Note the extra SIZE_SZ overhead */
    newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;

    /* don't need to remap if still within same page */
    if (oldsize == newsize - offset)
      return chunk2mem(oldp);

    cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

    if (cp != MAP_FAILED) {

      newp = (mchunkptr)(cp + offset);
      set_head(newp, (newsize - offset)|IS_MMAPPED);

      assert(aligned_OK(chunk2mem(newp)));
      assert((newp->prev_size == offset));

      /* update statistics */
      sum = mp_.mmapped_mem += newsize - oldsize;
      if (sum > (unsigned long)(mp_.max_mmapped_mem))
        mp_.max_mmapped_mem = sum;

      sum += main_arena.system_mem;
      if (sum > (unsigned long)(mp_.max_total_mem))
        mp_.max_total_mem = sum;

      return chunk2mem(newp);
    }
#endif /* HAVE_MREMAP */

    /* Note the extra SIZE_SZ overhead. */
    if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
      newmem = chunk2mem(oldp); /* do nothing */
    else {
      /* Must alloc, copy, free. */
      newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
      if (newmem != 0) {
        MALLOC_COPY(newmem, chunk2mem(oldp), oldsize - 2*SIZE_SZ);
#ifdef ATOMIC_FASTBINS
        _int_free(av, oldp, 1);
#else
        _int_free(av, oldp);
#endif
      }
    }
    return newmem;

    /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
    check_malloc_state(av);
    MALLOC_FAILURE_ACTION;
    return 0;
  }
#endif
}
/*
  ------------------------------ memalign ------------------------------
*/

_int_memalign(mstate av, size_t alignment, size_t bytes)
{
  INTERNAL_SIZE_T nb;             /* padded  request size */
  char*           m;              /* memory returned by malloc call */
  mchunkptr       p;              /* corresponding chunk */
  char*           brk;            /* alignment point within p */
  mchunkptr       newp;           /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  INTERNAL_SIZE_T leadsize;       /* leading space before alignment point */
  mchunkptr       remainder;      /* spare room at end to split off */
  unsigned long   remainder_size; /* its size */
  INTERNAL_SIZE_T size;

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment <  MINSIZE) alignment = MINSIZE;

  /* Make sure alignment is power of 2 (in case MINSIZE is not).  */
  if ((alignment & (alignment - 1)) != 0) {
    size_t a = MALLOC_ALIGNMENT * 2;
    while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
    alignment = a;
  }

  checked_request2size(bytes, nb);

  /*
    Strategy: find a spot within that chunk that meets the alignment
    request, and then possibly free the leading and trailing space.
  */


  /* Call malloc with worst case padding to hit alignment. */

  m  = (char*)(_int_malloc(av, nb + alignment + MINSIZE));

  if (m == 0) return 0; /* propagate failure */

  p = mem2chunk(m);

  if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */

    /*
      Find an aligned spot inside chunk.  Since we need to give back
      leading space in a chunk of at least MINSIZE, if the first
      calculation places us at a spot with less than MINSIZE leader,
      we can move to the next aligned spot -- we've allocated enough
      total room so that this is always possible.
    */

    brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
                           -((signed long) alignment));
    if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
      brk += alignment;

    newp = (mchunkptr)brk;
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

    /* For mmapped chunks, just adjust offset */
    if (chunk_is_mmapped(p)) {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return chunk2mem(newp);
    }

    /* Otherwise, give back leader, use the rest */
    set_head(newp, newsize | PREV_INUSE |
             (av != &main_arena ? NON_MAIN_ARENA : 0));
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
#ifdef ATOMIC_FASTBINS
    _int_free(av, p, 1);
#else
    _int_free(av, p);
#endif
    p = newp;

    assert (newsize >= nb &&
            (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */
  if (!chunk_is_mmapped(p)) {
    size = chunksize(p);
    if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(p, nb);
      set_head(remainder, remainder_size | PREV_INUSE |
               (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head_size(p, nb);
#ifdef ATOMIC_FASTBINS
      _int_free(av, remainder, 1);
#else
      _int_free(av, remainder);
#endif
    }
  }

  check_inuse_chunk(av, p);
  return chunk2mem(p);
}
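/*
  Worked example for the power-of-two adjustment at the top of
  _int_memalign() (illustrative only; assumes MALLOC_ALIGNMENT of 16).
  A request for alignment 24 fails the (alignment & (alignment - 1))
  test, so a starts at 32 and the loop exits immediately because
  32 >= 24; the request is then served with alignment 32.  A request
  for alignment 100 doubles a through 32, 64, 128 and is served with
  128.
*/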
/*
  ------------------------------ calloc ------------------------------
*/

#if __STD_C
Void_t* cALLOc(size_t n_elements, size_t elem_size)
#else
Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
#endif
{
  mchunkptr p;
  unsigned long clearsize;
  unsigned long nclears;
  INTERNAL_SIZE_T* d;

  Void_t* mem = mALLOc(n_elements * elem_size);

  if (mem != 0) {
    p = mem2chunk(mem);

    if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
    {
      /*
        Unroll clear of <= 36 bytes (72 if 8byte sizes)
        We know that contents have an odd number of
        INTERNAL_SIZE_T-sized words; minimally 3.
      */

      d = (INTERNAL_SIZE_T*)mem;
      clearsize = chunksize(p) - SIZE_SZ;
      nclears = clearsize / sizeof(INTERNAL_SIZE_T);
      assert(nclears >= 3);

      MALLOC_ZERO(d, clearsize);
    }
  }
  return mem;
}
/*
  ------------------------- independent_calloc -------------------------
*/

#if __STD_C
_int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
#else
_int_icalloc(av, n_elements, elem_size, chunks)
mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
#endif
{
  size_t sz = elem_size; /* serves as 1-element array */
  /* opts arg of 3 means all elements are same size, and should be cleared */
  return iALLOc(av, n_elements, &sz, 3, chunks);
}

/*
  ------------------------- independent_comalloc -------------------------
*/

#if __STD_C
_int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
#else
_int_icomalloc(av, n_elements, sizes, chunks)
mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
#endif
{
  return iALLOc(av, n_elements, sizes, 0, chunks);
}
/*
  ------------------------------ ialloc ------------------------------
  ialloc provides common support for independent_X routines, handling all of
  the combinations that can result.

  The opts arg has:
    bit 0 set if all elements are same size (using sizes[0])
    bit 1 set if elements should be zeroed
*/

#if __STD_C
iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
#else
iALLOc(av, n_elements, sizes, opts, chunks)
mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
#endif
{
  INTERNAL_SIZE_T element_size;   /* chunksize of each element, if all same */
  INTERNAL_SIZE_T contents_size;  /* total size of elements */
  INTERNAL_SIZE_T array_size;     /* request size of pointer array */
  Void_t*         mem;            /* malloced aggregate space */
  mchunkptr       p;              /* corresponding chunk */
  INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
  Void_t**        marray;         /* either "chunks" or malloced ptr array */
  mchunkptr       array_chunk;    /* chunk for malloced ptr array */
  int             mmx;            /* to disable mmap */
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T size_flags;
  size_t          i;

  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);

  /* compute array length, if needed */
  if (chunks != 0) {
    if (n_elements == 0)
      return chunks; /* nothing to do */
    marray = chunks;
    array_size = 0;
  }
  else {
    /* if empty req, must still return chunk representing empty array */
    if (n_elements == 0)
      return (Void_t**) _int_malloc(av, 0);
    marray = 0;
    array_size = request2size(n_elements * (sizeof(Void_t*)));
  }

  /* compute total element size */
  if (opts & 0x1) { /* all-same-size */
    element_size = request2size(*sizes);
    contents_size = n_elements * element_size;
  }
  else { /* add up all the sizes */
    element_size = 0;
    contents_size = 0;
    for (i = 0; i != n_elements; ++i)
      contents_size += request2size(sizes[i]);
  }

  /* subtract out alignment bytes from total to minimize overallocation */
  size = contents_size + array_size - MALLOC_ALIGN_MASK;

  /*
    Allocate the aggregate chunk.
    But first disable mmap so malloc won't use it, since
    we would not be able to later free/realloc space internal
    to a segregated mmap region.
  */
  mmx = mp_.n_mmaps_max;   /* disable mmap */
  mp_.n_mmaps_max = 0;
  mem = _int_malloc(av, size);
  mp_.n_mmaps_max = mmx;   /* reset mmap */
  if (mem == 0)
    return 0;

  p = mem2chunk(mem);
  assert(!chunk_is_mmapped(p));
  remainder_size = chunksize(p);

  if (opts & 0x2) {       /* optionally clear the elements */
    MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
  }

  size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);

  /* If not provided, allocate the pointer array as final part of chunk */
  if (marray == 0) {
    array_chunk = chunk_at_offset(p, contents_size);
    marray = (Void_t**) (chunk2mem(array_chunk));
    set_head(array_chunk, (remainder_size - contents_size) | size_flags);
    remainder_size = contents_size;
  }

  /* split out elements */
  for (i = 0; ; ++i) {
    marray[i] = chunk2mem(p);
    if (i != n_elements-1) {
      if (element_size != 0)
        size = element_size;
      else
        size = request2size(sizes[i]);
      remainder_size -= size;
      set_head(p, size | size_flags);
      p = chunk_at_offset(p, size);
    }
    else { /* the final element absorbs any overallocation slop */
      set_head(p, remainder_size | size_flags);
      break;
    }
  }

#if MALLOC_DEBUG
  if (marray != chunks) {
    /* final element must have exactly exhausted chunk */
    if (element_size != 0)
      assert(remainder_size == element_size);
    else
      assert(remainder_size == request2size(sizes[i]));
    check_inuse_chunk(av, mem2chunk(marray));
  }

  for (i = 0; i != n_elements; ++i)
    check_inuse_chunk(av, mem2chunk(marray[i]));
#endif

  return marray;
}
/*
  ------------------------------ valloc ------------------------------
*/

#if __STD_C
_int_valloc(mstate av, size_t bytes)
#else
_int_valloc(av, bytes) mstate av; size_t bytes;
#endif
{
  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);
  return _int_memalign(av, mp_.pagesize, bytes);
}

/*
  ------------------------------ pvalloc ------------------------------
*/

#if __STD_C
_int_pvalloc(mstate av, size_t bytes)
#else
_int_pvalloc(av, bytes) mstate av; size_t bytes;
#endif
{
  size_t pagesz;

  /* Ensure initialization/consolidation */
  if (have_fastchunks(av)) malloc_consolidate(av);
  pagesz = mp_.pagesize;
  return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
}
/*
  ------------------------------ malloc_trim ------------------------------
*/

#if __STD_C
static int mTRIm(mstate av, size_t pad)
#else
static int mTRIm(av, pad) mstate av; size_t pad;
#endif
{
  /* Ensure initialization/consolidation */
  malloc_consolidate (av);

  const size_t ps = mp_.pagesize;
  int psindex = bin_index (ps);
  const size_t psm1 = ps - 1;

  int result = 0;
  for (int i = 1; i < NBINS; ++i)
    if (i == 1 || i >= psindex)
      {
        mbinptr bin = bin_at (av, i);

        for (mchunkptr p = last (bin); p != bin; p = p->bk)
          {
            INTERNAL_SIZE_T size = chunksize (p);

            if (size > psm1 + sizeof (struct malloc_chunk))
              {
                /* See whether the chunk contains at least one unused page.  */
                char *paligned_mem = (char *) (((uintptr_t) p
                                                + sizeof (struct malloc_chunk)
                                                + psm1) & ~psm1);

                assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
                assert ((char *) p + size > paligned_mem);

                /* This is the size we could potentially free.  */
                size -= paligned_mem - (char *) p;

                if (size > psm1)
                  {
#ifdef MALLOC_DEBUG
                    /* When debugging we simulate destroying the memory
                       content.  */
                    memset (paligned_mem, 0x89, size & ~psm1);
#endif
                    madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);

                    result = 1;
                  }
              }
          }
      }

#ifndef MORECORE_CANNOT_TRIM
  return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
#else
  return result;
#endif
}
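/*
  Worked example for the madvise() path in mTRIm() above (illustrative
  addresses only; assumes 4096-byte pages and a 32-byte struct
  malloc_chunk).  For a free chunk p at 0x601f80 with size 10000,
  paligned_mem rounds p + 32 up to the next page boundary, 0x602000;
  size is then reduced by 0x602000 - 0x601f80 = 128 to 9872, and
  9872 & ~4095 = 8192, so exactly two whole, currently unused pages
  inside the chunk are handed back to the kernel while the chunk itself
  stays linked in its bin.
*/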
/*
  ------------------------- malloc_usable_size -------------------------
*/

#if __STD_C
size_t mUSABLe(Void_t* mem)
#else
size_t mUSABLe(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem != 0) {
    p = mem2chunk(mem);
    if (chunk_is_mmapped(p))
      return chunksize(p) - 2*SIZE_SZ;
    else if (inuse(p))
      return chunksize(p) - SIZE_SZ;
  }
  return 0;
}
/*
  ------------------------------ mallinfo ------------------------------
*/

struct mallinfo mALLINFo(mstate av)
{
  struct mallinfo mi;
  size_t i;
  mbinptr b;
  mchunkptr p;
  INTERNAL_SIZE_T avail;
  INTERNAL_SIZE_T fastavail;
  int nblocks;
  int nfastblocks;

  /* Ensure initialization */
  if (av->top == 0)  malloc_consolidate(av);

  check_malloc_state(av);

  /* Account for top */
  avail = chunksize(av->top);
  nblocks = 1;  /* top always exists */

  /* traverse fastbins */
  nfastblocks = 0;
  fastavail = 0;

  for (i = 0; i < NFASTBINS; ++i) {
    for (p = fastbin (av, i); p != 0; p = p->fd) {
      ++nfastblocks;
      fastavail += chunksize(p);
    }
  }

  avail += fastavail;

  /* traverse regular bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av, i);
    for (p = last(b); p != b; p = p->bk) {
      ++nblocks;
      avail += chunksize(p);
    }
  }

  mi.smblks = nfastblocks;
  mi.ordblks = nblocks;
  mi.fordblks = avail;
  mi.uordblks = av->system_mem - avail;
  mi.arena = av->system_mem;
  mi.hblks = mp_.n_mmaps;
  mi.hblkhd = mp_.mmapped_mem;
  mi.fsmblks = fastavail;
  mi.keepcost = chunksize(av->top);
  mi.usmblks = mp_.max_total_mem;
  return mi;
}
/*
  ------------------------------ malloc_stats ------------------------------
*/

void mSTATs()
{
  int i;
  mstate ar_ptr;
  struct mallinfo mi;
  unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  _IO_flockfile (stderr);
  int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
  ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
  for (i=0, ar_ptr = &main_arena;; i++) {
    (void)mutex_lock(&ar_ptr->mutex);
    mi = mALLINFo(ar_ptr);
    fprintf(stderr, "Arena %d:\n", i);
    fprintf(stderr, "system bytes     = %10u\n", (unsigned int)mi.arena);
    fprintf(stderr, "in use bytes     = %10u\n", (unsigned int)mi.uordblks);
#if MALLOC_DEBUG > 1
    if (i > 0)
      dump_heap(heap_for_ptr(top(ar_ptr)));
#endif
    system_b += mi.arena;
    in_use_b += mi.uordblks;
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
#if HAVE_MMAP
  fprintf(stderr, "Total (incl. mmap):\n");
#else
  fprintf(stderr, "Total:\n");
#endif
  fprintf(stderr, "system bytes     = %10u\n", system_b);
  fprintf(stderr, "in use bytes     = %10u\n", in_use_b);
#ifdef NO_THREADS
  fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
#endif
#if HAVE_MMAP
  fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
  fprintf(stderr, "max mmap bytes   = %10lu\n",
          (unsigned long)mp_.max_mmapped_mem);
#endif
#if THREAD_STATS
  fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
  fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total     = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
#endif
  ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
  _IO_funlockfile (stderr);
}
/*
  ------------------------------ mallopt ------------------------------
*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  mstate av = &main_arena;
  int res = 1;

  if(__malloc_initialized < 0)
    ptmalloc_init ();
  (void)mutex_lock(&av->mutex);
  /* Ensure initialization/consolidation */
  malloc_consolidate(av);

  switch(param_number) {
  case M_MXFAST:
    if (value >= 0 && value <= MAX_FAST_SIZE) {
      set_max_fast(value);
    }
    else
      res = 0;
    break;

  case M_TRIM_THRESHOLD:
    mp_.trim_threshold = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_TOP_PAD:
    mp_.top_pad = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_MMAP_THRESHOLD:
    /* Forbid setting the threshold too high. */
    if((unsigned long)value > HEAP_MAX_SIZE/2)
      res = 0;
    else {
      mp_.mmap_threshold = value;
      mp_.no_dyn_threshold = 1;
    }
    break;

  case M_MMAP_MAX:
    mp_.n_mmaps_max = value;
    mp_.no_dyn_threshold = 1;
    break;

  case M_CHECK_ACTION:
    check_action = value;
    break;

  case M_PERTURB:
    perturb_byte = value;
    break;

#ifdef PER_THREAD
  case M_ARENA_TEST:
    mp_.arena_test = value;
    break;

  case M_ARENA_MAX:
    mp_.arena_max = value;
    break;
#endif
  }
  (void)mutex_unlock(&av->mutex);
  return res;
}
/*
  -------------------- Alternative MORECORE functions --------------------

  General Requirements for MORECORE.

  The MORECORE function must have the following properties:

  If MORECORE_CONTIGUOUS is false:

    * MORECORE must allocate in multiples of pagesize. It will
      only be called with arguments that are multiples of pagesize.

    * MORECORE(0) must return an address that is at least
      MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)

  else (i.e. If MORECORE_CONTIGUOUS is true):

    * Consecutive calls to MORECORE with positive arguments
      return increasing addresses, indicating that space has been
      contiguously extended.

    * MORECORE need not allocate in multiples of pagesize.
      Calls to MORECORE need not have args of multiples of pagesize.

    * MORECORE need not page-align.

  In either case:

    * MORECORE may allocate more memory than requested. (Or even less,
      but this will generally result in a malloc failure.)

    * MORECORE must not allocate memory when given argument zero, but
      instead return one past the end address of memory from previous
      nonzero call. This malloc does NOT call MORECORE(0)
      until at least one call with positive arguments is made, so
      the initial value returned is not important.

    * Even though consecutive calls to MORECORE need not return contiguous
      addresses, it must be OK for malloc'ed chunks to span multiple
      regions in those cases where they do happen to be contiguous.

    * MORECORE need not handle negative arguments -- it may instead
      just return MORECORE_FAILURE when given negative arguments.
      Negative arguments are always multiples of pagesize. MORECORE
      must not misinterpret negative args as large positive unsigned
      args. You can suppress all such calls from even occurring by
      defining MORECORE_CANNOT_TRIM.

  There is some variation across systems about the type of the
  argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
  actually be size_t, because sbrk supports negative args, so it is
  normally the signed type of the same width as size_t (sometimes
  declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
  matter though. Internally, we use "long" as arguments, which should
  work across all reasonable possibilities.

  Additionally, if MORECORE ever returns failure for a positive
  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  system allocator. This is a useful backup strategy for systems with
  holes in address spaces -- in this case sbrk cannot contiguously
  expand the heap, but mmap may be able to map noncontiguous space.

  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  a function that always returns MORECORE_FAILURE.
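  A minimal sketch of such a replacement (illustrative only; the name
  failing_morecore is not part of this file):

  static void *failing_morecore (long increment)
  {
    // refuse every request, so the allocator always falls back to mmap
    return (void *) MORECORE_FAILURE;
  }

  #define MORECORE failing_morecore
  #define MORECORE_CONTIGUOUS 0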
  If you are using this malloc with something other than sbrk (or its
  emulation) to supply memory regions, you probably want to set
  MORECORE_CONTIGUOUS as false. As an example, here is a custom
  allocator kindly contributed for pre-OSX macOS. It uses virtually
  but not necessarily physically contiguous non-paged memory (locked
  in, present and won't get swapped out). You can use it by
  uncommenting this section, adding some #includes, and setting up the
  appropriate defines above:

      #define MORECORE osMoreCore
      #define MORECORE_CONTIGUOUS 0

  There is also a shutdown routine that should somehow be called for
  cleanup upon program exit.
  #define MAX_POOL_ENTRIES 100
  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
  static int next_os_pool;
  void *our_os_pools[MAX_POOL_ENTRIES];

  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0) {
      if (size < MINIMUM_MORECORE_SIZE)
        size = MINIMUM_MORECORE_SIZE;
      if (CurrentExecutionLevel() == kTaskLevel)
        ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
        return (void *) MORECORE_FAILURE;
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool] = ptr;
      next_os_pool++;
      ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0) {
      // we don't currently support shrink behavior
      return (void *) MORECORE_FAILURE;
    }
    else {
      // MORECORE(0) reports the current top of the pool
      return sbrk_top;
    }
  }

  // cleanup any allocated memory pools
  // called as last thing before shutting down driver
  void osCleanupMem(void)
  {
    void **ptr;
    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr) {
        PoolDeallocate(*ptr);
        *ptr = 0;
      }
  }
*/
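/*
  A sketch of wiring in the shutdown routine mentioned above: application
  code (not this file) could register osCleanupMem with atexit so the
  pools are released on normal program termination.

    #include <stdlib.h>

    int main (void)
    {
      atexit (osCleanupMem);    // run the pool cleanup at normal exit
      void *p = malloc (1024);  // allocations now draw from osMoreCore pools
      free (p);
      return 0;
    }
*/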
extern char **__libc_argv attribute_hidden;

static void
malloc_printerr(int action, const char *str, void *ptr)
{
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
        *--cp = '0';

      __libc_message (action & 2,
                      "*** glibc detected *** %s: %s: 0x%s ***\n",
                      __libc_argv[0] ?: "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}
# include <sys/param.h>

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the SIZE argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *)) != 0
      || alignment == 0)
    return EINVAL;

  /* Call the hook here, so that caller is posix_memalign's caller
     and not posix_memalign itself.  */
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    force_reg (__memalign_hook);
  if (__builtin_expect (hook != NULL, 0))
    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
  else
    mem = public_mEMALIGn (alignment, size);

  if (mem != NULL) {
    *memptr = mem;
    return 0;
  }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
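/*
  Example use of the wrapper above (a sketch of ordinary application code,
  not part of this file): request 64-byte-aligned storage and check the
  error code, which is returned directly rather than set in errno.

    #include <stdlib.h>
    #include <stdio.h>

    int main (void)
    {
      void *p;
      int err = posix_memalign (&p, 64, 1024);  // 64 is a power-of-two multiple of sizeof(void *)
      if (err != 0) {
        fprintf (stderr, "posix_memalign failed: %d\n", err);
        return 1;
      }
      free (p);
      return 0;
    }
*/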
int
malloc_info (int options, FILE *fp)
{
  /* For now, at least.  */
  if (options != 0)
    return EINVAL;

  int n = 0;
  size_t total_nblocks = 0;
  size_t total_nfastblocks = 0;
  size_t total_avail = 0;
  size_t total_fastavail = 0;
  size_t total_system = 0;
  size_t total_max_system = 0;
  size_t total_aspace = 0;
  size_t total_aspace_mprotect = 0;
  void mi_arena (mstate ar_ptr)
  {
    fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);

    size_t nblocks = 0;
    size_t nfastblocks = 0;
    size_t avail = 0;
    size_t fastavail = 0;
    struct
    {
      size_t from;
      size_t to;
      size_t total;
      size_t count;
    } sizes[NFASTBINS + NBINS - 1];
#define nsizes (sizeof (sizes) / sizeof (sizes[0]))

    mutex_lock (&ar_ptr->mutex);

    /* Account for the chunks sitting in the fast bins.  */
    for (size_t i = 0; i < NFASTBINS; ++i)
      {
        mchunkptr p = fastbin (ar_ptr, i);
        if (p != NULL)
          {
            size_t nthissize = 0;
            size_t thissize = chunksize (p);

            while (p != NULL)
              {
                ++nthissize;
                p = p->fd;
              }

            fastavail += nthissize * thissize;
            nfastblocks += nthissize;
            sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
            sizes[i].to = thissize;
            sizes[i].count = nthissize;
          }
        else
          sizes[i].from = sizes[i].to = sizes[i].count = 0;

        sizes[i].total = sizes[i].count * sizes[i].to;
      }
    /* Account for the chunks in the unsorted bin.  */
    mbinptr bin = bin_at (ar_ptr, 1);
    struct malloc_chunk *r = bin->fd;
    if (r != NULL)
      {
        while (r != bin)
          {
            ++sizes[NFASTBINS].count;
            sizes[NFASTBINS].total += r->size;
            sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
            sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
            r = r->fd;
          }
        nblocks += sizes[NFASTBINS].count;
        avail += sizes[NFASTBINS].total;
      }
    /* Account for the chunks in the regular bins.  */
    for (size_t i = 2; i < NBINS; ++i)
      {
        bin = bin_at (ar_ptr, i);
        r = bin->fd;
        sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
        sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
          = sizes[NFASTBINS - 1 + i].count = 0;

        if (r != NULL)
          while (r != bin)
            {
              ++sizes[NFASTBINS - 1 + i].count;
              sizes[NFASTBINS - 1 + i].total += r->size;
              sizes[NFASTBINS - 1 + i].from = MIN (sizes[NFASTBINS - 1 + i].from,
                                                   r->size);
              sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
                                                 r->size);
              r = r->fd;
            }

        if (sizes[NFASTBINS - 1 + i].count == 0)
          sizes[NFASTBINS - 1 + i].from = 0;
        nblocks += sizes[NFASTBINS - 1 + i].count;
        avail += sizes[NFASTBINS - 1 + i].total;
      }
    mutex_unlock (&ar_ptr->mutex);

    total_nfastblocks += nfastblocks;
    total_fastavail += fastavail;

    total_nblocks += nblocks;
    total_avail += avail;

    for (size_t i = 0; i < nsizes; ++i)
      if (sizes[i].count != 0 && i != NFASTBINS)
        fprintf (fp, "\
<size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
                 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);

    if (sizes[NFASTBINS].count != 0)
      fprintf (fp, "\
<unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
               sizes[NFASTBINS].from, sizes[NFASTBINS].to,
               sizes[NFASTBINS].total, sizes[NFASTBINS].count);

    total_system += ar_ptr->system_mem;
    total_max_system += ar_ptr->max_system_mem;

    fprintf (fp,
             "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
             "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
             "<system type=\"current\" size=\"%zu\"/>\n"
             "<system type=\"max\" size=\"%zu\"/>\n",
             nfastblocks, fastavail, nblocks, avail,
             ar_ptr->system_mem, ar_ptr->max_system_mem);

    if (ar_ptr != &main_arena)
      {
        heap_info *heap = heap_for_ptr(top(ar_ptr));
        fprintf (fp,
                 "<aspace type=\"total\" size=\"%zu\"/>\n"
                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                 heap->size, heap->mprotect_size);
        total_aspace += heap->size;
        total_aspace_mprotect += heap->mprotect_size;
      }
    else
      {
        fprintf (fp,
                 "<aspace type=\"total\" size=\"%zu\"/>\n"
                 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
                 ar_ptr->system_mem, ar_ptr->system_mem);
        total_aspace += ar_ptr->system_mem;
        total_aspace_mprotect += ar_ptr->system_mem;
      }

    fputs ("</heap>\n", fp);
  }
  fputs ("<malloc version=\"1\">\n", fp);

  /* Iterate over all arenas currently in use.  */
  mstate ar_ptr = &main_arena;
  do
    {
      mi_arena (ar_ptr);
      ar_ptr = ar_ptr->next;
    }
  while (ar_ptr != &main_arena);

  fprintf (fp,
           "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
           "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
           "<system type=\"current\" size=\"%zu\"/>\n"
           "<system type=\"max\" size=\"%zu\"/>\n"
           "<aspace type=\"total\" size=\"%zu\"/>\n"
           "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
           "</malloc>\n",
           total_nfastblocks, total_fastavail, total_nblocks, total_avail,
           total_system, total_max_system,
           total_aspace, total_aspace_mprotect);

  return 0;
}
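/*
  Sketch of calling malloc_info from a program: it writes one XML document
  (format version "1", as emitted above) describing every arena to the
  given stdio stream.

    #include <malloc.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
      void *p = malloc (4096);
      malloc_info (0, stdout);   // the options argument must currently be 0
      free (p);
      return 0;
    }
*/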
strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
weak_alias (__malloc_get_state, malloc_get_state)
weak_alias (__malloc_set_state, malloc_set_state)
/* ------------------------------------------------------------

   [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
*/