1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2002, 2003, 2004 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 * Version ptmalloc2-20011215
29 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
31 Note: There may be an updated version of this malloc obtainable at
32 http://www.malloc.de/malloc/ptmalloc2.tar.gz
33 Check before installing!
37 In order to compile this implementation, a Makefile is provided with
38 the ptmalloc2 distribution, which has pre-defined targets for some
39 popular systems (e.g. "make posix" for Posix threads). All that is
40 typically required with regard to compiler flags is the selection of
41 the thread package via defining one out of USE_PTHREADS, USE_THR or
42 USE_SPROC. Check the thread-m.h file for what effects this has.
43 Many/most systems will additionally require USE_TSD_DATA_HACK to be
44 defined, so this is the default for "make posix".
46 * Why use this malloc?
48 This is not the fastest, most space-conserving, most portable, or
49 most tunable malloc ever written. However it is among the fastest
50 while also being among the most space-conserving, portable and tunable.
51 Consistent balance across these factors results in a good general-purpose
52 allocator for malloc-intensive programs.
54 The main properties of the algorithms are:
55 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
56 with ties normally decided via FIFO (i.e. least recently used).
57 * For small (<= 64 bytes by default) requests, it is a caching
58 allocator, that maintains pools of quickly recycled chunks.
59 * In between, and for combinations of large and small requests, it does
60 the best it can trying to meet both goals at once.
61 * For very large requests (>= 128KB by default), it relies on system
62 memory mapping facilities, if supported.
64 For a longer but slightly out of date high-level description, see
65 http://gee.cs.oswego.edu/dl/html/malloc.html
67 You may already by default be using a C library containing a malloc
68 that is based on some version of this malloc (for example in
69 linux). You might still want to use the one in this file in order to
70 customize settings or to avoid overheads associated with library
73 * Contents, described in more detail in "description of public routines" below.
75 Standard (ANSI/SVID/...) functions:
77 calloc(size_t n_elements, size_t element_size);
79 realloc(Void_t* p, size_t n);
80 memalign(size_t alignment, size_t n);
83 mallopt(int parameter_number, int parameter_value)
86 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
87 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
90 malloc_trim(size_t pad);
91 malloc_usable_size(Void_t* p);
96 Supported pointer representation: 4 or 8 bytes
97 Supported size_t representation: 4 or 8 bytes
98 Note that size_t is allowed to be 4 bytes even if pointers are 8.
99 You can adjust this by defining INTERNAL_SIZE_T
101 Alignment: 2 * sizeof(size_t) (default)
102 (i.e., 8 byte alignment with 4byte size_t). This suffices for
103 nearly all current machines and C compilers. However, you can
104 define MALLOC_ALIGNMENT to be wider than this if necessary.
106 Minimum overhead per allocated chunk: 4 or 8 bytes
107 Each malloced chunk has a hidden word of overhead holding size
108 and status information.
110 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
111 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
113 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
114 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
115 needed; 4 (8) for a trailing size field and 8 (16) bytes for
116 free list pointers. Thus, the minimum allocatable size is
119 Even a request for zero bytes (i.e., malloc(0)) returns a
120 pointer to something of the minimum allocatable size.
122 The maximum overhead wastage (i.e., number of extra bytes
123 allocated than were requested in malloc) is less than or equal
124 to the minimum size, except for requests >= mmap_threshold that
125 are serviced via mmap(), where the worst case wastage is 2 *
126 sizeof(size_t) bytes plus the remainder from a system page (the
127 minimal mmap unit); typically 4096 or 8192 bytes.
129 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
130 8-byte size_t: 2^64 minus about two pages
132 It is assumed that (possibly signed) size_t values suffice to
133 represent chunk sizes. `Possibly signed' is due to the fact
134 that `size_t' may be defined on a system as either a signed or
135 an unsigned type. The ISO C standard says that it must be
136 unsigned, but a few systems are known not to adhere to this.
137 Additionally, even when size_t is unsigned, sbrk (which is by
138 default used to obtain memory from system) accepts signed
139 arguments, and may not be able to handle size_t-wide arguments
140 with negative sign bit. Generally, values that would
141 appear as negative after accounting for overhead and alignment
142 are supported only via mmap(), which does not have this
145 Requests for sizes outside the allowed range will perform an optional
146 failure action and then return null. (Requests may also
147 also fail because a system is out of memory.)
149 Thread-safety: thread-safe unless NO_THREADS is defined
151 Compliance: I believe it is compliant with the 1997 Single Unix Specification
152 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
155 * Synopsis of compile-time options:
157 People have reported using previous versions of this malloc on all
158 versions of Unix, sometimes by tweaking some of the defines
159 below. It has been tested most extensively on Solaris and
160 Linux. It is also reported to work on WIN32 platforms.
161 People also report using it in stand-alone embedded systems.
163 The implementation is in straight, hand-tuned ANSI C. It is not
164 at all modular. (Sorry!) It uses a lot of macros. To be at all
165 usable, this code should be compiled using an optimizing compiler
166 (for example gcc -O3) that can simplify expressions and control
167 paths. (FAQ: some macros import variables as arguments rather than
168 declare locals because people reported that some debuggers
169 otherwise get confused.)
173 Compilation Environment options:
175 __STD_C derived from C compiler defines
178 USE_MEMCPY 1 if HAVE_MEMCPY is defined
179 HAVE_MMAP defined as 1
181 HAVE_MREMAP 0 unless linux defined
182 USE_ARENAS the same as HAVE_MMAP
183 malloc_getpagesize derived from system #includes, or 4096 if not
184 HAVE_USR_INCLUDE_MALLOC_H NOT defined
185 LACKS_UNISTD_H NOT defined unless WIN32
186 LACKS_SYS_PARAM_H NOT defined unless WIN32
187 LACKS_SYS_MMAN_H NOT defined unless WIN32
189 Changing default word sizes:
191 INTERNAL_SIZE_T size_t
192 MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
194 Configuration and functionality options:
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
204 Options for customizing MORECORE:
208 MORECORE_CONTIGUOUS 1
209 MORECORE_CANNOT_TRIM NOT defined
211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
213 Tuning options that are also dynamically changeable via mallopt:
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
231 #if defined(__STDC__) || defined(__cplusplus)
240 Void_t* is the pointer type that malloc should say it returns
244 #if (__STD_C || defined(WIN32))
252 #include <stddef.h> /* for size_t */
253 #include <stdlib.h> /* for getenv(), abort() */
255 #include <sys/types.h>
258 #include <malloc-machine.h>
261 #include <stdio-common/_itoa.h>
268 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
270 /* #define LACKS_UNISTD_H */
272 #ifndef LACKS_UNISTD_H
276 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
278 /* #define LACKS_SYS_PARAM_H */
281 #include <stdio.h> /* needed for malloc_stats */
282 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
287 /* For va_arg, va_start, va_end. */
290 /* For writev and struct iovec. */
293 #include <sys/syslog.h>
295 /* For various dynamic linking things. */
302 Because freed chunks may be overwritten with bookkeeping fields, this
303 malloc will often die when freed memory is overwritten by user
304 programs. This can be very effective (albeit in an annoying way)
305 in helping track down dangling pointers.
307 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
308 enabled that will catch more memory errors. You probably won't be
309 able to make much sense of the actual assertion errors, but they
310 should help you locate incorrectly overwritten memory. The checking
311 is fairly extensive, and will slow down execution
312 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
313 will attempt to check every non-mmapped allocated and free chunk in
314 the course of computing the summmaries. (By nature, mmapped regions
315 cannot be checked very much automatically.)
317 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
318 this code. The assertions in the check routines spell out in more
319 detail the assumptions and invariants underlying the algorithms.
321 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
322 checking that all accesses to malloced memory stay within their
323 bounds. However, there are several add-ons and adaptations of this
324 or other mallocs available that do this.
331 #define assert(x) ((void)0)
336 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
339 The default version is the same as size_t.
341 While not strictly necessary, it is best to define this as an
342 unsigned type, even if size_t is a signed type. This may avoid some
343 artificial size limitations on some systems.
345 On a 64-bit machine, you may be able to reduce malloc overhead by
346 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
347 expense of not being able to handle more than 2^32 of malloced
348 space. If this limitation is acceptable, you are encouraged to set
349 this unless you are on a platform requiring 16byte alignments. In
350 this case the alignment requirements turn out to negate any
351 potential advantages of decreasing size_t word size.
353 Implementors: Beware of the possible combinations of:
354 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
355 and might be the same width as int or as long
356 - size_t might have different width and signedness as INTERNAL_SIZE_T
357 - int and long might be 32 or 64 bits, and might be the same width
358 To deal with this, most comparisons and difference computations
359 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
360 aware of the fact that casting an unsigned int to a wider long does
361 not sign-extend. (This also makes checking for negative numbers
362 awkward.) Some of these casts result in harmless compiler warnings
366 #ifndef INTERNAL_SIZE_T
367 #define INTERNAL_SIZE_T size_t
370 /* The corresponding word size */
371 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
375 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
376 It must be a power of two at least 2 * SIZE_SZ, even on machines
377 for which smaller alignments would suffice. It may be defined as
378 larger than this though. Note however that code and data structures
379 are optimized for the case of 8-byte alignment.
383 #ifndef MALLOC_ALIGNMENT
384 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
387 /* The corresponding bit mask value */
388 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
393 REALLOC_ZERO_BYTES_FREES should be set if a call to
394 realloc with zero bytes should be the same as a call to free.
395 This is required by the C standard. Otherwise, since this malloc
396 returns a unique pointer for malloc(0), so does realloc(p, 0).
399 #ifndef REALLOC_ZERO_BYTES_FREES
400 #define REALLOC_ZERO_BYTES_FREES 1
404 TRIM_FASTBINS controls whether free() of a very small chunk can
405 immediately lead to trimming. Setting to true (1) can reduce memory
406 footprint, but will almost always slow down programs that use a lot
409 Define this only if you are willing to give up some speed to more
410 aggressively reduce system-level memory footprint when releasing
411 memory in programs that use many small chunks. You can get
412 essentially the same effect by setting MXFAST to 0, but this can
413 lead to even greater slowdowns in programs using many small chunks.
414 TRIM_FASTBINS is an in-between compile-time option, that disables
415 only those chunks bordering topmost memory from being placed in
419 #ifndef TRIM_FASTBINS
420 #define TRIM_FASTBINS 0
425 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
426 This is necessary when you only want to use this malloc in one part
427 of a program, using your regular system malloc elsewhere.
430 /* #define USE_DL_PREFIX */
434 Two-phase name translation.
435 All of the actual routines are given mangled names.
436 When wrappers are used, they become the public callable versions.
437 When DL_PREFIX is used, the callable names are prefixed.
441 #define public_cALLOc dlcalloc
442 #define public_fREe dlfree
443 #define public_cFREe dlcfree
444 #define public_mALLOc dlmalloc
445 #define public_mEMALIGn dlmemalign
446 #define public_rEALLOc dlrealloc
447 #define public_vALLOc dlvalloc
448 #define public_pVALLOc dlpvalloc
449 #define public_mALLINFo dlmallinfo
450 #define public_mALLOPt dlmallopt
451 #define public_mTRIm dlmalloc_trim
452 #define public_mSTATs dlmalloc_stats
453 #define public_mUSABLe dlmalloc_usable_size
454 #define public_iCALLOc dlindependent_calloc
455 #define public_iCOMALLOc dlindependent_comalloc
456 #define public_gET_STATe dlget_state
457 #define public_sET_STATe dlset_state
458 #else /* USE_DL_PREFIX */
461 /* Special defines for the GNU C library. */
462 #define public_cALLOc __libc_calloc
463 #define public_fREe __libc_free
464 #define public_cFREe __libc_cfree
465 #define public_mALLOc __libc_malloc
466 #define public_mEMALIGn __libc_memalign
467 #define public_rEALLOc __libc_realloc
468 #define public_vALLOc __libc_valloc
469 #define public_pVALLOc __libc_pvalloc
470 #define public_mALLINFo __libc_mallinfo
471 #define public_mALLOPt __libc_mallopt
472 #define public_mTRIm __malloc_trim
473 #define public_mSTATs __malloc_stats
474 #define public_mUSABLe __malloc_usable_size
475 #define public_iCALLOc __libc_independent_calloc
476 #define public_iCOMALLOc __libc_independent_comalloc
477 #define public_gET_STATe __malloc_get_state
478 #define public_sET_STATe __malloc_set_state
479 #define malloc_getpagesize __getpagesize()
482 #define munmap __munmap
483 #define mremap __mremap
484 #define mprotect __mprotect
485 #define MORECORE (*__morecore)
486 #define MORECORE_FAILURE 0
488 Void_t
* __default_morecore (ptrdiff_t);
489 Void_t
*(*__morecore
)(ptrdiff_t) = __default_morecore
;
492 #define public_cALLOc calloc
493 #define public_fREe free
494 #define public_cFREe cfree
495 #define public_mALLOc malloc
496 #define public_mEMALIGn memalign
497 #define public_rEALLOc realloc
498 #define public_vALLOc valloc
499 #define public_pVALLOc pvalloc
500 #define public_mALLINFo mallinfo
501 #define public_mALLOPt mallopt
502 #define public_mTRIm malloc_trim
503 #define public_mSTATs malloc_stats
504 #define public_mUSABLe malloc_usable_size
505 #define public_iCALLOc independent_calloc
506 #define public_iCOMALLOc independent_comalloc
507 #define public_gET_STATe malloc_get_state
508 #define public_sET_STATe malloc_set_state
510 #endif /* USE_DL_PREFIX */
513 #define __builtin_expect(expr, val) (expr)
515 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
519 HAVE_MEMCPY should be defined if you are not otherwise using
520 ANSI STD C, but still have memcpy and memset in your C library
521 and want to use them in calloc and realloc. Otherwise simple
522 macro versions are defined below.
524 USE_MEMCPY should be defined as 1 if you actually want to
525 have memset and memcpy called. People report that the macro
526 versions are faster than libc versions on some systems.
528 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
529 (of <= 36 bytes) are manually unrolled in realloc and calloc.
543 #if (__STD_C || defined(HAVE_MEMCPY))
549 /* On Win32 memset and memcpy are already declared in windows.h */
552 void* memset(void*, int, size_t);
553 void* memcpy(void*, const void*, size_t);
563 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
564 malloc fails to be able to return memory, either because memory is
565 exhausted or because of illegal arguments.
567 By default, sets errno if running on STD_C platform, else does nothing.
570 #ifndef MALLOC_FAILURE_ACTION
572 #define MALLOC_FAILURE_ACTION \
576 #define MALLOC_FAILURE_ACTION
581 MORECORE-related declarations. By default, rely on sbrk
585 #ifdef LACKS_UNISTD_H
586 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
588 extern Void_t
* sbrk(ptrdiff_t);
590 extern Void_t
* sbrk();
596 MORECORE is the name of the routine to call to obtain more memory
597 from the system. See below for general guidance on writing
598 alternative MORECORE functions, as well as a version for WIN32 and a
599 sample version for pre-OSX macos.
603 #define MORECORE sbrk
607 MORECORE_FAILURE is the value returned upon failure of MORECORE
608 as well as mmap. Since it cannot be an otherwise valid memory address,
609 and must reflect values of standard sys calls, you probably ought not
613 #ifndef MORECORE_FAILURE
614 #define MORECORE_FAILURE (-1)
618 If MORECORE_CONTIGUOUS is true, take advantage of fact that
619 consecutive calls to MORECORE with positive arguments always return
620 contiguous increasing addresses. This is true of unix sbrk. Even
621 if not defined, when regions happen to be contiguous, malloc will
622 permit allocations spanning regions obtained from different
623 calls. But defining this when applicable enables some stronger
624 consistency checks and space efficiencies.
627 #ifndef MORECORE_CONTIGUOUS
628 #define MORECORE_CONTIGUOUS 1
632 Define MORECORE_CANNOT_TRIM if your version of MORECORE
633 cannot release space back to the system when given negative
634 arguments. This is generally necessary only if you are using
635 a hand-crafted MORECORE function that cannot handle negative arguments.
638 /* #define MORECORE_CANNOT_TRIM */
640 /* MORECORE_CLEARS (default 1)
641 The degree to which the routine mapped to MORECORE zeroes out
642 memory: never (0), only for newly allocated space (1) or always
643 (2). The distinction between (1) and (2) is necessary because on
644 some systems, if the application first decrements and then
645 increments the break value, the contents of the reallocated space
649 #ifndef MORECORE_CLEARS
650 #define MORECORE_CLEARS 1
655 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
656 allocate very large blocks. These will be returned to the
657 operating system immediately after a free(). Also, if mmap
658 is available, it is used as a backup strategy in cases where
659 MORECORE fails to provide space from system.
661 This malloc is best tuned to work with mmap for large requests.
662 If you do not have mmap, operations involving very large chunks (1MB
663 or so) may be slower than you'd like.
670 Standard unix mmap using /dev/zero clears memory so calloc doesn't
675 #define MMAP_CLEARS 1
680 #define MMAP_CLEARS 0
686 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
687 sbrk fails, and mmap is used as a backup (which is done only if
688 HAVE_MMAP). The value must be a multiple of page size. This
689 backup strategy generally applies only when systems have "holes" in
690 address space, so sbrk cannot perform contiguous expansion, but
691 there is still space available on system. On systems for which
692 this is known to be useful (i.e. most linux kernels), this occurs
693 only when programs allocate huge amounts of memory. Between this,
694 and the fact that mmap regions tend to be limited, the size should
695 be large, to avoid too many mmap calls and thus avoid running out
699 #ifndef MMAP_AS_MORECORE_SIZE
700 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
704 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
705 large blocks. This is currently only possible on Linux with
706 kernel versions newer than 1.3.77.
711 #define HAVE_MREMAP 1
713 #define HAVE_MREMAP 0
716 #endif /* HAVE_MMAP */
718 /* Define USE_ARENAS to enable support for multiple `arenas'. These
719 are allocated using mmap(), are necessary for threads and
720 occasionally useful to overcome address space limitations affecting
724 #define USE_ARENAS HAVE_MMAP
729 The system page size. To the extent possible, this malloc manages
730 memory from the system in page-size units. Note that this value is
731 cached during initialization into a field of malloc_state. So even
732 if malloc_getpagesize is a function, it is only called once.
734 The following mechanics for getpagesize were adapted from bsd/gnu
735 getpagesize.h. If none of the system-probes here apply, a value of
736 4096 is used, which should be OK: If they don't apply, then using
737 the actual value probably doesn't impact performance.
741 #ifndef malloc_getpagesize
743 #ifndef LACKS_UNISTD_H
747 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
748 # ifndef _SC_PAGE_SIZE
749 # define _SC_PAGE_SIZE _SC_PAGESIZE
753 # ifdef _SC_PAGE_SIZE
754 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
756 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
757 extern size_t getpagesize();
758 # define malloc_getpagesize getpagesize()
760 # ifdef WIN32 /* use supplied emulation of getpagesize */
761 # define malloc_getpagesize getpagesize()
763 # ifndef LACKS_SYS_PARAM_H
764 # include <sys/param.h>
766 # ifdef EXEC_PAGESIZE
767 # define malloc_getpagesize EXEC_PAGESIZE
771 # define malloc_getpagesize NBPG
773 # define malloc_getpagesize (NBPG * CLSIZE)
777 # define malloc_getpagesize NBPC
780 # define malloc_getpagesize PAGESIZE
781 # else /* just guess */
782 # define malloc_getpagesize (4096)
793 This version of malloc supports the standard SVID/XPG mallinfo
794 routine that returns a struct containing usage properties and
795 statistics. It should work on any SVID/XPG compliant system that has
796 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
797 install such a thing yourself, cut out the preliminary declarations
798 as described above and below and save them in a malloc.h file. But
799 there's no compelling reason to bother to do this.)
801 The main declaration needed is the mallinfo struct that is returned
802 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
803 bunch of fields that are not even meaningful in this version of
804 malloc. These fields are are instead filled by mallinfo() with
805 other numbers that might be of interest.
807 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
808 /usr/include/malloc.h file that includes a declaration of struct
809 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
810 version is declared below. These must be precisely the same for
811 mallinfo() to work. The original SVID version of this struct,
812 defined on most systems with mallinfo, declares all fields as
813 ints. But some others define as unsigned long. If your system
814 defines the fields using a type of different width than listed here,
815 you must #include your system version and #define
816 HAVE_USR_INCLUDE_MALLOC_H.
819 /* #define HAVE_USR_INCLUDE_MALLOC_H */
821 #ifdef HAVE_USR_INCLUDE_MALLOC_H
822 #include "/usr/include/malloc.h"
826 /* ---------- description of public routines ------------ */
830 Returns a pointer to a newly allocated chunk of at least n bytes, or null
831 if no space is available. Additionally, on failure, errno is
832 set to ENOMEM on ANSI C systems.
834 If n is zero, malloc returns a minumum-sized chunk. (The minimum
835 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
836 systems.) On most systems, size_t is an unsigned type, so calls
837 with negative arguments are interpreted as requests for huge amounts
838 of space, which will often fail. The maximum supported value of n
839 differs across systems, but is in all cases less than the maximum
840 representable value of a size_t.
843 Void_t
* public_mALLOc(size_t);
845 Void_t
* public_mALLOc();
847 #ifdef libc_hidden_proto
848 libc_hidden_proto (public_mALLOc
)
853 Releases the chunk of memory pointed to by p, that had been previously
854 allocated using malloc or a related routine such as realloc.
855 It has no effect if p is null. It can have arbitrary (i.e., bad!)
856 effects if p has already been freed.
858 Unless disabled (using mallopt), freeing very large spaces will
859 when possible, automatically trigger operations that give
860 back unused memory to the system, thus reducing program footprint.
863 void public_fREe(Void_t
*);
867 #ifdef libc_hidden_proto
868 libc_hidden_proto (public_fREe
)
872 calloc(size_t n_elements, size_t element_size);
873 Returns a pointer to n_elements * element_size bytes, with all locations
877 Void_t
* public_cALLOc(size_t, size_t);
879 Void_t
* public_cALLOc();
883 realloc(Void_t* p, size_t n)
884 Returns a pointer to a chunk of size n that contains the same data
885 as does chunk p up to the minimum of (n, p's size) bytes, or null
886 if no space is available.
888 The returned pointer may or may not be the same as p. The algorithm
889 prefers extending p when possible, otherwise it employs the
890 equivalent of a malloc-copy-free sequence.
892 If p is null, realloc is equivalent to malloc.
894 If space is not available, realloc returns null, errno is set (if on
895 ANSI) and p is NOT freed.
897 if n is for fewer bytes than already held by p, the newly unused
898 space is lopped off and freed if possible. Unless the #define
899 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
900 zero (re)allocates a minimum-sized chunk.
902 Large chunks that were internally obtained via mmap will always
903 be reallocated using malloc-copy-free sequences unless
904 the system supports MREMAP (currently only linux).
906 The old unix realloc convention of allowing the last-free'd chunk
907 to be used as an argument to realloc is not supported.
910 Void_t
* public_rEALLOc(Void_t
*, size_t);
912 Void_t
* public_rEALLOc();
914 #ifdef libc_hidden_proto
915 libc_hidden_proto (public_rEALLOc
)
919 memalign(size_t alignment, size_t n);
920 Returns a pointer to a newly allocated chunk of n bytes, aligned
921 in accord with the alignment argument.
923 The alignment argument should be a power of two. If the argument is
924 not a power of two, the nearest greater power is used.
925 8-byte alignment is guaranteed by normal malloc calls, so don't
926 bother calling memalign with an argument of 8 or less.
928 Overreliance on memalign is a sure way to fragment space.
931 Void_t
* public_mEMALIGn(size_t, size_t);
933 Void_t
* public_mEMALIGn();
935 #ifdef libc_hidden_proto
936 libc_hidden_proto (public_mEMALIGn
)
941 Equivalent to memalign(pagesize, n), where pagesize is the page
942 size of the system. If the pagesize is unknown, 4096 is used.
945 Void_t
* public_vALLOc(size_t);
947 Void_t
* public_vALLOc();
953 mallopt(int parameter_number, int parameter_value)
954 Sets tunable parameters The format is to provide a
955 (parameter-number, parameter-value) pair. mallopt then sets the
956 corresponding parameter to the argument value if it can (i.e., so
957 long as the value is meaningful), and returns 1 if successful else
958 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
959 normally defined in malloc.h. Only one of these (M_MXFAST) is used
960 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
961 so setting them has no effect. But this malloc also supports four
962 other options in mallopt. See below for details. Briefly, supported
963 parameters are as follows (listed defaults are for "typical"
966 Symbol param # default allowed param values
967 M_MXFAST 1 64 0-80 (0 disables fastbins)
968 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
970 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
971 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
974 int public_mALLOPt(int, int);
976 int public_mALLOPt();
982 Returns (by copy) a struct containing various summary statistics:
984 arena: current total non-mmapped bytes allocated from system
985 ordblks: the number of free chunks
986 smblks: the number of fastbin blocks (i.e., small chunks that
987 have been freed but not use resused or consolidated)
988 hblks: current number of mmapped regions
989 hblkhd: total bytes held in mmapped regions
990 usmblks: the maximum total allocated space. This will be greater
991 than current total if trimming has occurred.
992 fsmblks: total bytes held in fastbin blocks
993 uordblks: current total allocated space (normal or mmapped)
994 fordblks: total free space
995 keepcost: the maximum number of bytes that could ideally be released
996 back to system via malloc_trim. ("ideally" means that
997 it ignores page restrictions etc.)
999 Because these fields are ints, but internal bookkeeping may
1000 be kept as longs, the reported values may wrap around zero and
1004 struct mallinfo
public_mALLINFo(void);
1006 struct mallinfo
public_mALLINFo();
1010 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1012 independent_calloc is similar to calloc, but instead of returning a
1013 single cleared space, it returns an array of pointers to n_elements
1014 independent elements that can hold contents of size elem_size, each
1015 of which starts out cleared, and can be independently freed,
1016 realloc'ed etc. The elements are guaranteed to be adjacently
1017 allocated (this is not guaranteed to occur with multiple callocs or
1018 mallocs), which may also improve cache locality in some
1021 The "chunks" argument is optional (i.e., may be null, which is
1022 probably the most typical usage). If it is null, the returned array
1023 is itself dynamically allocated and should also be freed when it is
1024 no longer needed. Otherwise, the chunks array must be of at least
1025 n_elements in length. It is filled in with the pointers to the
1028 In either case, independent_calloc returns this pointer array, or
1029 null if the allocation failed. If n_elements is zero and "chunks"
1030 is null, it returns a chunk representing an array with zero elements
1031 (which should be freed if not wanted).
1033 Each element must be individually freed when it is no longer
1034 needed. If you'd like to instead be able to free all at once, you
1035 should instead use regular calloc and assign pointers into this
1036 space to represent elements. (In this case though, you cannot
1037 independently free elements.)
1039 independent_calloc simplifies and speeds up implementations of many
1040 kinds of pools. It may also be useful when constructing large data
1041 structures that initially have a fixed number of fixed-sized nodes,
1042 but the number is not known at compile time, and some of the nodes
1043 may later need to be freed. For example:
1045 struct Node { int item; struct Node* next; };
1047 struct Node* build_list() {
1049 int n = read_number_of_nodes_needed();
1050 if (n <= 0) return 0;
1051 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
1052 if (pool == 0) die();
1053 // organize into a linked list...
1054 struct Node* first = pool[0];
1055 for (i = 0; i < n-1; ++i)
1056 pool[i]->next = pool[i+1];
1057 free(pool); // Can now free the array (or not, if it is needed later)
1062 Void_t
** public_iCALLOc(size_t, size_t, Void_t
**);
1064 Void_t
** public_iCALLOc();
1068 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1070 independent_comalloc allocates, all at once, a set of n_elements
1071 chunks with sizes indicated in the "sizes" array. It returns
1072 an array of pointers to these elements, each of which can be
1073 independently freed, realloc'ed etc. The elements are guaranteed to
1074 be adjacently allocated (this is not guaranteed to occur with
1075 multiple callocs or mallocs), which may also improve cache locality
1076 in some applications.
1078 The "chunks" argument is optional (i.e., may be null). If it is null
1079 the returned array is itself dynamically allocated and should also
1080 be freed when it is no longer needed. Otherwise, the chunks array
1081 must be of at least n_elements in length. It is filled in with the
1082 pointers to the chunks.
1084 In either case, independent_comalloc returns this pointer array, or
1085 null if the allocation failed. If n_elements is zero and chunks is
1086 null, it returns a chunk representing an array with zero elements
1087 (which should be freed if not wanted).
1089 Each element must be individually freed when it is no longer
1090 needed. If you'd like to instead be able to free all at once, you
1091 should instead use a single regular malloc, and assign pointers at
1092 particular offsets in the aggregate space. (In this case though, you
1093 cannot independently free elements.)
1095 independent_comallac differs from independent_calloc in that each
1096 element may have a different size, and also that it does not
1097 automatically clear elements.
1099 independent_comalloc can be used to speed up allocation in cases
1100 where several structs or objects must always be allocated at the
1101 same time. For example:
1106 void send_message(char* msg) {
1107 int msglen = strlen(msg);
1108 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1110 if (independent_comalloc(3, sizes, chunks) == 0)
1112 struct Head* head = (struct Head*)(chunks[0]);
1113 char* body = (char*)(chunks[1]);
1114 struct Foot* foot = (struct Foot*)(chunks[2]);
1118 In general though, independent_comalloc is worth using only for
1119 larger values of n_elements. For small values, you probably won't
1120 detect enough difference from series of malloc calls to bother.
1122 Overuse of independent_comalloc can increase overall memory usage,
1123 since it cannot reuse existing noncontiguous small chunks that
1124 might be available for some of the elements.
1127 Void_t
** public_iCOMALLOc(size_t, size_t*, Void_t
**);
1129 Void_t
** public_iCOMALLOc();
1135 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1136 round up n to nearest pagesize.
1139 Void_t
* public_pVALLOc(size_t);
1141 Void_t
* public_pVALLOc();
1146 Equivalent to free(p).
1148 cfree is needed/defined on some systems that pair it with calloc,
1149 for odd historical reasons (such as: cfree is used in example
1150 code in the first edition of K&R).
1153 void public_cFREe(Void_t
*);
1155 void public_cFREe();
1159 malloc_trim(size_t pad);
1161 If possible, gives memory back to the system (via negative
1162 arguments to sbrk) if there is unused memory at the `high' end of
1163 the malloc pool. You can call this after freeing large blocks of
1164 memory to potentially reduce the system-level memory requirements
1165 of a program. However, it cannot guarantee to reduce memory. Under
1166 some allocation patterns, some large free blocks of memory will be
1167 locked between two used chunks, so they cannot be given back to
1170 The `pad' argument to malloc_trim represents the amount of free
1171 trailing space to leave untrimmed. If this argument is zero,
1172 only the minimum amount of memory to maintain internal data
1173 structures will be left (one page or less). Non-zero arguments
1174 can be supplied to maintain enough trailing space to service
1175 future expected allocations without having to re-obtain memory
1178 Malloc_trim returns 1 if it actually released any memory, else 0.
1179 On systems that do not support "negative sbrks", it will always
1183 int public_mTRIm(size_t);
1189 malloc_usable_size(Void_t* p);
1191 Returns the number of bytes you can actually use in
1192 an allocated chunk, which may be more than you requested (although
1193 often not) due to alignment and minimum size constraints.
1194 You can use this many bytes without worrying about
1195 overwriting other allocated objects. This is not a particularly great
1196 programming practice. malloc_usable_size can be more useful in
1197 debugging and assertions, for example:
1200 assert(malloc_usable_size(p) >= 256);
1204 size_t public_mUSABLe(Void_t
*);
1206 size_t public_mUSABLe();
1211 Prints on stderr the amount of space obtained from the system (both
1212 via sbrk and mmap), the maximum amount (which may be more than
1213 current if malloc_trim and/or munmap got called), and the current
1214 number of bytes allocated via malloc (or realloc, etc) but not yet
1215 freed. Note that this is the number of bytes allocated, not the
1216 number requested. It will be larger than the number requested
1217 because of alignment and bookkeeping overhead. Because it includes
1218 alignment wastage as being in use, this figure may be greater than
1219 zero even when no user-level chunks are allocated.
1221 The reported current and maximum system memory can be inaccurate if
1222 a program makes other calls to system memory allocation functions
1223 (normally sbrk) outside of malloc.
1225 malloc_stats prints only the most commonly interesting statistics.
1226 More information can be obtained by calling mallinfo.
1230 void public_mSTATs(void);
1232 void public_mSTATs();
1236 malloc_get_state(void);
1238 Returns the state of all malloc variables in an opaque data
1242 Void_t
* public_gET_STATe(void);
1244 Void_t
* public_gET_STATe();
1248 malloc_set_state(Void_t* state);
1250 Restore the state of all malloc variables from data obtained with
1254 int public_sET_STATe(Void_t
*);
1256 int public_sET_STATe();
1261 posix_memalign(void **memptr, size_t alignment, size_t size);
1263 POSIX wrapper like memalign(), checking for validity of size.
1265 int __posix_memalign(void **, size_t, size_t);
1268 /* mallopt tuning options */
1271 M_MXFAST is the maximum request size used for "fastbins", special bins
1272 that hold returned chunks without consolidating their spaces. This
1273 enables future requests for chunks of the same size to be handled
1274 very quickly, but can increase fragmentation, and thus increase the
1275 overall memory footprint of a program.
1277 This malloc manages fastbins very conservatively yet still
1278 efficiently, so fragmentation is rarely a problem for values less
1279 than or equal to the default. The maximum supported value of MXFAST
1280 is 80. You wouldn't want it any higher than this anyway. Fastbins
1281 are designed especially for use with many small structs, objects or
1282 strings -- the default handles structs/objects/arrays with sizes up
1283 to 8 4byte fields, or small strings representing words, tokens,
1284 etc. Using fastbins for larger objects normally worsens
1285 fragmentation without improving speed.
1287 M_MXFAST is set in REQUEST size units. It is internally used in
1288 chunksize units, which adds padding and alignment. You can reduce
1289 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1290 algorithm to be a closer approximation of fifo-best-fit in all cases,
1291 not just for larger requests, but will generally cause it to be
1296 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1301 #ifndef DEFAULT_MXFAST
1302 #define DEFAULT_MXFAST 64
1307 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1308 to keep before releasing via malloc_trim in free().
1310 Automatic trimming is mainly useful in long-lived programs.
1311 Because trimming via sbrk can be slow on some systems, and can
1312 sometimes be wasteful (in cases where programs immediately
1313 afterward allocate more large chunks) the value should be high
1314 enough so that your overall system performance would improve by
1315 releasing this much memory.
1317 The trim threshold and the mmap control parameters (see below)
1318 can be traded off with one another. Trimming and mmapping are
1319 two different ways of releasing unused memory back to the
1320 system. Between these two, it is often possible to keep
1321 system-level demands of a long-lived program down to a bare
1322 minimum. For example, in one test suite of sessions measuring
1323 the XF86 X server on Linux, using a trim threshold of 128K and a
1324 mmap threshold of 192K led to near-minimal long term resource
1327 If you are using this malloc in a long-lived program, it should
1328 pay to experiment with these values. As a rough guide, you
1329 might set to a value close to the average size of a process
1330 (program) running on your system. Releasing this much memory
1331 would allow such a process to run in memory. Generally, it's
1332 worth it to tune for trimming rather tham memory mapping when a
1333 program undergoes phases where several large chunks are
1334 allocated and released in ways that can reuse each other's
1335 storage, perhaps mixed with phases where there are no such
1336 chunks at all. And in well-behaved long-lived programs,
1337 controlling release of large blocks via trimming versus mapping
1340 However, in most programs, these parameters serve mainly as
1341 protection against the system-level effects of carrying around
1342 massive amounts of unneeded memory. Since frequent calls to
1343 sbrk, mmap, and munmap otherwise degrade performance, the default
1344 parameters are set to relatively high values that serve only as
1347 The trim value It must be greater than page size to have any useful
1348 effect. To disable trimming completely, you can set to
1351 Trim settings interact with fastbin (MXFAST) settings: Unless
1352 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1353 freeing a chunk with size less than or equal to MXFAST. Trimming is
1354 instead delayed until subsequent freeing of larger chunks. However,
1355 you can still force an attempted trim by calling malloc_trim.
1357 Also, trimming is not generally possible in cases where
1358 the main arena is obtained via mmap.
1360 Note that the trick some people use of mallocing a huge space and
1361 then freeing it at program startup, in an attempt to reserve system
1362 memory, doesn't have the intended effect under automatic trimming,
1363 since that memory will immediately be returned to the system.
1366 #define M_TRIM_THRESHOLD -1
1368 #ifndef DEFAULT_TRIM_THRESHOLD
1369 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1373 M_TOP_PAD is the amount of extra `padding' space to allocate or
1374 retain whenever sbrk is called. It is used in two ways internally:
1376 * When sbrk is called to extend the top of the arena to satisfy
1377 a new malloc request, this much padding is added to the sbrk
1380 * When malloc_trim is called automatically from free(),
1381 it is used as the `pad' argument.
1383 In both cases, the actual amount of padding is rounded
1384 so that the end of the arena is always a system page boundary.
1386 The main reason for using padding is to avoid calling sbrk so
1387 often. Having even a small pad greatly reduces the likelihood
1388 that nearly every malloc request during program start-up (or
1389 after trimming) will invoke sbrk, which needlessly wastes
1392 Automatic rounding-up to page-size units is normally sufficient
1393 to avoid measurable overhead, so the default is 0. However, in
1394 systems where sbrk is relatively slow, it can pay to increase
1395 this value, at the expense of carrying around more memory than
1399 #define M_TOP_PAD -2
1401 #ifndef DEFAULT_TOP_PAD
1402 #define DEFAULT_TOP_PAD (0)
1406 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1407 to service a request. Requests of at least this size that cannot
1408 be allocated using already-existing space will be serviced via mmap.
1409 (If enough normal freed space already exists it is used instead.)
1411 Using mmap segregates relatively large chunks of memory so that
1412 they can be individually obtained and released from the host
1413 system. A request serviced through mmap is never reused by any
1414 other request (at least not directly; the system may just so
1415 happen to remap successive requests to the same locations).
1417 Segregating space in this way has the benefits that:
1419 1. Mmapped space can ALWAYS be individually released back
1420 to the system, which helps keep the system level memory
1421 demands of a long-lived program low.
1422 2. Mapped memory can never become `locked' between
1423 other chunks, as can happen with normally allocated chunks, which
1424 means that even trimming via malloc_trim would not release them.
1425 3. On some systems with "holes" in address spaces, mmap can obtain
1426 memory that sbrk cannot.
1428 However, it has the disadvantages that:
1430 1. The space cannot be reclaimed, consolidated, and then
1431 used to service later requests, as happens with normal chunks.
1432 2. It can lead to more wastage because of mmap page alignment
1434 3. It causes malloc performance to be more dependent on host
1435 system memory management support routines which may vary in
1436 implementation quality and may impose arbitrary
1437 limitations. Generally, servicing a request via normal
1438 malloc steps is faster than going through a system's mmap.
1440 The advantages of mmap nearly always outweigh disadvantages for
1441 "large" chunks, but the value of "large" varies across systems. The
1442 default is an empirically derived value that works well in most
1446 #define M_MMAP_THRESHOLD -3
1448 #ifndef DEFAULT_MMAP_THRESHOLD
1449 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1453 M_MMAP_MAX is the maximum number of requests to simultaneously
1454 service using mmap. This parameter exists because
1455 some systems have a limited number of internal tables for
1456 use by mmap, and using more than a few of them may degrade
1459 The default is set to a value that serves only as a safeguard.
1460 Setting to 0 disables use of mmap for servicing large requests. If
1461 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1462 to non-zero values in mallopt will fail.
1465 #define M_MMAP_MAX -4
1467 #ifndef DEFAULT_MMAP_MAX
1469 #define DEFAULT_MMAP_MAX (65536)
1471 #define DEFAULT_MMAP_MAX (0)
1476 } /* end of extern "C" */
1482 #define BOUNDED_N(ptr, sz) (ptr)
1484 #ifndef RETURN_ADDRESS
1485 #define RETURN_ADDRESS(X_) (NULL)
1488 /* On some platforms we can compile internal, not exported functions better.
1489 Let the environment provide a macro and define it to be empty if it
1490 is not available. */
1491 #ifndef internal_function
1492 # define internal_function
1495 /* Forward declarations. */
1496 struct malloc_chunk
;
1497 typedef struct malloc_chunk
* mchunkptr
;
1499 /* Internal routines. */
1503 Void_t
* _int_malloc(mstate
, size_t);
1504 void _int_free(mstate
, Void_t
*);
1505 Void_t
* _int_realloc(mstate
, Void_t
*, size_t);
1506 Void_t
* _int_memalign(mstate
, size_t, size_t);
1507 Void_t
* _int_valloc(mstate
, size_t);
1508 static Void_t
* _int_pvalloc(mstate
, size_t);
1509 /*static Void_t* cALLOc(size_t, size_t);*/
1510 static Void_t
** _int_icalloc(mstate
, size_t, size_t, Void_t
**);
1511 static Void_t
** _int_icomalloc(mstate
, size_t, size_t*, Void_t
**);
1512 static int mTRIm(size_t);
1513 static size_t mUSABLe(Void_t
*);
1514 static void mSTATs(void);
1515 static int mALLOPt(int, int);
1516 static struct mallinfo
mALLINFo(mstate
);
1517 static void malloc_printerr(int action
, const char *str
, void *ptr
);
1519 static Void_t
* internal_function
mem2mem_check(Void_t
*p
, size_t sz
);
1520 static int internal_function
top_check(void);
1521 static void internal_function
munmap_chunk(mchunkptr p
);
1523 static mchunkptr internal_function
mremap_chunk(mchunkptr p
, size_t new_size
);
1526 static Void_t
* malloc_check(size_t sz
, const Void_t
*caller
);
1527 static void free_check(Void_t
* mem
, const Void_t
*caller
);
1528 static Void_t
* realloc_check(Void_t
* oldmem
, size_t bytes
,
1529 const Void_t
*caller
);
1530 static Void_t
* memalign_check(size_t alignment
, size_t bytes
,
1531 const Void_t
*caller
);
1534 # if USE___THREAD || (defined USE_TLS && !defined SHARED)
1535 /* These routines are never needed in this configuration. */
1542 static Void_t
* malloc_starter(size_t sz
, const Void_t
*caller
);
1543 static Void_t
* memalign_starter(size_t aln
, size_t sz
, const Void_t
*caller
);
1544 static void free_starter(Void_t
* mem
, const Void_t
*caller
);
1546 static Void_t
* malloc_atfork(size_t sz
, const Void_t
*caller
);
1547 static void free_atfork(Void_t
* mem
, const Void_t
*caller
);
1552 Void_t
* _int_malloc();
1554 Void_t
* _int_realloc();
1555 Void_t
* _int_memalign();
1556 Void_t
* _int_valloc();
1557 Void_t
* _int_pvalloc();
1558 /*static Void_t* cALLOc();*/
1559 static Void_t
** _int_icalloc();
1560 static Void_t
** _int_icomalloc();
1562 static size_t mUSABLe();
1563 static void mSTATs();
1564 static int mALLOPt();
1565 static struct mallinfo
mALLINFo();
1572 /* ------------- Optional versions of memcopy ---------------- */
1578 Note: memcpy is ONLY invoked with non-overlapping regions,
1579 so the (usually slower) memmove is not needed.
1582 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1583 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1585 #else /* !USE_MEMCPY */
1587 /* Use Duff's device for good zeroing/copying performance. */
1589 #define MALLOC_ZERO(charp, nbytes) \
1591 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1592 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1594 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1596 case 0: for(;;) { *mzp++ = 0; \
1597 case 7: *mzp++ = 0; \
1598 case 6: *mzp++ = 0; \
1599 case 5: *mzp++ = 0; \
1600 case 4: *mzp++ = 0; \
1601 case 3: *mzp++ = 0; \
1602 case 2: *mzp++ = 0; \
1603 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1607 #define MALLOC_COPY(dest,src,nbytes) \
1609 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1610 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1611 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1613 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1615 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1616 case 7: *mcdst++ = *mcsrc++; \
1617 case 6: *mcdst++ = *mcsrc++; \
1618 case 5: *mcdst++ = *mcsrc++; \
1619 case 4: *mcdst++ = *mcsrc++; \
1620 case 3: *mcdst++ = *mcsrc++; \
1621 case 2: *mcdst++ = *mcsrc++; \
1622 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1628 /* ------------------ MMAP support ------------------ */
1634 #ifndef LACKS_SYS_MMAN_H
1635 #include <sys/mman.h>
1638 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1639 # define MAP_ANONYMOUS MAP_ANON
1641 #if !defined(MAP_FAILED)
1642 # define MAP_FAILED ((char*)-1)
1645 #ifndef MAP_NORESERVE
1646 # ifdef MAP_AUTORESRV
1647 # define MAP_NORESERVE MAP_AUTORESRV
1649 # define MAP_NORESERVE 0
1654 Nearly all versions of mmap support MAP_ANONYMOUS,
1655 so the following is unlikely to be needed, but is
1656 supplied just in case.
1659 #ifndef MAP_ANONYMOUS
1661 static int dev_zero_fd
= -1; /* Cached file descriptor for /dev/zero. */
1663 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1664 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1665 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1666 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1670 #define MMAP(addr, size, prot, flags) \
1671 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1676 #endif /* HAVE_MMAP */
1680 ----------------------- Chunk representations -----------------------
1685 This struct declaration is misleading (but accurate and necessary).
1686 It declares a "view" into memory allowing access to necessary
1687 fields at known offsets from a given base. See explanation below.
1690 struct malloc_chunk
{
1692 INTERNAL_SIZE_T prev_size
; /* Size of previous chunk (if free). */
1693 INTERNAL_SIZE_T size
; /* Size in bytes, including overhead. */
1695 struct malloc_chunk
* fd
; /* double links -- used only if free. */
1696 struct malloc_chunk
* bk
;
1701 malloc_chunk details:
1703 (The following includes lightly edited explanations by Colin Plumb.)
1705 Chunks of memory are maintained using a `boundary tag' method as
1706 described in e.g., Knuth or Standish. (See the paper by Paul
1707 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1708 survey of such techniques.) Sizes of free chunks are stored both
1709 in the front of each chunk and at the end. This makes
1710 consolidating fragmented chunks into bigger chunks very fast. The
1711 size fields also hold bits representing whether chunks are free or
1714 An allocated chunk looks like this:
1717 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1718 | Size of previous chunk, if allocated | |
1719 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1720 | Size of chunk, in bytes |M|P|
1721 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1722 | User data starts here... .
1724 . (malloc_usable_space() bytes) .
1726 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1728 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1731 Where "chunk" is the front of the chunk for the purpose of most of
1732 the malloc code, but "mem" is the pointer that is returned to the
1733 user. "Nextchunk" is the beginning of the next contiguous chunk.
1735 Chunks always begin on even word boundries, so the mem portion
1736 (which is returned to the user) is also on an even word boundary, and
1737 thus at least double-word aligned.
1739 Free chunks are stored in circular doubly-linked lists, and look like this:
1741 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1742 | Size of previous chunk |
1743 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1744 `head:' | Size of chunk, in bytes |P|
1745 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1746 | Forward pointer to next chunk in list |
1747 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1748 | Back pointer to previous chunk in list |
1749 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1750 | Unused space (may be 0 bytes long) .
1753 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1754 `foot:' | Size of chunk, in bytes |
1755 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1757 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1758 chunk size (which is always a multiple of two words), is an in-use
1759 bit for the *previous* chunk. If that bit is *clear*, then the
1760 word before the current chunk size contains the previous chunk
1761 size, and can be used to find the front of the previous chunk.
1762 The very first chunk allocated always has this bit set,
1763 preventing access to non-existent (or non-owned) memory. If
1764 prev_inuse is set for any given chunk, then you CANNOT determine
1765 the size of the previous chunk, and might even get a memory
1766 addressing fault when trying to do so.
1768 Note that the `foot' of the current chunk is actually represented
1769 as the prev_size of the NEXT chunk. This makes it easier to
1770 deal with alignments etc but can be very confusing when trying
1771 to extend or adapt this code.
1773 The two exceptions to all this are
1775 1. The special chunk `top' doesn't bother using the
1776 trailing size field since there is no next contiguous chunk
1777 that would have to index off it. After initialization, `top'
1778 is forced to always exist. If it would become less than
1779 MINSIZE bytes long, it is replenished.
1781 2. Chunks allocated via mmap, which have the second-lowest-order
1782 bit M (IS_MMAPPED) set in their size fields. Because they are
1783 allocated one-by-one, each must contain its own trailing size field.
1788 ---------- Size and alignment checks and conversions ----------
1791 /* conversion from malloc headers to user pointers, and back */
1793 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1794 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1796 /* The smallest possible chunk */
1797 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
1799 /* The smallest size we can malloc is an aligned minimal chunk */
1802 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1804 /* Check if m has acceptable alignment */
1806 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
1810 Check if a request is so large that it would wrap around zero when
1811 padded and aligned. To simplify some other code, the bound is made
1812 low enough so that adding MINSIZE will also not wrap around zero.
1815 #define REQUEST_OUT_OF_RANGE(req) \
1816 ((unsigned long)(req) >= \
1817 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1819 /* pad request bytes into a usable size -- internal version */
1821 #define request2size(req) \
1822 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1824 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1826 /* Same, except also perform argument check */
1828 #define checked_request2size(req, sz) \
1829 if (REQUEST_OUT_OF_RANGE(req)) { \
1830 MALLOC_FAILURE_ACTION; \
1833 (sz) = request2size(req);
1836 --------------- Physical chunk operations ---------------
1840 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1841 #define PREV_INUSE 0x1
1843 /* extract inuse bit of previous chunk */
1844 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1847 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1848 #define IS_MMAPPED 0x2
1850 /* check for mmap()'ed chunk */
1851 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1854 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1855 from a non-main arena. This is only set immediately before handing
1856 the chunk to the user, if necessary. */
1857 #define NON_MAIN_ARENA 0x4
1859 /* check for chunk from non-main arena */
1860 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1864 Bits to mask off when extracting size
1866 Note: IS_MMAPPED is intentionally not masked off from size field in
1867 macros for which mmapped chunks should never be seen. This should
1868 cause helpful core dumps to occur if it is tried by accident by
1869 people extending or adapting this malloc.
1871 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1873 /* Get size, ignoring use bits */
1874 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1877 /* Ptr to next physical malloc_chunk. */
1878 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1880 /* Ptr to previous physical malloc_chunk */
1881 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1883 /* Treat space at ptr + offset as a chunk */
1884 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1886 /* extract p's inuse bit */
1888 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1890 /* set/clear chunk as being inuse without otherwise disturbing */
1891 #define set_inuse(p)\
1892 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1894 #define clear_inuse(p)\
1895 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1898 /* check/set/clear inuse bits in known places */
1899 #define inuse_bit_at_offset(p, s)\
1900 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1902 #define set_inuse_bit_at_offset(p, s)\
1903 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1905 #define clear_inuse_bit_at_offset(p, s)\
1906 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1909 /* Set size at head, without disturbing its use bit */
1910 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1912 /* Set size/use field */
1913 #define set_head(p, s) ((p)->size = (s))
1915 /* Set size at footer (only when chunk is not in use) */
1916 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
1920 -------------------- Internal data structures --------------------
1922 All internal state is held in an instance of malloc_state defined
1923 below. There are no other static variables, except in two optional
1925 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1926 * If HAVE_MMAP is true, but mmap doesn't support
1927 MAP_ANONYMOUS, a dummy file descriptor for mmap.
1929 Beware of lots of tricks that minimize the total bookkeeping space
1930 requirements. The result is a little over 1K bytes (for 4byte
1931 pointers and size_t.)
1937 An array of bin headers for free chunks. Each bin is doubly
1938 linked. The bins are approximately proportionally (log) spaced.
1939 There are a lot of these bins (128). This may look excessive, but
1940 works very well in practice. Most bins hold sizes that are
1941 unusual as malloc request sizes, but are more usual for fragments
1942 and consolidated sets of chunks, which is what these bins hold, so
1943 they can be found quickly. All procedures maintain the invariant
1944 that no consolidated chunk physically borders another one, so each
1945 chunk in a list is known to be preceeded and followed by either
1946 inuse chunks or the ends of memory.
1948 Chunks in bins are kept in size order, with ties going to the
1949 approximately least recently used chunk. Ordering isn't needed
1950 for the small bins, which all contain the same-sized chunks, but
1951 facilitates best-fit allocation for larger chunks. These lists
1952 are just sequential. Keeping them in order almost never requires
1953 enough traversal to warrant using fancier ordered data
1956 Chunks of the same size are linked with the most
1957 recently freed at the front, and allocations are taken from the
1958 back. This results in LRU (FIFO) allocation order, which tends
1959 to give each chunk an equal opportunity to be consolidated with
1960 adjacent freed chunks, resulting in larger free chunks and less
1963 To simplify use in double-linked lists, each bin header acts
1964 as a malloc_chunk. This avoids special-casing for headers.
1965 But to conserve space and improve locality, we allocate
1966 only the fd/bk pointers of bins, and then use repositioning tricks
1967 to treat these as the fields of a malloc_chunk*.
1970 typedef struct malloc_chunk
* mbinptr
;
1972 /* addressing -- note that bin_at(0) does not exist */
1973 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1975 /* analog of ++bin */
1976 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1978 /* Reminders about list directionality within bins */
1979 #define first(b) ((b)->fd)
1980 #define last(b) ((b)->bk)
1982 /* Take a chunk off a bin list */
1983 #define unlink(P, BK, FD) { \
1986 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1987 malloc_printerr (check_action, "corrupted double-linked list", P); \
1997 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1998 8 bytes apart. Larger bins are approximately logarithmically spaced:
2004 4 bins of size 32768
2005 2 bins of size 262144
2006 1 bin of size what's left
2008 There is actually a little bit of slop in the numbers in bin_index
2009 for the sake of speed. This makes no difference elsewhere.
2011 The bins top out around 1MB because we expect to service large
2016 #define NSMALLBINS 64
2017 #define SMALLBIN_WIDTH 8
2018 #define MIN_LARGE_SIZE 512
2020 #define in_smallbin_range(sz) \
2021 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2023 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2025 #define largebin_index(sz) \
2026 (((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2027 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2028 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2029 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2030 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2033 #define bin_index(sz) \
2034 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
2040 All remainders from chunk splits, as well as all returned chunks,
2041 are first placed in the "unsorted" bin. They are then placed
2042 in regular bins after malloc gives them ONE chance to be used before
2043 binning. So, basically, the unsorted_chunks list acts as a queue,
2044 with chunks being placed on it in free (and malloc_consolidate),
2045 and taken off (to be either used or placed in bins) in malloc.
2047 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2048 does not have to be taken into account in size comparisons.
2051 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2052 #define unsorted_chunks(M) (bin_at(M, 1))
2057 The top-most available chunk (i.e., the one bordering the end of
2058 available memory) is treated specially. It is never included in
2059 any bin, is used only if no other chunk is available, and is
2060 released back to the system if it is very large (see
2061 M_TRIM_THRESHOLD). Because top initially
2062 points to its own bin with initial zero size, thus forcing
2063 extension on the first malloc request, we avoid having any special
2064 code in malloc to check whether it even exists yet. But we still
2065 need to do so when getting memory from system, so we make
2066 initial_top treat the bin as a legal but unusable chunk during the
2067 interval between initialization and the first call to
2068 sYSMALLOc. (This is somewhat delicate, since it relies on
2069 the 2 preceding words to be zero during this interval as well.)
2072 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2073 #define initial_top(M) (unsorted_chunks(M))
2078 To help compensate for the large number of bins, a one-level index
2079 structure is used for bin-by-bin searching. `binmap' is a
2080 bitvector recording whether bins are definitely empty so they can
2081 be skipped over during during traversals. The bits are NOT always
2082 cleared as soon as bins are empty, but instead only
2083 when they are noticed to be empty during traversal in malloc.
2086 /* Conservatively use 32 bits per map word, even if on 64bit system */
2087 #define BINMAPSHIFT 5
2088 #define BITSPERMAP (1U << BINMAPSHIFT)
2089 #define BINMAPSIZE (NBINS / BITSPERMAP)
2091 #define idx2block(i) ((i) >> BINMAPSHIFT)
2092 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2094 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2095 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2096 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
2101 An array of lists holding recently freed small chunks. Fastbins
2102 are not doubly linked. It is faster to single-link them, and
2103 since chunks are never removed from the middles of these lists,
2104 double linking is not necessary. Also, unlike regular bins, they
2105 are not even processed in FIFO order (they use faster LIFO) since
2106 ordering doesn't much matter in the transient contexts in which
2107 fastbins are normally used.
2109 Chunks in fastbins keep their inuse bit set, so they cannot
2110 be consolidated with other free chunks. malloc_consolidate
2111 releases all chunks in fastbins and consolidates them with
2115 typedef struct malloc_chunk
* mfastbinptr
;
2117 /* offset 2 to use otherwise unindexable first 2 bins */
2118 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2120 /* The maximum fastbin request size we support */
2121 #define MAX_FAST_SIZE 80
2123 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
2126 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2127 that triggers automatic consolidation of possibly-surrounding
2128 fastbin chunks. This is a heuristic, so the exact value should not
2129 matter too much. It is defined at half the default trim threshold as a
2130 compromise heuristic to only attempt consolidation if it is likely
2131 to lead to trimming. However, it is not dynamically tunable, since
2132 consolidation reduces fragmentation surrounding large chunks even
2133 if trimming is not used.
2136 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2139 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2140 they are used as flags.
2144 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2145 some fastbin chunks. It is set true on entering a chunk into any
2146 fastbin, and cleared only in malloc_consolidate.
2148 The truth value is inverted so that have_fastchunks will be true
2149 upon startup (since statics are zero-filled), simplifying
2150 initialization checks.
2153 #define FASTCHUNKS_BIT (1U)
2155 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
2156 #define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
2157 #define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
2160 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2161 regions. Otherwise, contiguity is exploited in merging together,
2162 when possible, results from consecutive MORECORE calls.
2164 The initial value comes from MORECORE_CONTIGUOUS, but is
2165 changed dynamically if mmap is ever used as an sbrk substitute.
2168 #define NONCONTIGUOUS_BIT (2U)
2170 #define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
2171 #define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
2172 #define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
2173 #define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
2176 Set value of max_fast.
2177 Use impossibly small value if 0.
2178 Precondition: there are no existing fastbin chunks.
2179 Setting the value clears fastchunk bit but preserves noncontiguous bit.
2182 #define set_max_fast(M, s) \
2183 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2185 ((M)->max_fast & NONCONTIGUOUS_BIT)
2189 ----------- Internal state representation and initialization -----------
2192 struct malloc_state
{
2193 /* Serialize access. */
2196 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2197 long stat_lock_direct
, stat_lock_loop
, stat_lock_wait
;
2198 long pad0_
[1]; /* try to give the mutex its own cacheline */
2200 /* The maximum chunk size to be eligible for fastbin */
2201 INTERNAL_SIZE_T max_fast
; /* low 2 bits used as flags */
2204 mfastbinptr fastbins
[NFASTBINS
];
2206 /* Base of the topmost chunk -- not otherwise kept in a bin */
2209 /* The remainder from the most recent split of a small request */
2210 mchunkptr last_remainder
;
2212 /* Normal bins packed as described above */
2213 mchunkptr bins
[NBINS
* 2];
2215 /* Bitmap of bins */
2216 unsigned int binmap
[BINMAPSIZE
];
2219 struct malloc_state
*next
;
2221 /* Memory allocated from the system in this arena. */
2222 INTERNAL_SIZE_T system_mem
;
2223 INTERNAL_SIZE_T max_system_mem
;
2227 /* Tunable parameters */
2228 unsigned long trim_threshold
;
2229 INTERNAL_SIZE_T top_pad
;
2230 INTERNAL_SIZE_T mmap_threshold
;
2232 /* Memory map support */
2237 /* Cache malloc_getpagesize */
2238 unsigned int pagesize
;
2241 INTERNAL_SIZE_T mmapped_mem
;
2242 /*INTERNAL_SIZE_T sbrked_mem;*/
2243 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2244 INTERNAL_SIZE_T max_mmapped_mem
;
2245 INTERNAL_SIZE_T max_total_mem
; /* only kept for NO_THREADS */
2247 /* First address handed out by MORECORE/sbrk. */
2251 /* There are several instances of this struct ("arenas") in this
2252 malloc. If you are adapting this malloc in a way that does NOT use
2253 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2254 before using. This malloc relies on the property that malloc_state
2255 is initialized to all zeroes (as is true of C statics). */
2257 static struct malloc_state main_arena
;
2259 /* There is only one instance of the malloc parameters. */
2261 static struct malloc_par mp_
;
2264 Initialize a malloc_state struct.
2266 This is called only from within malloc_consolidate, which needs
2267 be called in the same contexts anyway. It is never called directly
2268 outside of malloc_consolidate because some optimizing compilers try
2269 to inline it at all call points, which turns out not to be an
2270 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2274 static void malloc_init_state(mstate av
)
2276 static void malloc_init_state(av
) mstate av
;
2282 /* Establish circular links for normal bins */
2283 for (i
= 1; i
< NBINS
; ++i
) {
2285 bin
->fd
= bin
->bk
= bin
;
2288 #if MORECORE_CONTIGUOUS
2289 if (av
!= &main_arena
)
2291 set_noncontiguous(av
);
2293 set_max_fast(av
, DEFAULT_MXFAST
);
2295 av
->top
= initial_top(av
);
2299 Other internal utilities operating on mstates
2303 static Void_t
* sYSMALLOc(INTERNAL_SIZE_T
, mstate
);
2304 static int sYSTRIm(size_t, mstate
);
2305 static void malloc_consolidate(mstate
);
2306 static Void_t
** iALLOc(mstate
, size_t, size_t*, int, Void_t
**);
2308 static Void_t
* sYSMALLOc();
2309 static int sYSTRIm();
2310 static void malloc_consolidate();
2311 static Void_t
** iALLOc();
2315 /* -------------- Early definitions for debugging hooks ---------------- */
2317 /* Define and initialize the hook variables. These weak definitions must
2318 appear before any use of the variables in a function (arena.c uses one). */
2319 #ifndef weak_variable
2321 #define weak_variable /**/
2323 /* In GNU libc we want the hook variables to be weak definitions to
2324 avoid a problem with Emacs. */
2325 #define weak_variable weak_function
2329 /* Forward declarations. */
2330 static Void_t
* malloc_hook_ini
__MALLOC_P ((size_t sz
,
2331 const __malloc_ptr_t caller
));
2332 static Void_t
* realloc_hook_ini
__MALLOC_P ((Void_t
* ptr
, size_t sz
,
2333 const __malloc_ptr_t caller
));
2334 static Void_t
* memalign_hook_ini
__MALLOC_P ((size_t alignment
, size_t sz
,
2335 const __malloc_ptr_t caller
));
2337 void weak_variable (*__malloc_initialize_hook
) (void) = NULL
;
2338 void weak_variable (*__free_hook
) (__malloc_ptr_t __ptr
,
2339 const __malloc_ptr_t
) = NULL
;
2340 __malloc_ptr_t
weak_variable (*__malloc_hook
)
2341 (size_t __size
, const __malloc_ptr_t
) = malloc_hook_ini
;
2342 __malloc_ptr_t
weak_variable (*__realloc_hook
)
2343 (__malloc_ptr_t __ptr
, size_t __size
, const __malloc_ptr_t
)
2345 __malloc_ptr_t
weak_variable (*__memalign_hook
)
2346 (size_t __alignment
, size_t __size
, const __malloc_ptr_t
)
2347 = memalign_hook_ini
;
2348 void weak_variable (*__after_morecore_hook
) (void) = NULL
;
2351 /* ---------------- Error behavior ------------------------------------ */
2353 #ifndef DEFAULT_CHECK_ACTION
2354 #define DEFAULT_CHECK_ACTION 3
2357 static int check_action
= DEFAULT_CHECK_ACTION
;
2360 /* ------------------- Support for multiple arenas -------------------- */
2366 These routines make a number of assertions about the states
2367 of data structures that should be true at all times. If any
2368 are not true, it's very likely that a user program has somehow
2369 trashed memory. (It's also possible that there is a coding error
2370 in malloc. In which case, please report it!)
2375 #define check_chunk(A,P)
2376 #define check_free_chunk(A,P)
2377 #define check_inuse_chunk(A,P)
2378 #define check_remalloced_chunk(A,P,N)
2379 #define check_malloced_chunk(A,P,N)
2380 #define check_malloc_state(A)
2384 #define check_chunk(A,P) do_check_chunk(A,P)
2385 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2386 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2387 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2388 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2389 #define check_malloc_state(A) do_check_malloc_state(A)
2392 Properties of all chunks
2396 static void do_check_chunk(mstate av
, mchunkptr p
)
2398 static void do_check_chunk(av
, p
) mstate av
; mchunkptr p
;
2401 unsigned long sz
= chunksize(p
);
2402 /* min and max possible addresses assuming contiguous allocation */
2403 char* max_address
= (char*)(av
->top
) + chunksize(av
->top
);
2404 char* min_address
= max_address
- av
->system_mem
;
2406 if (!chunk_is_mmapped(p
)) {
2408 /* Has legal address ... */
2410 if (contiguous(av
)) {
2411 assert(((char*)p
) >= min_address
);
2412 assert(((char*)p
+ sz
) <= ((char*)(av
->top
)));
2416 /* top size is always at least MINSIZE */
2417 assert((unsigned long)(sz
) >= MINSIZE
);
2418 /* top predecessor always marked inuse */
2419 assert(prev_inuse(p
));
2425 /* address is outside main heap */
2426 if (contiguous(av
) && av
->top
!= initial_top(av
)) {
2427 assert(((char*)p
) < min_address
|| ((char*)p
) > max_address
);
2429 /* chunk is page-aligned */
2430 assert(((p
->prev_size
+ sz
) & (mp_
.pagesize
-1)) == 0);
2431 /* mem is aligned */
2432 assert(aligned_OK(chunk2mem(p
)));
2434 /* force an appropriate assert violation if debug set */
2435 assert(!chunk_is_mmapped(p
));
2441 Properties of free chunks
2445 static void do_check_free_chunk(mstate av
, mchunkptr p
)
2447 static void do_check_free_chunk(av
, p
) mstate av
; mchunkptr p
;
2450 INTERNAL_SIZE_T sz
= p
->size
& ~(PREV_INUSE
|NON_MAIN_ARENA
);
2451 mchunkptr next
= chunk_at_offset(p
, sz
);
2453 do_check_chunk(av
, p
);
2455 /* Chunk must claim to be free ... */
2457 assert (!chunk_is_mmapped(p
));
2459 /* Unless a special marker, must have OK fields */
2460 if ((unsigned long)(sz
) >= MINSIZE
)
2462 assert((sz
& MALLOC_ALIGN_MASK
) == 0);
2463 assert(aligned_OK(chunk2mem(p
)));
2464 /* ... matching footer field */
2465 assert(next
->prev_size
== sz
);
2466 /* ... and is fully consolidated */
2467 assert(prev_inuse(p
));
2468 assert (next
== av
->top
|| inuse(next
));
2470 /* ... and has minimally sane links */
2471 assert(p
->fd
->bk
== p
);
2472 assert(p
->bk
->fd
== p
);
2474 else /* markers are always of size SIZE_SZ */
2475 assert(sz
== SIZE_SZ
);
2479 Properties of inuse chunks
2483 static void do_check_inuse_chunk(mstate av
, mchunkptr p
)
2485 static void do_check_inuse_chunk(av
, p
) mstate av
; mchunkptr p
;
2490 do_check_chunk(av
, p
);
2492 if (chunk_is_mmapped(p
))
2493 return; /* mmapped chunks have no next/prev */
2495 /* Check whether it claims to be in use ... */
2498 next
= next_chunk(p
);
2500 /* ... and is surrounded by OK chunks.
2501 Since more things can be checked with free chunks than inuse ones,
2502 if an inuse chunk borders them and debug is on, it's worth doing them.
2504 if (!prev_inuse(p
)) {
2505 /* Note that we cannot even look at prev unless it is not inuse */
2506 mchunkptr prv
= prev_chunk(p
);
2507 assert(next_chunk(prv
) == p
);
2508 do_check_free_chunk(av
, prv
);
2511 if (next
== av
->top
) {
2512 assert(prev_inuse(next
));
2513 assert(chunksize(next
) >= MINSIZE
);
2515 else if (!inuse(next
))
2516 do_check_free_chunk(av
, next
);
2520 Properties of chunks recycled from fastbins
2524 static void do_check_remalloced_chunk(mstate av
, mchunkptr p
, INTERNAL_SIZE_T s
)
2526 static void do_check_remalloced_chunk(av
, p
, s
)
2527 mstate av
; mchunkptr p
; INTERNAL_SIZE_T s
;
2530 INTERNAL_SIZE_T sz
= p
->size
& ~(PREV_INUSE
|NON_MAIN_ARENA
);
2532 if (!chunk_is_mmapped(p
)) {
2533 assert(av
== arena_for_chunk(p
));
2534 if (chunk_non_main_arena(p
))
2535 assert(av
!= &main_arena
);
2537 assert(av
== &main_arena
);
2540 do_check_inuse_chunk(av
, p
);
2542 /* Legal size ... */
2543 assert((sz
& MALLOC_ALIGN_MASK
) == 0);
2544 assert((unsigned long)(sz
) >= MINSIZE
);
2545 /* ... and alignment */
2546 assert(aligned_OK(chunk2mem(p
)));
2547 /* chunk is less than MINSIZE more than request */
2548 assert((long)(sz
) - (long)(s
) >= 0);
2549 assert((long)(sz
) - (long)(s
+ MINSIZE
) < 0);
2553 Properties of nonrecycled chunks at the point they are malloced
2557 static void do_check_malloced_chunk(mstate av
, mchunkptr p
, INTERNAL_SIZE_T s
)
2559 static void do_check_malloced_chunk(av
, p
, s
)
2560 mstate av
; mchunkptr p
; INTERNAL_SIZE_T s
;
2563 /* same as recycled case ... */
2564 do_check_remalloced_chunk(av
, p
, s
);
2567 ... plus, must obey implementation invariant that prev_inuse is
2568 always true of any allocated chunk; i.e., that each allocated
2569 chunk borders either a previously allocated and still in-use
2570 chunk, or the base of its memory arena. This is ensured
2571 by making all allocations from the the `lowest' part of any found
2572 chunk. This does not necessarily hold however for chunks
2573 recycled via fastbins.
2576 assert(prev_inuse(p
));
2581 Properties of malloc_state.
2583 This may be useful for debugging malloc, as well as detecting user
2584 programmer errors that somehow write into malloc_state.
2586 If you are extending or experimenting with this malloc, you can
2587 probably figure out how to hack this routine to print out or
2588 display chunk addresses, sizes, bins, and other instrumentation.
2591 static void do_check_malloc_state(mstate av
)
2597 unsigned int binbit
;
2600 INTERNAL_SIZE_T size
;
2601 unsigned long total
= 0;
2604 /* internal size_t must be no wider than pointer type */
2605 assert(sizeof(INTERNAL_SIZE_T
) <= sizeof(char*));
2607 /* alignment is a power of 2 */
2608 assert((MALLOC_ALIGNMENT
& (MALLOC_ALIGNMENT
-1)) == 0);
2610 /* cannot run remaining checks until fully initialized */
2611 if (av
->top
== 0 || av
->top
== initial_top(av
))
2614 /* pagesize is a power of 2 */
2615 assert((mp_
.pagesize
& (mp_
.pagesize
-1)) == 0);
2617 /* A contiguous main_arena is consistent with sbrk_base. */
2618 if (av
== &main_arena
&& contiguous(av
))
2619 assert((char*)mp_
.sbrk_base
+ av
->system_mem
==
2620 (char*)av
->top
+ chunksize(av
->top
));
2622 /* properties of fastbins */
2624 /* max_fast is in allowed range */
2625 assert((av
->max_fast
& ~1) <= request2size(MAX_FAST_SIZE
));
2627 max_fast_bin
= fastbin_index(av
->max_fast
);
2629 for (i
= 0; i
< NFASTBINS
; ++i
) {
2630 p
= av
->fastbins
[i
];
2632 /* all bins past max_fast are empty */
2633 if (i
> max_fast_bin
)
2637 /* each chunk claims to be inuse */
2638 do_check_inuse_chunk(av
, p
);
2639 total
+= chunksize(p
);
2640 /* chunk belongs in this bin */
2641 assert(fastbin_index(chunksize(p
)) == i
);
2647 assert(have_fastchunks(av
));
2648 else if (!have_fastchunks(av
))
2651 /* check normal bins */
2652 for (i
= 1; i
< NBINS
; ++i
) {
2655 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2657 binbit
= get_binmap(av
,i
);
2658 empty
= last(b
) == b
;
2665 for (p
= last(b
); p
!= b
; p
= p
->bk
) {
2666 /* each chunk claims to be free */
2667 do_check_free_chunk(av
, p
);
2668 size
= chunksize(p
);
2671 /* chunk belongs in bin */
2672 idx
= bin_index(size
);
2674 /* lists are sorted */
2675 assert(p
->bk
== b
||
2676 (unsigned long)chunksize(p
->bk
) >= (unsigned long)chunksize(p
));
2678 /* chunk is followed by a legal chain of inuse chunks */
2679 for (q
= next_chunk(p
);
2680 (q
!= av
->top
&& inuse(q
) &&
2681 (unsigned long)(chunksize(q
)) >= MINSIZE
);
2683 do_check_inuse_chunk(av
, q
);
2687 /* top chunk is OK */
2688 check_chunk(av
, av
->top
);
2690 /* sanity checks for statistics */
2693 assert(total
<= (unsigned long)(mp_
.max_total_mem
));
2694 assert(mp_
.n_mmaps
>= 0);
2696 assert(mp_
.n_mmaps
<= mp_
.n_mmaps_max
);
2697 assert(mp_
.n_mmaps
<= mp_
.max_n_mmaps
);
2699 assert((unsigned long)(av
->system_mem
) <=
2700 (unsigned long)(av
->max_system_mem
));
2702 assert((unsigned long)(mp_
.mmapped_mem
) <=
2703 (unsigned long)(mp_
.max_mmapped_mem
));
2706 assert((unsigned long)(mp_
.max_total_mem
) >=
2707 (unsigned long)(mp_
.mmapped_mem
) + (unsigned long)(av
->system_mem
));
2713 /* ----------------- Support for debugging hooks -------------------- */
2717 /* ----------- Routines dealing with system allocation -------------- */
2720 sysmalloc handles malloc cases requiring more memory from the system.
2721 On entry, it is assumed that av->top does not have enough
2722 space to service request for nb bytes, thus requiring that av->top
2723 be extended or replaced.
2727 static Void_t
* sYSMALLOc(INTERNAL_SIZE_T nb
, mstate av
)
2729 static Void_t
* sYSMALLOc(nb
, av
) INTERNAL_SIZE_T nb
; mstate av
;
2732 mchunkptr old_top
; /* incoming value of av->top */
2733 INTERNAL_SIZE_T old_size
; /* its size */
2734 char* old_end
; /* its end address */
2736 long size
; /* arg to first MORECORE or mmap call */
2737 char* brk
; /* return value from MORECORE */
2739 long correction
; /* arg to 2nd MORECORE call */
2740 char* snd_brk
; /* 2nd return val */
2742 INTERNAL_SIZE_T front_misalign
; /* unusable bytes at front of new space */
2743 INTERNAL_SIZE_T end_misalign
; /* partial page left at end of new space */
2744 char* aligned_brk
; /* aligned offset into brk */
2746 mchunkptr p
; /* the allocated/returned chunk */
2747 mchunkptr remainder
; /* remainder from allocation */
2748 unsigned long remainder_size
; /* its size */
2750 unsigned long sum
; /* for updating stats */
2752 size_t pagemask
= mp_
.pagesize
- 1;
2758 If have mmap, and the request size meets the mmap threshold, and
2759 the system supports mmap, and there are few enough currently
2760 allocated mmapped regions, try to directly map this request
2761 rather than expanding top.
2764 if ((unsigned long)(nb
) >= (unsigned long)(mp_
.mmap_threshold
) &&
2765 (mp_
.n_mmaps
< mp_
.n_mmaps_max
)) {
2767 char* mm
; /* return value from mmap call*/
2770 Round up size to nearest page. For mmapped chunks, the overhead
2771 is one SIZE_SZ unit larger than for normal chunks, because there
2772 is no following chunk whose prev_size field could be used.
2774 size
= (nb
+ SIZE_SZ
+ MALLOC_ALIGN_MASK
+ pagemask
) & ~pagemask
;
2776 /* Don't try if size wraps around 0 */
2777 if ((unsigned long)(size
) > (unsigned long)(nb
)) {
2779 mm
= (char*)(MMAP(0, size
, PROT_READ
|PROT_WRITE
, MAP_PRIVATE
));
2781 if (mm
!= MAP_FAILED
) {
2784 The offset to the start of the mmapped region is stored
2785 in the prev_size field of the chunk. This allows us to adjust
2786 returned start address to meet alignment requirements here
2787 and in memalign(), and still be able to compute proper
2788 address argument for later munmap in free() and realloc().
2791 front_misalign
= (INTERNAL_SIZE_T
)chunk2mem(mm
) & MALLOC_ALIGN_MASK
;
2792 if (front_misalign
> 0) {
2793 correction
= MALLOC_ALIGNMENT
- front_misalign
;
2794 p
= (mchunkptr
)(mm
+ correction
);
2795 p
->prev_size
= correction
;
2796 set_head(p
, (size
- correction
) |IS_MMAPPED
);
2800 set_head(p
, size
|IS_MMAPPED
);
2803 /* update statistics */
2805 if (++mp_
.n_mmaps
> mp_
.max_n_mmaps
)
2806 mp_
.max_n_mmaps
= mp_
.n_mmaps
;
2808 sum
= mp_
.mmapped_mem
+= size
;
2809 if (sum
> (unsigned long)(mp_
.max_mmapped_mem
))
2810 mp_
.max_mmapped_mem
= sum
;
2812 sum
+= av
->system_mem
;
2813 if (sum
> (unsigned long)(mp_
.max_total_mem
))
2814 mp_
.max_total_mem
= sum
;
2819 return chunk2mem(p
);
2825 /* Record incoming configuration of top */
2828 old_size
= chunksize(old_top
);
2829 old_end
= (char*)(chunk_at_offset(old_top
, old_size
));
2831 brk
= snd_brk
= (char*)(MORECORE_FAILURE
);
2834 If not the first time through, we require old_size to be
2835 at least MINSIZE and to have prev_inuse set.
2838 assert((old_top
== initial_top(av
) && old_size
== 0) ||
2839 ((unsigned long) (old_size
) >= MINSIZE
&&
2840 prev_inuse(old_top
) &&
2841 ((unsigned long)old_end
& pagemask
) == 0));
2843 /* Precondition: not enough current space to satisfy nb request */
2844 assert((unsigned long)(old_size
) < (unsigned long)(nb
+ MINSIZE
));
2846 /* Precondition: all fastbins are consolidated */
2847 assert(!have_fastchunks(av
));
2850 if (av
!= &main_arena
) {
2852 heap_info
*old_heap
, *heap
;
2853 size_t old_heap_size
;
2855 /* First try to extend the current heap. */
2856 old_heap
= heap_for_ptr(old_top
);
2857 old_heap_size
= old_heap
->size
;
2858 if (grow_heap(old_heap
, MINSIZE
+ nb
- old_size
) == 0) {
2859 av
->system_mem
+= old_heap
->size
- old_heap_size
;
2860 arena_mem
+= old_heap
->size
- old_heap_size
;
2862 if(mmapped_mem
+ arena_mem
+ sbrked_mem
> max_total_mem
)
2863 max_total_mem
= mmapped_mem
+ arena_mem
+ sbrked_mem
;
2865 set_head(old_top
, (((char *)old_heap
+ old_heap
->size
) - (char *)old_top
)
2868 else if ((heap
= new_heap(nb
+ (MINSIZE
+ sizeof(*heap
)), mp_
.top_pad
))) {
2869 /* Use a newly allocated heap. */
2871 heap
->prev
= old_heap
;
2872 av
->system_mem
+= heap
->size
;
2873 arena_mem
+= heap
->size
;
2875 if((unsigned long)(mmapped_mem
+ arena_mem
+ sbrked_mem
) > max_total_mem
)
2876 max_total_mem
= mmapped_mem
+ arena_mem
+ sbrked_mem
;
2878 /* Set up the new top. */
2879 top(av
) = chunk_at_offset(heap
, sizeof(*heap
));
2880 set_head(top(av
), (heap
->size
- sizeof(*heap
)) | PREV_INUSE
);
2882 /* Setup fencepost and free the old top chunk. */
2883 /* The fencepost takes at least MINSIZE bytes, because it might
2884 become the top chunk again later. Note that a footer is set
2885 up, too, although the chunk is marked in use. */
2886 old_size
-= MINSIZE
;
2887 set_head(chunk_at_offset(old_top
, old_size
+ 2*SIZE_SZ
), 0|PREV_INUSE
);
2888 if (old_size
>= MINSIZE
) {
2889 set_head(chunk_at_offset(old_top
, old_size
), (2*SIZE_SZ
)|PREV_INUSE
);
2890 set_foot(chunk_at_offset(old_top
, old_size
), (2*SIZE_SZ
));
2891 set_head(old_top
, old_size
|PREV_INUSE
|NON_MAIN_ARENA
);
2892 _int_free(av
, chunk2mem(old_top
));
2894 set_head(old_top
, (old_size
+ 2*SIZE_SZ
)|PREV_INUSE
);
2895 set_foot(old_top
, (old_size
+ 2*SIZE_SZ
));
2899 } else { /* av == main_arena */
2902 /* Request enough space for nb + pad + overhead */
2904 size
= nb
+ mp_
.top_pad
+ MINSIZE
;
2907 If contiguous, we can subtract out existing space that we hope to
2908 combine with new space. We add it back later only if
2909 we don't actually get contiguous space.
2916 Round to a multiple of page size.
2917 If MORECORE is not contiguous, this ensures that we only call it
2918 with whole-page arguments. And if MORECORE is contiguous and
2919 this is not first time through, this preserves page-alignment of
2920 previous calls. Otherwise, we correct to page-align below.
2923 size
= (size
+ pagemask
) & ~pagemask
;
2926 Don't try to call MORECORE if argument is so big as to appear
2927 negative. Note that since mmap takes size_t arg, it may succeed
2928 below even if we cannot call MORECORE.
2932 brk
= (char*)(MORECORE(size
));
2934 if (brk
!= (char*)(MORECORE_FAILURE
)) {
2935 /* Call the `morecore' hook if necessary. */
2936 if (__after_morecore_hook
)
2937 (*__after_morecore_hook
) ();
2940 If have mmap, try using it as a backup when MORECORE fails or
2941 cannot be used. This is worth doing on systems that have "holes" in
2942 address space, so sbrk cannot extend to give contiguous space, but
2943 space is available elsewhere. Note that we ignore mmap max count
2944 and threshold limits, since the space will not be used as a
2945 segregated mmap region.
2949 /* Cannot merge with old top, so add its size back in */
2951 size
= (size
+ old_size
+ pagemask
) & ~pagemask
;
2953 /* If we are relying on mmap as backup, then use larger units */
2954 if ((unsigned long)(size
) < (unsigned long)(MMAP_AS_MORECORE_SIZE
))
2955 size
= MMAP_AS_MORECORE_SIZE
;
2957 /* Don't try if size wraps around 0 */
2958 if ((unsigned long)(size
) > (unsigned long)(nb
)) {
2960 char *mbrk
= (char*)(MMAP(0, size
, PROT_READ
|PROT_WRITE
, MAP_PRIVATE
));
2962 if (mbrk
!= MAP_FAILED
) {
2964 /* We do not need, and cannot use, another sbrk call to find end */
2966 snd_brk
= brk
+ size
;
2969 Record that we no longer have a contiguous sbrk region.
2970 After the first time mmap is used as backup, we do not
2971 ever rely on contiguous space since this could incorrectly
2974 set_noncontiguous(av
);
2980 if (brk
!= (char*)(MORECORE_FAILURE
)) {
2981 if (mp_
.sbrk_base
== 0)
2982 mp_
.sbrk_base
= brk
;
2983 av
->system_mem
+= size
;
2986 If MORECORE extends previous space, we can likewise extend top size.
2989 if (brk
== old_end
&& snd_brk
== (char*)(MORECORE_FAILURE
))
2990 set_head(old_top
, (size
+ old_size
) | PREV_INUSE
);
2992 else if (contiguous(av
) && old_size
&& brk
< old_end
) {
2993 /* Oops! Someone else killed our space.. Can't touch anything. */
2998 Otherwise, make adjustments:
3000 * If the first time through or noncontiguous, we need to call sbrk
3001 just to find out where the end of memory lies.
3003 * We need to ensure that all returned chunks from malloc will meet
3006 * If there was an intervening foreign sbrk, we need to adjust sbrk
3007 request size to account for fact that we will not be able to
3008 combine new space with existing space in old_top.
3010 * Almost all systems internally allocate whole pages at a time, in
3011 which case we might as well use the whole last page of request.
3012 So we allocate enough more memory to hit a page boundary now,
3013 which in turn causes future contiguous calls to page-align.
3022 /* handle contiguous cases */
3023 if (contiguous(av
)) {
3025 /* Count foreign sbrk as system_mem. */
3027 av
->system_mem
+= brk
- old_end
;
3029 /* Guarantee alignment of first new chunk made from this space */
3031 front_misalign
= (INTERNAL_SIZE_T
)chunk2mem(brk
) & MALLOC_ALIGN_MASK
;
3032 if (front_misalign
> 0) {
3035 Skip over some bytes to arrive at an aligned position.
3036 We don't need to specially mark these wasted front bytes.
3037 They will never be accessed anyway because
3038 prev_inuse of av->top (and any chunk created from its start)
3039 is always true after initialization.
3042 correction
= MALLOC_ALIGNMENT
- front_misalign
;
3043 aligned_brk
+= correction
;
3047 If this isn't adjacent to existing space, then we will not
3048 be able to merge with old_top space, so must add to 2nd request.
3051 correction
+= old_size
;
3053 /* Extend the end address to hit a page boundary */
3054 end_misalign
= (INTERNAL_SIZE_T
)(brk
+ size
+ correction
);
3055 correction
+= ((end_misalign
+ pagemask
) & ~pagemask
) - end_misalign
;
3057 assert(correction
>= 0);
3058 snd_brk
= (char*)(MORECORE(correction
));
3061 If can't allocate correction, try to at least find out current
3062 brk. It might be enough to proceed without failing.
3064 Note that if second sbrk did NOT fail, we assume that space
3065 is contiguous with first sbrk. This is a safe assumption unless
3066 program is multithreaded but doesn't use locks and a foreign sbrk
3067 occurred between our first and second calls.
3070 if (snd_brk
== (char*)(MORECORE_FAILURE
)) {
3072 snd_brk
= (char*)(MORECORE(0));
3074 /* Call the `morecore' hook if necessary. */
3075 if (__after_morecore_hook
)
3076 (*__after_morecore_hook
) ();
3079 /* handle non-contiguous cases */
3081 /* MORECORE/mmap must correctly align */
3082 assert(((unsigned long)chunk2mem(brk
) & MALLOC_ALIGN_MASK
) == 0);
3084 /* Find out current end of memory */
3085 if (snd_brk
== (char*)(MORECORE_FAILURE
)) {
3086 snd_brk
= (char*)(MORECORE(0));
3090 /* Adjust top based on results of second sbrk */
3091 if (snd_brk
!= (char*)(MORECORE_FAILURE
)) {
3092 av
->top
= (mchunkptr
)aligned_brk
;
3093 set_head(av
->top
, (snd_brk
- aligned_brk
+ correction
) | PREV_INUSE
);
3094 av
->system_mem
+= correction
;
3097 If not the first time through, we either have a
3098 gap due to foreign sbrk or a non-contiguous region. Insert a
3099 double fencepost at old_top to prevent consolidation with space
3100 we don't own. These fenceposts are artificial chunks that are
3101 marked as inuse and are in any case too small to use. We need
3102 two to make sizes and alignments work out.
3105 if (old_size
!= 0) {
3107 Shrink old_top to insert fenceposts, keeping size a
3108 multiple of MALLOC_ALIGNMENT. We know there is at least
3109 enough space in old_top to do this.
3111 old_size
= (old_size
- 4*SIZE_SZ
) & ~MALLOC_ALIGN_MASK
;
3112 set_head(old_top
, old_size
| PREV_INUSE
);
3115 Note that the following assignments completely overwrite
3116 old_top when old_size was previously MINSIZE. This is
3117 intentional. We need the fencepost, even if old_top otherwise gets
3120 chunk_at_offset(old_top
, old_size
)->size
=
3121 (2*SIZE_SZ
)|PREV_INUSE
;
3123 chunk_at_offset(old_top
, old_size
+ 2*SIZE_SZ
)->size
=
3124 (2*SIZE_SZ
)|PREV_INUSE
;
3126 /* If possible, release the rest. */
3127 if (old_size
>= MINSIZE
) {
3128 _int_free(av
, chunk2mem(old_top
));
3135 /* Update statistics */
3137 sum
= av
->system_mem
+ mp_
.mmapped_mem
;
3138 if (sum
> (unsigned long)(mp_
.max_total_mem
))
3139 mp_
.max_total_mem
= sum
;
3144 } /* if (av != &main_arena) */
3146 if ((unsigned long)av
->system_mem
> (unsigned long)(av
->max_system_mem
))
3147 av
->max_system_mem
= av
->system_mem
;
3148 check_malloc_state(av
);
3150 /* finally, do the allocation */
3152 size
= chunksize(p
);
3154 /* check that one of the above allocation paths succeeded */
3155 if ((unsigned long)(size
) >= (unsigned long)(nb
+ MINSIZE
)) {
3156 remainder_size
= size
- nb
;
3157 remainder
= chunk_at_offset(p
, nb
);
3158 av
->top
= remainder
;
3159 set_head(p
, nb
| PREV_INUSE
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
3160 set_head(remainder
, remainder_size
| PREV_INUSE
);
3161 check_malloced_chunk(av
, p
, nb
);
3162 return chunk2mem(p
);
3165 /* catch all failure paths */
3166 MALLOC_FAILURE_ACTION
;
3172 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3173 to the system (via negative arguments to sbrk) if there is unused
3174 memory at the `high' end of the malloc pool. It is called
3175 automatically by free() when top space exceeds the trim
3176 threshold. It is also called by the public malloc_trim routine. It
3177 returns 1 if it actually released any memory, else 0.
3181 static int sYSTRIm(size_t pad
, mstate av
)
3183 static int sYSTRIm(pad
, av
) size_t pad
; mstate av
;
3186 long top_size
; /* Amount of top-most memory */
3187 long extra
; /* Amount to release */
3188 long released
; /* Amount actually released */
3189 char* current_brk
; /* address returned by pre-check sbrk call */
3190 char* new_brk
; /* address returned by post-check sbrk call */
3193 pagesz
= mp_
.pagesize
;
3194 top_size
= chunksize(av
->top
);
3196 /* Release in pagesize units, keeping at least one page */
3197 extra
= ((top_size
- pad
- MINSIZE
+ (pagesz
-1)) / pagesz
- 1) * pagesz
;
3202 Only proceed if end of memory is where we last set it.
3203 This avoids problems if there were foreign sbrk calls.
3205 current_brk
= (char*)(MORECORE(0));
3206 if (current_brk
== (char*)(av
->top
) + top_size
) {
3209 Attempt to release memory. We ignore MORECORE return value,
3210 and instead call again to find out where new end of memory is.
3211 This avoids problems if first call releases less than we asked,
3212 of if failure somehow altered brk value. (We could still
3213 encounter problems if it altered brk in some very bad way,
3214 but the only thing we can do is adjust anyway, which will cause
3215 some downstream failure.)
3219 /* Call the `morecore' hook if necessary. */
3220 if (__after_morecore_hook
)
3221 (*__after_morecore_hook
) ();
3222 new_brk
= (char*)(MORECORE(0));
3224 if (new_brk
!= (char*)MORECORE_FAILURE
) {
3225 released
= (long)(current_brk
- new_brk
);
3227 if (released
!= 0) {
3228 /* Success. Adjust top. */
3229 av
->system_mem
-= released
;
3230 set_head(av
->top
, (top_size
- released
) | PREV_INUSE
);
3231 check_malloc_state(av
);
3245 munmap_chunk(mchunkptr p
)
3247 munmap_chunk(p
) mchunkptr p
;
3250 INTERNAL_SIZE_T size
= chunksize(p
);
3253 assert (chunk_is_mmapped(p
));
3255 assert(! ((char*)p
>= mp_
.sbrk_base
&& (char*)p
< mp_
.sbrk_base
+ mp_
.sbrked_mem
));
3256 assert((mp_
.n_mmaps
> 0));
3258 assert(((p
->prev_size
+ size
) & (mp_
.pagesize
-1)) == 0);
3261 mp_
.mmapped_mem
-= (size
+ p
->prev_size
);
3263 ret
= munmap((char *)p
- p
->prev_size
, size
+ p
->prev_size
);
3265 /* munmap returns non-zero on failure */
3274 mremap_chunk(mchunkptr p
, size_t new_size
)
3276 mremap_chunk(p
, new_size
) mchunkptr p
; size_t new_size
;
3279 size_t page_mask
= mp_
.pagesize
- 1;
3280 INTERNAL_SIZE_T offset
= p
->prev_size
;
3281 INTERNAL_SIZE_T size
= chunksize(p
);
3284 assert (chunk_is_mmapped(p
));
3286 assert(! ((char*)p
>= mp_
.sbrk_base
&& (char*)p
< mp_
.sbrk_base
+ mp_
.sbrked_mem
));
3287 assert((mp_
.n_mmaps
> 0));
3289 assert(((size
+ offset
) & (mp_
.pagesize
-1)) == 0);
3291 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3292 new_size
= (new_size
+ offset
+ SIZE_SZ
+ page_mask
) & ~page_mask
;
3294 cp
= (char *)mremap((char *)p
- offset
, size
+ offset
, new_size
,
3297 if (cp
== MAP_FAILED
) return 0;
3299 p
= (mchunkptr
)(cp
+ offset
);
3301 assert(aligned_OK(chunk2mem(p
)));
3303 assert((p
->prev_size
== offset
));
3304 set_head(p
, (new_size
- offset
)|IS_MMAPPED
);
3306 mp_
.mmapped_mem
-= size
+ offset
;
3307 mp_
.mmapped_mem
+= new_size
;
3308 if ((unsigned long)mp_
.mmapped_mem
> (unsigned long)mp_
.max_mmapped_mem
)
3309 mp_
.max_mmapped_mem
= mp_
.mmapped_mem
;
3311 if ((unsigned long)(mp_
.mmapped_mem
+ arena_mem
+ main_arena
.system_mem
) >
3313 mp_
.max_total_mem
= mp_
.mmapped_mem
+ arena_mem
+ main_arena
.system_mem
;
3318 #endif /* HAVE_MREMAP */
3320 #endif /* HAVE_MMAP */
3322 /*------------------------ Public wrappers. --------------------------------*/
3325 public_mALLOc(size_t bytes
)
3330 __malloc_ptr_t (*hook
) (size_t, __const __malloc_ptr_t
) = __malloc_hook
;
3332 return (*hook
)(bytes
, RETURN_ADDRESS (0));
3334 arena_get(ar_ptr
, bytes
);
3337 victim
= _int_malloc(ar_ptr
, bytes
);
3339 /* Maybe the failure is due to running out of mmapped areas. */
3340 if(ar_ptr
!= &main_arena
) {
3341 (void)mutex_unlock(&ar_ptr
->mutex
);
3342 (void)mutex_lock(&main_arena
.mutex
);
3343 victim
= _int_malloc(&main_arena
, bytes
);
3344 (void)mutex_unlock(&main_arena
.mutex
);
3347 /* ... or sbrk() has failed and there is still a chance to mmap() */
3348 ar_ptr
= arena_get2(ar_ptr
->next
? ar_ptr
: 0, bytes
);
3349 (void)mutex_unlock(&main_arena
.mutex
);
3351 victim
= _int_malloc(ar_ptr
, bytes
);
3352 (void)mutex_unlock(&ar_ptr
->mutex
);
3357 (void)mutex_unlock(&ar_ptr
->mutex
);
3358 assert(!victim
|| chunk_is_mmapped(mem2chunk(victim
)) ||
3359 ar_ptr
== arena_for_chunk(mem2chunk(victim
)));
3362 #ifdef libc_hidden_def
3363 libc_hidden_def(public_mALLOc
)
3367 public_fREe(Void_t
* mem
)
3370 mchunkptr p
; /* chunk corresponding to mem */
3372 void (*hook
) (__malloc_ptr_t
, __const __malloc_ptr_t
) = __free_hook
;
3374 (*hook
)(mem
, RETURN_ADDRESS (0));
3378 if (mem
== 0) /* free(0) has no effect */
3384 if (chunk_is_mmapped(p
)) /* release mmapped memory. */
3391 ar_ptr
= arena_for_chunk(p
);
3393 if(!mutex_trylock(&ar_ptr
->mutex
))
3394 ++(ar_ptr
->stat_lock_direct
);
3396 (void)mutex_lock(&ar_ptr
->mutex
);
3397 ++(ar_ptr
->stat_lock_wait
);
3400 (void)mutex_lock(&ar_ptr
->mutex
);
3402 _int_free(ar_ptr
, mem
);
3403 (void)mutex_unlock(&ar_ptr
->mutex
);
3405 #ifdef libc_hidden_def
3406 libc_hidden_def (public_fREe
)
3410 public_rEALLOc(Void_t
* oldmem
, size_t bytes
)
3413 INTERNAL_SIZE_T nb
; /* padded request size */
3415 mchunkptr oldp
; /* chunk corresponding to oldmem */
3416 INTERNAL_SIZE_T oldsize
; /* its size */
3418 Void_t
* newp
; /* chunk to return */
3420 __malloc_ptr_t (*hook
) (__malloc_ptr_t
, size_t, __const __malloc_ptr_t
) =
3423 return (*hook
)(oldmem
, bytes
, RETURN_ADDRESS (0));
3425 #if REALLOC_ZERO_BYTES_FREES
3426 if (bytes
== 0 && oldmem
!= NULL
) { public_fREe(oldmem
); return 0; }
3429 /* realloc of null is supposed to be same as malloc */
3430 if (oldmem
== 0) return public_mALLOc(bytes
);
3432 oldp
= mem2chunk(oldmem
);
3433 oldsize
= chunksize(oldp
);
3435 checked_request2size(bytes
, nb
);
3438 if (chunk_is_mmapped(oldp
))
3443 newp
= mremap_chunk(oldp
, nb
);
3444 if(newp
) return chunk2mem(newp
);
3446 /* Note the extra SIZE_SZ overhead. */
3447 if(oldsize
- SIZE_SZ
>= nb
) return oldmem
; /* do nothing */
3448 /* Must alloc, copy, free. */
3449 newmem
= public_mALLOc(bytes
);
3450 if (newmem
== 0) return 0; /* propagate failure */
3451 MALLOC_COPY(newmem
, oldmem
, oldsize
- 2*SIZE_SZ
);
3457 ar_ptr
= arena_for_chunk(oldp
);
3459 if(!mutex_trylock(&ar_ptr
->mutex
))
3460 ++(ar_ptr
->stat_lock_direct
);
3462 (void)mutex_lock(&ar_ptr
->mutex
);
3463 ++(ar_ptr
->stat_lock_wait
);
3466 (void)mutex_lock(&ar_ptr
->mutex
);
3470 /* As in malloc(), remember this arena for the next allocation. */
3471 tsd_setspecific(arena_key
, (Void_t
*)ar_ptr
);
3474 newp
= _int_realloc(ar_ptr
, oldmem
, bytes
);
3476 (void)mutex_unlock(&ar_ptr
->mutex
);
3477 assert(!newp
|| chunk_is_mmapped(mem2chunk(newp
)) ||
3478 ar_ptr
== arena_for_chunk(mem2chunk(newp
)));
3481 #ifdef libc_hidden_def
3482 libc_hidden_def (public_rEALLOc
)
3486 public_mEMALIGn(size_t alignment
, size_t bytes
)
3491 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3492 __const __malloc_ptr_t
)) =
3495 return (*hook
)(alignment
, bytes
, RETURN_ADDRESS (0));
3497 /* If need less alignment than we give anyway, just relay to malloc */
3498 if (alignment
<= MALLOC_ALIGNMENT
) return public_mALLOc(bytes
);
3500 /* Otherwise, ensure that it is at least a minimum chunk size */
3501 if (alignment
< MINSIZE
) alignment
= MINSIZE
;
3503 arena_get(ar_ptr
, bytes
+ alignment
+ MINSIZE
);
3506 p
= _int_memalign(ar_ptr
, alignment
, bytes
);
3507 (void)mutex_unlock(&ar_ptr
->mutex
);
3509 /* Maybe the failure is due to running out of mmapped areas. */
3510 if(ar_ptr
!= &main_arena
) {
3511 (void)mutex_lock(&main_arena
.mutex
);
3512 p
= _int_memalign(&main_arena
, alignment
, bytes
);
3513 (void)mutex_unlock(&main_arena
.mutex
);
3516 /* ... or sbrk() has failed and there is still a chance to mmap() */
3517 ar_ptr
= arena_get2(ar_ptr
->next
? ar_ptr
: 0, bytes
);
3519 p
= _int_memalign(ar_ptr
, alignment
, bytes
);
3520 (void)mutex_unlock(&ar_ptr
->mutex
);
3525 assert(!p
|| chunk_is_mmapped(mem2chunk(p
)) ||
3526 ar_ptr
== arena_for_chunk(mem2chunk(p
)));
3529 #ifdef libc_hidden_def
3530 libc_hidden_def (public_mEMALIGn
)
3534 public_vALLOc(size_t bytes
)
3539 if(__malloc_initialized
< 0)
3542 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3543 __const __malloc_ptr_t
)) =
3546 return (*hook
)(mp_
.pagesize
, bytes
, RETURN_ADDRESS (0));
3548 arena_get(ar_ptr
, bytes
+ mp_
.pagesize
+ MINSIZE
);
3551 p
= _int_valloc(ar_ptr
, bytes
);
3552 (void)mutex_unlock(&ar_ptr
->mutex
);
3557 public_pVALLOc(size_t bytes
)
3562 if(__malloc_initialized
< 0)
3565 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
3566 __const __malloc_ptr_t
)) =
3569 return (*hook
)(mp_
.pagesize
,
3570 (bytes
+ mp_
.pagesize
- 1) & ~(mp_
.pagesize
- 1),
3571 RETURN_ADDRESS (0));
3573 arena_get(ar_ptr
, bytes
+ 2*mp_
.pagesize
+ MINSIZE
);
3574 p
= _int_pvalloc(ar_ptr
, bytes
);
3575 (void)mutex_unlock(&ar_ptr
->mutex
);
3580 public_cALLOc(size_t n
, size_t elem_size
)
3583 mchunkptr oldtop
, p
;
3584 INTERNAL_SIZE_T bytes
, sz
, csz
, oldtopsize
;
3586 unsigned long clearsize
;
3587 unsigned long nclears
;
3589 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, __const __malloc_ptr_t
)) =
3592 /* size_t is unsigned so the behavior on overflow is defined. */
3593 bytes
= n
* elem_size
;
3594 #define HALF_INTERNAL_SIZE_T \
3595 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3596 if (__builtin_expect ((n
| elem_size
) >= HALF_INTERNAL_SIZE_T
, 0)) {
3597 if (elem_size
!= 0 && bytes
/ elem_size
!= n
) {
3598 MALLOC_FAILURE_ACTION
;
3605 mem
= (*hook
)(sz
, RETURN_ADDRESS (0));
3609 return memset(mem
, 0, sz
);
3611 while(sz
> 0) ((char*)mem
)[--sz
] = 0; /* rather inefficient */
3622 /* Check if we hand out the top chunk, in which case there may be no
3626 oldtopsize
= chunksize(top(av
));
3627 #if MORECORE_CLEARS < 2
3628 /* Only newly allocated memory is guaranteed to be cleared. */
3629 if (av
== &main_arena
&&
3630 oldtopsize
< mp_
.sbrk_base
+ av
->max_system_mem
- (char *)oldtop
)
3631 oldtopsize
= (mp_
.sbrk_base
+ av
->max_system_mem
- (char *)oldtop
);
3634 mem
= _int_malloc(av
, sz
);
3636 /* Only clearing follows, so we can unlock early. */
3637 (void)mutex_unlock(&av
->mutex
);
3639 assert(!mem
|| chunk_is_mmapped(mem2chunk(mem
)) ||
3640 av
== arena_for_chunk(mem2chunk(mem
)));
3643 /* Maybe the failure is due to running out of mmapped areas. */
3644 if(av
!= &main_arena
) {
3645 (void)mutex_lock(&main_arena
.mutex
);
3646 mem
= _int_malloc(&main_arena
, sz
);
3647 (void)mutex_unlock(&main_arena
.mutex
);
3650 /* ... or sbrk() has failed and there is still a chance to mmap() */
3651 (void)mutex_lock(&main_arena
.mutex
);
3652 av
= arena_get2(av
->next
? av
: 0, sz
);
3653 (void)mutex_unlock(&main_arena
.mutex
);
3655 mem
= _int_malloc(av
, sz
);
3656 (void)mutex_unlock(&av
->mutex
);
3660 if (mem
== 0) return 0;
3664 /* Two optional cases in which clearing not necessary */
3666 if (chunk_is_mmapped(p
))
3673 if (p
== oldtop
&& csz
> oldtopsize
) {
3674 /* clear only the bytes from non-freshly-sbrked memory */
3679 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3680 contents have an odd number of INTERNAL_SIZE_T-sized words;
3682 d
= (INTERNAL_SIZE_T
*)mem
;
3683 clearsize
= csz
- SIZE_SZ
;
3684 nclears
= clearsize
/ sizeof(INTERNAL_SIZE_T
);
3685 assert(nclears
>= 3);
3688 MALLOC_ZERO(d
, clearsize
);
3712 public_iCALLOc(size_t n
, size_t elem_size
, Void_t
** chunks
)
3717 arena_get(ar_ptr
, n
*elem_size
);
3721 m
= _int_icalloc(ar_ptr
, n
, elem_size
, chunks
);
3722 (void)mutex_unlock(&ar_ptr
->mutex
);
3727 public_iCOMALLOc(size_t n
, size_t sizes
[], Void_t
** chunks
)
3732 arena_get(ar_ptr
, 0);
3736 m
= _int_icomalloc(ar_ptr
, n
, sizes
, chunks
);
3737 (void)mutex_unlock(&ar_ptr
->mutex
);
3744 public_cFREe(Void_t
* m
)
3752 public_mTRIm(size_t s
)
3756 (void)mutex_lock(&main_arena
.mutex
);
3758 (void)mutex_unlock(&main_arena
.mutex
);
3763 public_mUSABLe(Void_t
* m
)
3767 result
= mUSABLe(m
);
3777 struct mallinfo
public_mALLINFo()
3781 if(__malloc_initialized
< 0)
3783 (void)mutex_lock(&main_arena
.mutex
);
3784 m
= mALLINFo(&main_arena
);
3785 (void)mutex_unlock(&main_arena
.mutex
);
3790 public_mALLOPt(int p
, int v
)
3793 result
= mALLOPt(p
, v
);
3798 ------------------------------ malloc ------------------------------
3802 _int_malloc(mstate av
, size_t bytes
)
3804 INTERNAL_SIZE_T nb
; /* normalized request size */
3805 unsigned int idx
; /* associated bin index */
3806 mbinptr bin
; /* associated bin */
3807 mfastbinptr
* fb
; /* associated fastbin */
3809 mchunkptr victim
; /* inspected/selected chunk */
3810 INTERNAL_SIZE_T size
; /* its size */
3811 int victim_index
; /* its bin index */
3813 mchunkptr remainder
; /* remainder from a split */
3814 unsigned long remainder_size
; /* its size */
3816 unsigned int block
; /* bit map traverser */
3817 unsigned int bit
; /* bit map traverser */
3818 unsigned int map
; /* current word of binmap */
3820 mchunkptr fwd
; /* misc temp for linking */
3821 mchunkptr bck
; /* misc temp for linking */
3824 Convert request size to internal form by adding SIZE_SZ bytes
3825 overhead plus possibly more to obtain necessary alignment and/or
3826 to obtain a size of at least MINSIZE, the smallest allocatable
3827 size. Also, checked_request2size traps (returning 0) request sizes
3828 that are so large that they wrap around zero when padded and
3832 checked_request2size(bytes
, nb
);
3835 If the size qualifies as a fastbin, first check corresponding bin.
3836 This code is safe to execute even if av is not yet initialized, so we
3837 can try it without checking, which saves some time on this fast path.
3840 if ((unsigned long)(nb
) <= (unsigned long)(av
->max_fast
)) {
3841 fb
= &(av
->fastbins
[(fastbin_index(nb
))]);
3842 if ( (victim
= *fb
) != 0) {
3844 check_remalloced_chunk(av
, victim
, nb
);
3845 return chunk2mem(victim
);
3850 If a small request, check regular bin. Since these "smallbins"
3851 hold one size each, no searching within bins is necessary.
3852 (For a large request, we need to wait until unsorted chunks are
3853 processed to find best fit. But for small ones, fits are exact
3854 anyway, so we can check now, which is faster.)
3857 if (in_smallbin_range(nb
)) {
3858 idx
= smallbin_index(nb
);
3859 bin
= bin_at(av
,idx
);
3861 if ( (victim
= last(bin
)) != bin
) {
3862 if (victim
== 0) /* initialization check */
3863 malloc_consolidate(av
);
3866 set_inuse_bit_at_offset(victim
, nb
);
3870 if (av
!= &main_arena
)
3871 victim
->size
|= NON_MAIN_ARENA
;
3872 check_malloced_chunk(av
, victim
, nb
);
3873 return chunk2mem(victim
);
3879 If this is a large request, consolidate fastbins before continuing.
3880 While it might look excessive to kill all fastbins before
3881 even seeing if there is space available, this avoids
3882 fragmentation problems normally associated with fastbins.
3883 Also, in practice, programs tend to have runs of either small or
3884 large requests, but less often mixtures, so consolidation is not
3885 invoked all that often in most programs. And the programs that
3886 it is called frequently in otherwise tend to fragment.
3890 idx
= largebin_index(nb
);
3891 if (have_fastchunks(av
))
3892 malloc_consolidate(av
);
3896 Process recently freed or remaindered chunks, taking one only if
3897 it is exact fit, or, if this a small request, the chunk is remainder from
3898 the most recent non-exact fit. Place other traversed chunks in
3899 bins. Note that this step is the only place in any routine where
3900 chunks are placed in bins.
3902 The outer loop here is needed because we might not realize until
3903 near the end of malloc that we should have consolidated, so must
3904 do so and retry. This happens at most once, and only when we would
3905 otherwise need to expand memory to service a "small" request.
3910 while ( (victim
= unsorted_chunks(av
)->bk
) != unsorted_chunks(av
)) {
3912 size
= chunksize(victim
);
3915 If a small request, try to use last remainder if it is the
3916 only chunk in unsorted bin. This helps promote locality for
3917 runs of consecutive small requests. This is the only
3918 exception to best-fit, and applies only when there is
3919 no exact fit for a small chunk.
3922 if (in_smallbin_range(nb
) &&
3923 bck
== unsorted_chunks(av
) &&
3924 victim
== av
->last_remainder
&&
3925 (unsigned long)(size
) > (unsigned long)(nb
+ MINSIZE
)) {
3927 /* split and reattach remainder */
3928 remainder_size
= size
- nb
;
3929 remainder
= chunk_at_offset(victim
, nb
);
3930 unsorted_chunks(av
)->bk
= unsorted_chunks(av
)->fd
= remainder
;
3931 av
->last_remainder
= remainder
;
3932 remainder
->bk
= remainder
->fd
= unsorted_chunks(av
);
3934 set_head(victim
, nb
| PREV_INUSE
|
3935 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
3936 set_head(remainder
, remainder_size
| PREV_INUSE
);
3937 set_foot(remainder
, remainder_size
);
3939 check_malloced_chunk(av
, victim
, nb
);
3940 return chunk2mem(victim
);
3943 /* remove from unsorted list */
3944 unsorted_chunks(av
)->bk
= bck
;
3945 bck
->fd
= unsorted_chunks(av
);
3947 /* Take now instead of binning if exact fit */
3950 set_inuse_bit_at_offset(victim
, size
);
3951 if (av
!= &main_arena
)
3952 victim
->size
|= NON_MAIN_ARENA
;
3953 check_malloced_chunk(av
, victim
, nb
);
3954 return chunk2mem(victim
);
3957 /* place chunk in bin */
3959 if (in_smallbin_range(size
)) {
3960 victim_index
= smallbin_index(size
);
3961 bck
= bin_at(av
, victim_index
);
3965 victim_index
= largebin_index(size
);
3966 bck
= bin_at(av
, victim_index
);
3969 /* maintain large bins in sorted order */
3971 /* Or with inuse bit to speed comparisons */
3973 /* if smaller than smallest, bypass loop below */
3974 assert((bck
->bk
->size
& NON_MAIN_ARENA
) == 0);
3975 if ((unsigned long)(size
) <= (unsigned long)(bck
->bk
->size
)) {
3980 assert((fwd
->size
& NON_MAIN_ARENA
) == 0);
3981 while ((unsigned long)(size
) < (unsigned long)(fwd
->size
)) {
3983 assert((fwd
->size
& NON_MAIN_ARENA
) == 0);
3990 mark_bin(av
, victim_index
);
3998 If a large request, scan through the chunks of current bin in
3999 sorted order to find smallest that fits. This is the only step
4000 where an unbounded number of chunks might be scanned without doing
4001 anything useful with them. However the lists tend to be short.
4004 if (!in_smallbin_range(nb
)) {
4005 bin
= bin_at(av
, idx
);
4007 /* skip scan if empty or largest chunk is too small */
4008 if ((victim
= last(bin
)) != bin
&&
4009 (unsigned long)(first(bin
)->size
) >= (unsigned long)(nb
)) {
4011 while (((unsigned long)(size
= chunksize(victim
)) <
4012 (unsigned long)(nb
)))
4013 victim
= victim
->bk
;
4015 remainder_size
= size
- nb
;
4016 unlink(victim
, bck
, fwd
);
4019 if (remainder_size
< MINSIZE
) {
4020 set_inuse_bit_at_offset(victim
, size
);
4021 if (av
!= &main_arena
)
4022 victim
->size
|= NON_MAIN_ARENA
;
4023 check_malloced_chunk(av
, victim
, nb
);
4024 return chunk2mem(victim
);
4028 remainder
= chunk_at_offset(victim
, nb
);
4029 unsorted_chunks(av
)->bk
= unsorted_chunks(av
)->fd
= remainder
;
4030 remainder
->bk
= remainder
->fd
= unsorted_chunks(av
);
4031 set_head(victim
, nb
| PREV_INUSE
|
4032 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4033 set_head(remainder
, remainder_size
| PREV_INUSE
);
4034 set_foot(remainder
, remainder_size
);
4035 check_malloced_chunk(av
, victim
, nb
);
4036 return chunk2mem(victim
);
4042 Search for a chunk by scanning bins, starting with next largest
4043 bin. This search is strictly by best-fit; i.e., the smallest
4044 (with ties going to approximately the least recently used) chunk
4045 that fits is selected.
4047 The bitmap avoids needing to check that most blocks are nonempty.
4048 The particular case of skipping all bins during warm-up phases
4049 when no chunks have been returned yet is faster than it might look.
4053 bin
= bin_at(av
,idx
);
4054 block
= idx2block(idx
);
4055 map
= av
->binmap
[block
];
4060 /* Skip rest of block if there are no more set bits in this block. */
4061 if (bit
> map
|| bit
== 0) {
4063 if (++block
>= BINMAPSIZE
) /* out of bins */
4065 } while ( (map
= av
->binmap
[block
]) == 0);
4067 bin
= bin_at(av
, (block
<< BINMAPSHIFT
));
4071 /* Advance to bin with set bit. There must be one. */
4072 while ((bit
& map
) == 0) {
4073 bin
= next_bin(bin
);
4078 /* Inspect the bin. It is likely to be non-empty */
4081 /* If a false alarm (empty bin), clear the bit. */
4082 if (victim
== bin
) {
4083 av
->binmap
[block
] = map
&= ~bit
; /* Write through */
4084 bin
= next_bin(bin
);
4089 size
= chunksize(victim
);
4091 /* We know the first chunk in this bin is big enough to use. */
4092 assert((unsigned long)(size
) >= (unsigned long)(nb
));
4094 remainder_size
= size
- nb
;
4102 if (remainder_size
< MINSIZE
) {
4103 set_inuse_bit_at_offset(victim
, size
);
4104 if (av
!= &main_arena
)
4105 victim
->size
|= NON_MAIN_ARENA
;
4106 check_malloced_chunk(av
, victim
, nb
);
4107 return chunk2mem(victim
);
4112 remainder
= chunk_at_offset(victim
, nb
);
4114 unsorted_chunks(av
)->bk
= unsorted_chunks(av
)->fd
= remainder
;
4115 remainder
->bk
= remainder
->fd
= unsorted_chunks(av
);
4116 /* advertise as last remainder */
4117 if (in_smallbin_range(nb
))
4118 av
->last_remainder
= remainder
;
4120 set_head(victim
, nb
| PREV_INUSE
|
4121 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4122 set_head(remainder
, remainder_size
| PREV_INUSE
);
4123 set_foot(remainder
, remainder_size
);
4124 check_malloced_chunk(av
, victim
, nb
);
4125 return chunk2mem(victim
);
4132 If large enough, split off the chunk bordering the end of memory
4133 (held in av->top). Note that this is in accord with the best-fit
4134 search rule. In effect, av->top is treated as larger (and thus
4135 less well fitting) than any other available chunk since it can
4136 be extended to be as large as necessary (up to system
4139 We require that av->top always exists (i.e., has size >=
4140 MINSIZE) after initialization, so if it would otherwise be
4141 exhuasted by current request, it is replenished. (The main
4142 reason for ensuring it exists is that we may need MINSIZE space
4143 to put in fenceposts in sysmalloc.)
4147 size
= chunksize(victim
);
4149 if ((unsigned long)(size
) >= (unsigned long)(nb
+ MINSIZE
)) {
4150 remainder_size
= size
- nb
;
4151 remainder
= chunk_at_offset(victim
, nb
);
4152 av
->top
= remainder
;
4153 set_head(victim
, nb
| PREV_INUSE
|
4154 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4155 set_head(remainder
, remainder_size
| PREV_INUSE
);
4157 check_malloced_chunk(av
, victim
, nb
);
4158 return chunk2mem(victim
);
4162 If there is space available in fastbins, consolidate and retry,
4163 to possibly avoid expanding memory. This can occur only if nb is
4164 in smallbin range so we didn't consolidate upon entry.
4167 else if (have_fastchunks(av
)) {
4168 assert(in_smallbin_range(nb
));
4169 malloc_consolidate(av
);
4170 idx
= smallbin_index(nb
); /* restore original bin index */
4174 Otherwise, relay to handle system-dependent cases
4177 return sYSMALLOc(nb
, av
);
4182 ------------------------------ free ------------------------------
4186 _int_free(mstate av
, Void_t
* mem
)
4188 mchunkptr p
; /* chunk corresponding to mem */
4189 INTERNAL_SIZE_T size
; /* its size */
4190 mfastbinptr
* fb
; /* associated fastbin */
4191 mchunkptr nextchunk
; /* next contiguous chunk */
4192 INTERNAL_SIZE_T nextsize
; /* its size */
4193 int nextinuse
; /* true if nextchunk is used */
4194 INTERNAL_SIZE_T prevsize
; /* size of previous contiguous chunk */
4195 mchunkptr bck
; /* misc temp for linking */
4196 mchunkptr fwd
; /* misc temp for linking */
4199 /* free(0) has no effect */
4202 size
= chunksize(p
);
4204 /* Little security check which won't hurt performance: the
4205 allocator never wrapps around at the end of the address space.
4206 Therefore we can exclude some size values which might appear
4207 here by accident or by "design" from some intruder. */
4208 if (__builtin_expect ((uintptr_t) p
> (uintptr_t) -size
, 0))
4210 malloc_printerr (check_action
, "free(): invalid pointer", mem
);
4214 check_inuse_chunk(av
, p
);
4217 If eligible, place chunk on a fastbin so it can be found
4218 and used quickly in malloc.
4221 if ((unsigned long)(size
) <= (unsigned long)(av
->max_fast
)
4225 If TRIM_FASTBINS set, don't place chunks
4226 bordering top into fastbins
4228 && (chunk_at_offset(p
, size
) != av
->top
)
4233 fb
= &(av
->fastbins
[fastbin_index(size
)]);
4234 /* Another simple check: make sure the top of the bin is not the
4235 record we are going to add (i.e., double free). */
4236 if (__builtin_expect (*fb
== p
, 0))
4239 malloc_printerr (check_action
, "double free or corruption", mem
);
4247 Consolidate other non-mmapped chunks as they arrive.
4250 else if (!chunk_is_mmapped(p
)) {
4251 nextchunk
= chunk_at_offset(p
, size
);
4253 /* Lightweight tests: check whether the block is already the
4255 if (__builtin_expect (p
== av
->top
, 0))
4257 /* Or whether the next chunk is beyond the boundaries of the arena. */
4258 if (__builtin_expect (contiguous (av
)
4259 && (char *) nextchunk
4260 >= ((char *) av
->top
+ chunksize(av
->top
)), 0))
4262 /* Or whether the block is actually not marked used. */
4263 if (__builtin_expect (!prev_inuse(nextchunk
), 0))
4266 nextsize
= chunksize(nextchunk
);
4267 assert(nextsize
> 0);
4269 /* consolidate backward */
4270 if (!prev_inuse(p
)) {
4271 prevsize
= p
->prev_size
;
4273 p
= chunk_at_offset(p
, -((long) prevsize
));
4274 unlink(p
, bck
, fwd
);
4277 if (nextchunk
!= av
->top
) {
4278 /* get and clear inuse bit */
4279 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
4281 /* consolidate forward */
4283 unlink(nextchunk
, bck
, fwd
);
4286 clear_inuse_bit_at_offset(nextchunk
, 0);
4289 Place the chunk in unsorted chunk list. Chunks are
4290 not placed into regular bins until after they have
4291 been given one chance to be used in malloc.
4294 bck
= unsorted_chunks(av
);
4301 set_head(p
, size
| PREV_INUSE
);
4304 check_free_chunk(av
, p
);
4308 If the chunk borders the current high end of memory,
4309 consolidate into top
4314 set_head(p
, size
| PREV_INUSE
);
4320 If freeing a large space, consolidate possibly-surrounding
4321 chunks. Then, if the total unused topmost memory exceeds trim
4322 threshold, ask malloc_trim to reduce top.
4324 Unless max_fast is 0, we don't know if there are fastbins
4325 bordering top, so we cannot tell for sure whether threshold
4326 has been reached unless fastbins are consolidated. But we
4327 don't want to consolidate on each free. As a compromise,
4328 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4332 if ((unsigned long)(size
) >= FASTBIN_CONSOLIDATION_THRESHOLD
) {
4333 if (have_fastchunks(av
))
4334 malloc_consolidate(av
);
4336 if (av
== &main_arena
) {
4337 #ifndef MORECORE_CANNOT_TRIM
4338 if ((unsigned long)(chunksize(av
->top
)) >=
4339 (unsigned long)(mp_
.trim_threshold
))
4340 sYSTRIm(mp_
.top_pad
, av
);
4343 /* Always try heap_trim(), even if the top chunk is not
4344 large, because the corresponding heap might go away. */
4345 heap_info
*heap
= heap_for_ptr(top(av
));
4347 assert(heap
->ar_ptr
== av
);
4348 heap_trim(heap
, mp_
.top_pad
);
4354 If the chunk was allocated via mmap, release via munmap(). Note
4355 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4356 user must have overwritten memory. There's nothing we can do to
4357 catch this error unless MALLOC_DEBUG is set, in which case
4358 check_inuse_chunk (above) will have triggered error.
4364 INTERNAL_SIZE_T offset
= p
->prev_size
;
4366 mp_
.mmapped_mem
-= (size
+ offset
);
4367 ret
= munmap((char*)p
- offset
, size
+ offset
);
4368 /* munmap returns non-zero on failure */
4376 ------------------------- malloc_consolidate -------------------------
4378 malloc_consolidate is a specialized version of free() that tears
4379 down chunks held in fastbins. Free itself cannot be used for this
4380 purpose since, among other things, it might place chunks back onto
4381 fastbins. So, instead, we need to use a minor variant of the same
4384 Also, because this routine needs to be called the first time through
4385 malloc anyway, it turns out to be the perfect place to trigger
4386 initialization code.
4390 static void malloc_consolidate(mstate av
)
4392 static void malloc_consolidate(av
) mstate av
;
4395 mfastbinptr
* fb
; /* current fastbin being consolidated */
4396 mfastbinptr
* maxfb
; /* last fastbin (for loop control) */
4397 mchunkptr p
; /* current chunk being consolidated */
4398 mchunkptr nextp
; /* next chunk to consolidate */
4399 mchunkptr unsorted_bin
; /* bin header */
4400 mchunkptr first_unsorted
; /* chunk to link to */
4402 /* These have same use as in free() */
4403 mchunkptr nextchunk
;
4404 INTERNAL_SIZE_T size
;
4405 INTERNAL_SIZE_T nextsize
;
4406 INTERNAL_SIZE_T prevsize
;
4412 If max_fast is 0, we know that av hasn't
4413 yet been initialized, in which case do so below
4416 if (av
->max_fast
!= 0) {
4417 clear_fastchunks(av
);
4419 unsorted_bin
= unsorted_chunks(av
);
4422 Remove each chunk from fast bin and consolidate it, placing it
4423 then in unsorted bin. Among other reasons for doing this,
4424 placing in unsorted bin avoids needing to calculate actual bins
4425 until malloc is sure that chunks aren't immediately going to be
4429 maxfb
= &(av
->fastbins
[fastbin_index(av
->max_fast
)]);
4430 fb
= &(av
->fastbins
[0]);
4432 if ( (p
= *fb
) != 0) {
4436 check_inuse_chunk(av
, p
);
4439 /* Slightly streamlined version of consolidation code in free() */
4440 size
= p
->size
& ~(PREV_INUSE
|NON_MAIN_ARENA
);
4441 nextchunk
= chunk_at_offset(p
, size
);
4442 nextsize
= chunksize(nextchunk
);
4444 if (!prev_inuse(p
)) {
4445 prevsize
= p
->prev_size
;
4447 p
= chunk_at_offset(p
, -((long) prevsize
));
4448 unlink(p
, bck
, fwd
);
4451 if (nextchunk
!= av
->top
) {
4452 nextinuse
= inuse_bit_at_offset(nextchunk
, nextsize
);
4456 unlink(nextchunk
, bck
, fwd
);
4458 clear_inuse_bit_at_offset(nextchunk
, 0);
4460 first_unsorted
= unsorted_bin
->fd
;
4461 unsorted_bin
->fd
= p
;
4462 first_unsorted
->bk
= p
;
4464 set_head(p
, size
| PREV_INUSE
);
4465 p
->bk
= unsorted_bin
;
4466 p
->fd
= first_unsorted
;
4472 set_head(p
, size
| PREV_INUSE
);
4476 } while ( (p
= nextp
) != 0);
4479 } while (fb
++ != maxfb
);
4482 malloc_init_state(av
);
4483 check_malloc_state(av
);
4488 ------------------------------ realloc ------------------------------
4492 _int_realloc(mstate av
, Void_t
* oldmem
, size_t bytes
)
4494 INTERNAL_SIZE_T nb
; /* padded request size */
4496 mchunkptr oldp
; /* chunk corresponding to oldmem */
4497 INTERNAL_SIZE_T oldsize
; /* its size */
4499 mchunkptr newp
; /* chunk to return */
4500 INTERNAL_SIZE_T newsize
; /* its size */
4501 Void_t
* newmem
; /* corresponding user mem */
4503 mchunkptr next
; /* next contiguous chunk after oldp */
4505 mchunkptr remainder
; /* extra space at end of newp */
4506 unsigned long remainder_size
; /* its size */
4508 mchunkptr bck
; /* misc temp for linking */
4509 mchunkptr fwd
; /* misc temp for linking */
4511 unsigned long copysize
; /* bytes to copy */
4512 unsigned int ncopies
; /* INTERNAL_SIZE_T words to copy */
4513 INTERNAL_SIZE_T
* s
; /* copy source */
4514 INTERNAL_SIZE_T
* d
; /* copy destination */
4517 #if REALLOC_ZERO_BYTES_FREES
4519 _int_free(av
, oldmem
);
4524 /* realloc of null is supposed to be same as malloc */
4525 if (oldmem
== 0) return _int_malloc(av
, bytes
);
4527 checked_request2size(bytes
, nb
);
4529 oldp
= mem2chunk(oldmem
);
4530 oldsize
= chunksize(oldp
);
4532 check_inuse_chunk(av
, oldp
);
4534 if (!chunk_is_mmapped(oldp
)) {
4536 if ((unsigned long)(oldsize
) >= (unsigned long)(nb
)) {
4537 /* already big enough; split below */
4543 next
= chunk_at_offset(oldp
, oldsize
);
4545 /* Try to expand forward into top */
4546 if (next
== av
->top
&&
4547 (unsigned long)(newsize
= oldsize
+ chunksize(next
)) >=
4548 (unsigned long)(nb
+ MINSIZE
)) {
4549 set_head_size(oldp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4550 av
->top
= chunk_at_offset(oldp
, nb
);
4551 set_head(av
->top
, (newsize
- nb
) | PREV_INUSE
);
4552 check_inuse_chunk(av
, oldp
);
4553 return chunk2mem(oldp
);
4556 /* Try to expand forward into next chunk; split off remainder below */
4557 else if (next
!= av
->top
&&
4559 (unsigned long)(newsize
= oldsize
+ chunksize(next
)) >=
4560 (unsigned long)(nb
)) {
4562 unlink(next
, bck
, fwd
);
4565 /* allocate, copy, free */
4567 newmem
= _int_malloc(av
, nb
- MALLOC_ALIGN_MASK
);
4569 return 0; /* propagate failure */
4571 newp
= mem2chunk(newmem
);
4572 newsize
= chunksize(newp
);
4575 Avoid copy if newp is next chunk after oldp.
4583 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4584 We know that contents have an odd number of
4585 INTERNAL_SIZE_T-sized words; minimally 3.
4588 copysize
= oldsize
- SIZE_SZ
;
4589 s
= (INTERNAL_SIZE_T
*)(oldmem
);
4590 d
= (INTERNAL_SIZE_T
*)(newmem
);
4591 ncopies
= copysize
/ sizeof(INTERNAL_SIZE_T
);
4592 assert(ncopies
>= 3);
4595 MALLOC_COPY(d
, s
, copysize
);
4615 _int_free(av
, oldmem
);
4616 check_inuse_chunk(av
, newp
);
4617 return chunk2mem(newp
);
4622 /* If possible, free extra space in old or extended chunk */
4624 assert((unsigned long)(newsize
) >= (unsigned long)(nb
));
4626 remainder_size
= newsize
- nb
;
4628 if (remainder_size
< MINSIZE
) { /* not enough extra to split off */
4629 set_head_size(newp
, newsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4630 set_inuse_bit_at_offset(newp
, newsize
);
4632 else { /* split remainder */
4633 remainder
= chunk_at_offset(newp
, nb
);
4634 set_head_size(newp
, nb
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4635 set_head(remainder
, remainder_size
| PREV_INUSE
|
4636 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4637 /* Mark remainder as inuse so free() won't complain */
4638 set_inuse_bit_at_offset(remainder
, remainder_size
);
4639 _int_free(av
, chunk2mem(remainder
));
4642 check_inuse_chunk(av
, newp
);
4643 return chunk2mem(newp
);
4654 INTERNAL_SIZE_T offset
= oldp
->prev_size
;
4655 size_t pagemask
= mp_
.pagesize
- 1;
4659 /* Note the extra SIZE_SZ overhead */
4660 newsize
= (nb
+ offset
+ SIZE_SZ
+ pagemask
) & ~pagemask
;
4662 /* don't need to remap if still within same page */
4663 if (oldsize
== newsize
- offset
)
4666 cp
= (char*)mremap((char*)oldp
- offset
, oldsize
+ offset
, newsize
, 1);
4668 if (cp
!= MAP_FAILED
) {
4670 newp
= (mchunkptr
)(cp
+ offset
);
4671 set_head(newp
, (newsize
- offset
)|IS_MMAPPED
);
4673 assert(aligned_OK(chunk2mem(newp
)));
4674 assert((newp
->prev_size
== offset
));
4676 /* update statistics */
4677 sum
= mp_
.mmapped_mem
+= newsize
- oldsize
;
4678 if (sum
> (unsigned long)(mp_
.max_mmapped_mem
))
4679 mp_
.max_mmapped_mem
= sum
;
4681 sum
+= main_arena
.system_mem
;
4682 if (sum
> (unsigned long)(mp_
.max_total_mem
))
4683 mp_
.max_total_mem
= sum
;
4686 return chunk2mem(newp
);
4690 /* Note the extra SIZE_SZ overhead. */
4691 if ((unsigned long)(oldsize
) >= (unsigned long)(nb
+ SIZE_SZ
))
4692 newmem
= oldmem
; /* do nothing */
4694 /* Must alloc, copy, free. */
4695 newmem
= _int_malloc(av
, nb
- MALLOC_ALIGN_MASK
);
4697 MALLOC_COPY(newmem
, oldmem
, oldsize
- 2*SIZE_SZ
);
4698 _int_free(av
, oldmem
);
4704 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4705 check_malloc_state(av
);
4706 MALLOC_FAILURE_ACTION
;
4713 ------------------------------ memalign ------------------------------
4717 _int_memalign(mstate av
, size_t alignment
, size_t bytes
)
4719 INTERNAL_SIZE_T nb
; /* padded request size */
4720 char* m
; /* memory returned by malloc call */
4721 mchunkptr p
; /* corresponding chunk */
4722 char* brk
; /* alignment point within p */
4723 mchunkptr newp
; /* chunk to return */
4724 INTERNAL_SIZE_T newsize
; /* its size */
4725 INTERNAL_SIZE_T leadsize
; /* leading space before alignment point */
4726 mchunkptr remainder
; /* spare room at end to split off */
4727 unsigned long remainder_size
; /* its size */
4728 INTERNAL_SIZE_T size
;
4730 /* If need less alignment than we give anyway, just relay to malloc */
4732 if (alignment
<= MALLOC_ALIGNMENT
) return _int_malloc(av
, bytes
);
4734 /* Otherwise, ensure that it is at least a minimum chunk size */
4736 if (alignment
< MINSIZE
) alignment
= MINSIZE
;
4738 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4739 if ((alignment
& (alignment
- 1)) != 0) {
4740 size_t a
= MALLOC_ALIGNMENT
* 2;
4741 while ((unsigned long)a
< (unsigned long)alignment
) a
<<= 1;
4745 checked_request2size(bytes
, nb
);
4748 Strategy: find a spot within that chunk that meets the alignment
4749 request, and then possibly free the leading and trailing space.
4753 /* Call malloc with worst case padding to hit alignment. */
4755 m
= (char*)(_int_malloc(av
, nb
+ alignment
+ MINSIZE
));
4757 if (m
== 0) return 0; /* propagate failure */
4761 if ((((unsigned long)(m
)) % alignment
) != 0) { /* misaligned */
4764 Find an aligned spot inside chunk. Since we need to give back
4765 leading space in a chunk of at least MINSIZE, if the first
4766 calculation places us at a spot with less than MINSIZE leader,
4767 we can move to the next aligned spot -- we've allocated enough
4768 total room so that this is always possible.
4771 brk
= (char*)mem2chunk(((unsigned long)(m
+ alignment
- 1)) &
4772 -((signed long) alignment
));
4773 if ((unsigned long)(brk
- (char*)(p
)) < MINSIZE
)
4776 newp
= (mchunkptr
)brk
;
4777 leadsize
= brk
- (char*)(p
);
4778 newsize
= chunksize(p
) - leadsize
;
4780 /* For mmapped chunks, just adjust offset */
4781 if (chunk_is_mmapped(p
)) {
4782 newp
->prev_size
= p
->prev_size
+ leadsize
;
4783 set_head(newp
, newsize
|IS_MMAPPED
);
4784 return chunk2mem(newp
);
4787 /* Otherwise, give back leader, use the rest */
4788 set_head(newp
, newsize
| PREV_INUSE
|
4789 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4790 set_inuse_bit_at_offset(newp
, newsize
);
4791 set_head_size(p
, leadsize
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4792 _int_free(av
, chunk2mem(p
));
4795 assert (newsize
>= nb
&&
4796 (((unsigned long)(chunk2mem(p
))) % alignment
) == 0);
4799 /* Also give back spare room at the end */
4800 if (!chunk_is_mmapped(p
)) {
4801 size
= chunksize(p
);
4802 if ((unsigned long)(size
) > (unsigned long)(nb
+ MINSIZE
)) {
4803 remainder_size
= size
- nb
;
4804 remainder
= chunk_at_offset(p
, nb
);
4805 set_head(remainder
, remainder_size
| PREV_INUSE
|
4806 (av
!= &main_arena
? NON_MAIN_ARENA
: 0));
4807 set_head_size(p
, nb
);
4808 _int_free(av
, chunk2mem(remainder
));
4812 check_inuse_chunk(av
, p
);
4813 return chunk2mem(p
);
4818 ------------------------------ calloc ------------------------------
4822 Void_t
* cALLOc(size_t n_elements
, size_t elem_size
)
4824 Void_t
* cALLOc(n_elements
, elem_size
) size_t n_elements
; size_t elem_size
;
4828 unsigned long clearsize
;
4829 unsigned long nclears
;
4832 Void_t
* mem
= mALLOc(n_elements
* elem_size
);
4838 if (!chunk_is_mmapped(p
)) /* don't need to clear mmapped space */
4842 Unroll clear of <= 36 bytes (72 if 8byte sizes)
4843 We know that contents have an odd number of
4844 INTERNAL_SIZE_T-sized words; minimally 3.
4847 d
= (INTERNAL_SIZE_T
*)mem
;
4848 clearsize
= chunksize(p
) - SIZE_SZ
;
4849 nclears
= clearsize
/ sizeof(INTERNAL_SIZE_T
);
4850 assert(nclears
>= 3);
4853 MALLOC_ZERO(d
, clearsize
);
4879 ------------------------- independent_calloc -------------------------
4884 _int_icalloc(mstate av
, size_t n_elements
, size_t elem_size
, Void_t
* chunks
[])
4886 _int_icalloc(av
, n_elements
, elem_size
, chunks
)
4887 mstate av
; size_t n_elements
; size_t elem_size
; Void_t
* chunks
[];
4890 size_t sz
= elem_size
; /* serves as 1-element array */
4891 /* opts arg of 3 means all elements are same size, and should be cleared */
4892 return iALLOc(av
, n_elements
, &sz
, 3, chunks
);
4896 ------------------------- independent_comalloc -------------------------
4901 _int_icomalloc(mstate av
, size_t n_elements
, size_t sizes
[], Void_t
* chunks
[])
4903 _int_icomalloc(av
, n_elements
, sizes
, chunks
)
4904 mstate av
; size_t n_elements
; size_t sizes
[]; Void_t
* chunks
[];
4907 return iALLOc(av
, n_elements
, sizes
, 0, chunks
);
4912 ------------------------------ ialloc ------------------------------
4913 ialloc provides common support for independent_X routines, handling all of
4914 the combinations that can result.
4917 bit 0 set if all elements are same size (using sizes[0])
4918 bit 1 set if elements should be zeroed
4924 iALLOc(mstate av
, size_t n_elements
, size_t* sizes
, int opts
, Void_t
* chunks
[])
4926 iALLOc(av
, n_elements
, sizes
, opts
, chunks
)
4927 mstate av
; size_t n_elements
; size_t* sizes
; int opts
; Void_t
* chunks
[];
4930 INTERNAL_SIZE_T element_size
; /* chunksize of each element, if all same */
4931 INTERNAL_SIZE_T contents_size
; /* total size of elements */
4932 INTERNAL_SIZE_T array_size
; /* request size of pointer array */
4933 Void_t
* mem
; /* malloced aggregate space */
4934 mchunkptr p
; /* corresponding chunk */
4935 INTERNAL_SIZE_T remainder_size
; /* remaining bytes while splitting */
4936 Void_t
** marray
; /* either "chunks" or malloced ptr array */
4937 mchunkptr array_chunk
; /* chunk for malloced ptr array */
4938 int mmx
; /* to disable mmap */
4939 INTERNAL_SIZE_T size
;
4940 INTERNAL_SIZE_T size_flags
;
4943 /* Ensure initialization/consolidation */
4944 if (have_fastchunks(av
)) malloc_consolidate(av
);
4946 /* compute array length, if needed */
4948 if (n_elements
== 0)
4949 return chunks
; /* nothing to do */
4954 /* if empty req, must still return chunk representing empty array */
4955 if (n_elements
== 0)
4956 return (Void_t
**) _int_malloc(av
, 0);
4958 array_size
= request2size(n_elements
* (sizeof(Void_t
*)));
4961 /* compute total element size */
4962 if (opts
& 0x1) { /* all-same-size */
4963 element_size
= request2size(*sizes
);
4964 contents_size
= n_elements
* element_size
;
4966 else { /* add up all the sizes */
4969 for (i
= 0; i
!= n_elements
; ++i
)
4970 contents_size
+= request2size(sizes
[i
]);
4973 /* subtract out alignment bytes from total to minimize overallocation */
4974 size
= contents_size
+ array_size
- MALLOC_ALIGN_MASK
;
4977 Allocate the aggregate chunk.
4978 But first disable mmap so malloc won't use it, since
4979 we would not be able to later free/realloc space internal
4980 to a segregated mmap region.
4982 mmx
= mp_
.n_mmaps_max
; /* disable mmap */
4983 mp_
.n_mmaps_max
= 0;
4984 mem
= _int_malloc(av
, size
);
4985 mp_
.n_mmaps_max
= mmx
; /* reset mmap */
4990 assert(!chunk_is_mmapped(p
));
4991 remainder_size
= chunksize(p
);
4993 if (opts
& 0x2) { /* optionally clear the elements */
4994 MALLOC_ZERO(mem
, remainder_size
- SIZE_SZ
- array_size
);
4997 size_flags
= PREV_INUSE
| (av
!= &main_arena
? NON_MAIN_ARENA
: 0);
4999 /* If not provided, allocate the pointer array as final part of chunk */
5001 array_chunk
= chunk_at_offset(p
, contents_size
);
5002 marray
= (Void_t
**) (chunk2mem(array_chunk
));
5003 set_head(array_chunk
, (remainder_size
- contents_size
) | size_flags
);
5004 remainder_size
= contents_size
;
5007 /* split out elements */
5008 for (i
= 0; ; ++i
) {
5009 marray
[i
] = chunk2mem(p
);
5010 if (i
!= n_elements
-1) {
5011 if (element_size
!= 0)
5012 size
= element_size
;
5014 size
= request2size(sizes
[i
]);
5015 remainder_size
-= size
;
5016 set_head(p
, size
| size_flags
);
5017 p
= chunk_at_offset(p
, size
);
5019 else { /* the final element absorbs any overallocation slop */
5020 set_head(p
, remainder_size
| size_flags
);
5026 if (marray
!= chunks
) {
5027 /* final element must have exactly exhausted chunk */
5028 if (element_size
!= 0)
5029 assert(remainder_size
== element_size
);
5031 assert(remainder_size
== request2size(sizes
[i
]));
5032 check_inuse_chunk(av
, mem2chunk(marray
));
5035 for (i
= 0; i
!= n_elements
; ++i
)
5036 check_inuse_chunk(av
, mem2chunk(marray
[i
]));
5044 ------------------------------ valloc ------------------------------
5049 _int_valloc(mstate av
, size_t bytes
)
5051 _int_valloc(av
, bytes
) mstate av
; size_t bytes
;
5054 /* Ensure initialization/consolidation */
5055 if (have_fastchunks(av
)) malloc_consolidate(av
);
5056 return _int_memalign(av
, mp_
.pagesize
, bytes
);
5060 ------------------------------ pvalloc ------------------------------
5066 _int_pvalloc(mstate av
, size_t bytes
)
5068 _int_pvalloc(av
, bytes
) mstate av
, size_t bytes
;
5073 /* Ensure initialization/consolidation */
5074 if (have_fastchunks(av
)) malloc_consolidate(av
);
5075 pagesz
= mp_
.pagesize
;
5076 return _int_memalign(av
, pagesz
, (bytes
+ pagesz
- 1) & ~(pagesz
- 1));
5081 ------------------------------ malloc_trim ------------------------------
5085 int mTRIm(size_t pad
)
5087 int mTRIm(pad
) size_t pad
;
5090 mstate av
= &main_arena
; /* already locked */
5092 /* Ensure initialization/consolidation */
5093 malloc_consolidate(av
);
5095 #ifndef MORECORE_CANNOT_TRIM
5096 return sYSTRIm(pad
, av
);
5104 ------------------------- malloc_usable_size -------------------------
5108 size_t mUSABLe(Void_t
* mem
)
5110 size_t mUSABLe(mem
) Void_t
* mem
;
5116 if (chunk_is_mmapped(p
))
5117 return chunksize(p
) - 2*SIZE_SZ
;
5119 return chunksize(p
) - SIZE_SZ
;
5125 ------------------------------ mallinfo ------------------------------
5128 struct mallinfo
mALLINFo(mstate av
)
5134 INTERNAL_SIZE_T avail
;
5135 INTERNAL_SIZE_T fastavail
;
5139 /* Ensure initialization */
5140 if (av
->top
== 0) malloc_consolidate(av
);
5142 check_malloc_state(av
);
5144 /* Account for top */
5145 avail
= chunksize(av
->top
);
5146 nblocks
= 1; /* top always exists */
5148 /* traverse fastbins */
5152 for (i
= 0; i
< NFASTBINS
; ++i
) {
5153 for (p
= av
->fastbins
[i
]; p
!= 0; p
= p
->fd
) {
5155 fastavail
+= chunksize(p
);
5161 /* traverse regular bins */
5162 for (i
= 1; i
< NBINS
; ++i
) {
5164 for (p
= last(b
); p
!= b
; p
= p
->bk
) {
5166 avail
+= chunksize(p
);
5170 mi
.smblks
= nfastblocks
;
5171 mi
.ordblks
= nblocks
;
5172 mi
.fordblks
= avail
;
5173 mi
.uordblks
= av
->system_mem
- avail
;
5174 mi
.arena
= av
->system_mem
;
5175 mi
.hblks
= mp_
.n_mmaps
;
5176 mi
.hblkhd
= mp_
.mmapped_mem
;
5177 mi
.fsmblks
= fastavail
;
5178 mi
.keepcost
= chunksize(av
->top
);
5179 mi
.usmblks
= mp_
.max_total_mem
;
5184 ------------------------------ malloc_stats ------------------------------
5192 unsigned int in_use_b
= mp_
.mmapped_mem
, system_b
= in_use_b
;
5194 long stat_lock_direct
= 0, stat_lock_loop
= 0, stat_lock_wait
= 0;
5197 if(__malloc_initialized
< 0)
5200 _IO_flockfile (stderr
);
5201 int old_flags2
= ((_IO_FILE
*) stderr
)->_flags2
;
5202 ((_IO_FILE
*) stderr
)->_flags2
|= _IO_FLAGS2_NOTCANCEL
;
5204 for (i
=0, ar_ptr
= &main_arena
;; i
++) {
5205 (void)mutex_lock(&ar_ptr
->mutex
);
5206 mi
= mALLINFo(ar_ptr
);
5207 fprintf(stderr
, "Arena %d:\n", i
);
5208 fprintf(stderr
, "system bytes = %10u\n", (unsigned int)mi
.arena
);
5209 fprintf(stderr
, "in use bytes = %10u\n", (unsigned int)mi
.uordblks
);
5210 #if MALLOC_DEBUG > 1
5212 dump_heap(heap_for_ptr(top(ar_ptr
)));
5214 system_b
+= mi
.arena
;
5215 in_use_b
+= mi
.uordblks
;
5217 stat_lock_direct
+= ar_ptr
->stat_lock_direct
;
5218 stat_lock_loop
+= ar_ptr
->stat_lock_loop
;
5219 stat_lock_wait
+= ar_ptr
->stat_lock_wait
;
5221 (void)mutex_unlock(&ar_ptr
->mutex
);
5222 ar_ptr
= ar_ptr
->next
;
5223 if(ar_ptr
== &main_arena
) break;
5226 fprintf(stderr
, "Total (incl. mmap):\n");
5228 fprintf(stderr
, "Total:\n");
5230 fprintf(stderr
, "system bytes = %10u\n", system_b
);
5231 fprintf(stderr
, "in use bytes = %10u\n", in_use_b
);
5233 fprintf(stderr
, "max system bytes = %10u\n", (unsigned int)mp_
.max_total_mem
);
5236 fprintf(stderr
, "max mmap regions = %10u\n", (unsigned int)mp_
.max_n_mmaps
);
5237 fprintf(stderr
, "max mmap bytes = %10lu\n",
5238 (unsigned long)mp_
.max_mmapped_mem
);
5241 fprintf(stderr
, "heaps created = %10d\n", stat_n_heaps
);
5242 fprintf(stderr
, "locked directly = %10ld\n", stat_lock_direct
);
5243 fprintf(stderr
, "locked in loop = %10ld\n", stat_lock_loop
);
5244 fprintf(stderr
, "locked waiting = %10ld\n", stat_lock_wait
);
5245 fprintf(stderr
, "locked total = %10ld\n",
5246 stat_lock_direct
+ stat_lock_loop
+ stat_lock_wait
);
5249 ((_IO_FILE
*) stderr
)->_flags2
|= old_flags2
;
5250 _IO_funlockfile (stderr
);
5256 ------------------------------ mallopt ------------------------------
5260 int mALLOPt(int param_number
, int value
)
5262 int mALLOPt(param_number
, value
) int param_number
; int value
;
5265 mstate av
= &main_arena
;
5268 if(__malloc_initialized
< 0)
5270 (void)mutex_lock(&av
->mutex
);
5271 /* Ensure initialization/consolidation */
5272 malloc_consolidate(av
);
5274 switch(param_number
) {
5276 if (value
>= 0 && value
<= MAX_FAST_SIZE
) {
5277 set_max_fast(av
, value
);
5283 case M_TRIM_THRESHOLD
:
5284 mp_
.trim_threshold
= value
;
5288 mp_
.top_pad
= value
;
5291 case M_MMAP_THRESHOLD
:
5293 /* Forbid setting the threshold too high. */
5294 if((unsigned long)value
> HEAP_MAX_SIZE
/2)
5298 mp_
.mmap_threshold
= value
;
5307 mp_
.n_mmaps_max
= value
;
5310 case M_CHECK_ACTION
:
5311 check_action
= value
;
5314 (void)mutex_unlock(&av
->mutex
);
5320 -------------------- Alternative MORECORE functions --------------------
5325 General Requirements for MORECORE.
5327 The MORECORE function must have the following properties:
5329 If MORECORE_CONTIGUOUS is false:
5331 * MORECORE must allocate in multiples of pagesize. It will
5332 only be called with arguments that are multiples of pagesize.
5334 * MORECORE(0) must return an address that is at least
5335 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5337 else (i.e. If MORECORE_CONTIGUOUS is true):
5339 * Consecutive calls to MORECORE with positive arguments
5340 return increasing addresses, indicating that space has been
5341 contiguously extended.
5343 * MORECORE need not allocate in multiples of pagesize.
5344 Calls to MORECORE need not have args of multiples of pagesize.
5346 * MORECORE need not page-align.
5350 * MORECORE may allocate more memory than requested. (Or even less,
5351 but this will generally result in a malloc failure.)
5353 * MORECORE must not allocate memory when given argument zero, but
5354 instead return one past the end address of memory from previous
5355 nonzero call. This malloc does NOT call MORECORE(0)
5356 until at least one call with positive arguments is made, so
5357 the initial value returned is not important.
5359 * Even though consecutive calls to MORECORE need not return contiguous
5360 addresses, it must be OK for malloc'ed chunks to span multiple
5361 regions in those cases where they do happen to be contiguous.
5363 * MORECORE need not handle negative arguments -- it may instead
5364 just return MORECORE_FAILURE when given negative arguments.
5365 Negative arguments are always multiples of pagesize. MORECORE
5366 must not misinterpret negative args as large positive unsigned
5367 args. You can suppress all such calls from even occurring by defining
5368 MORECORE_CANNOT_TRIM,
5370 There is some variation across systems about the type of the
5371 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5372 actually be size_t, because sbrk supports negative args, so it is
5373 normally the signed type of the same width as size_t (sometimes
5374 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5375 matter though. Internally, we use "long" as arguments, which should
5376 work across all reasonable possibilities.
5378 Additionally, if MORECORE ever returns failure for a positive
5379 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5380 system allocator. This is a useful backup strategy for systems with
5381 holes in address spaces -- in this case sbrk cannot contiguously
5382 expand the heap, but mmap may be able to map noncontiguous space.
5384 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5385 a function that always returns MORECORE_FAILURE.
5387 If you are using this malloc with something other than sbrk (or its
5388 emulation) to supply memory regions, you probably want to set
5389 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5390 allocator kindly contributed for pre-OSX macOS. It uses virtually
5391 but not necessarily physically contiguous non-paged memory (locked
5392 in, present and won't get swapped out). You can use it by
5393 uncommenting this section, adding some #includes, and setting up the
5394 appropriate defines above:
5396 #define MORECORE osMoreCore
5397 #define MORECORE_CONTIGUOUS 0
5399 There is also a shutdown routine that should somehow be called for
5400 cleanup upon program exit.
5402 #define MAX_POOL_ENTRIES 100
5403 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5404 static int next_os_pool;
5405 void *our_os_pools[MAX_POOL_ENTRIES];
5407 void *osMoreCore(int size)
5410 static void *sbrk_top = 0;
5414 if (size < MINIMUM_MORECORE_SIZE)
5415 size = MINIMUM_MORECORE_SIZE;
5416 if (CurrentExecutionLevel() == kTaskLevel)
5417 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5420 return (void *) MORECORE_FAILURE;
5422 // save ptrs so they can be freed during cleanup
5423 our_os_pools[next_os_pool] = ptr;
5425 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5426 sbrk_top = (char *) ptr + size;
5431 // we don't currently support shrink behavior
5432 return (void *) MORECORE_FAILURE;
5440 // cleanup any allocated memory pools
5441 // called as last thing before shutting down driver
5443 void osCleanupMem(void)
5447 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5450 PoolDeallocate(*ptr);
5461 malloc_printerr(int action
, const char *str
, void *ptr
)
5465 /* output string will be ": ADDR ***\n" */
5466 static const char suffix
[] = " ***\n";
5467 static const char prefix
[] = ": 0x";
5468 char buf
[sizeof (prefix
) - 1 + sizeof (void *) * 2 + sizeof (suffix
)];
5471 cp
= memcpy (&buf
[sizeof (buf
) - 2], "\n", 2);
5474 cp
= memcpy (&buf
[sizeof (buf
) - sizeof (suffix
)], suffix
,
5476 cp
= _itoa_word ((unsigned long int) ptr
, cp
, 16, 0);
5477 while (cp
> &buf
[sizeof (prefix
) - 1])
5479 cp
= memcpy (buf
, prefix
, sizeof (prefix
) - 1);
5482 struct iovec iov
[3];
5484 if ((action
& 4) == 0)
5486 iov
[0].iov_base
= (char *) "*** glibc detected *** ";
5487 iov
[0].iov_len
= strlen (iov
[0].iov_base
);
5490 iov
[n
].iov_base
= (char *) str
;
5491 iov
[n
].iov_len
= strlen (str
);
5493 iov
[n
].iov_base
= cp
;
5494 iov
[n
].iov_len
= &buf
[sizeof (buf
) - 1] - cp
;
5496 if (TEMP_FAILURE_RETRY (__writev (STDERR_FILENO
, iov
, n
)) == -1
5498 /* Standard error is not opened. Try using syslog. */
5499 syslog (LOG_ERR
, "%s%s%s", (char *) iov
[0].iov_base
,
5500 (char *) iov
[1].iov_base
,
5501 n
== 3 ? (const char *) iov
[2].iov_base
: "");
5508 # include <sys/param.h>
5510 /* We need a wrapper function for one of the additions of POSIX. */
5512 __posix_memalign (void **memptr
, size_t alignment
, size_t size
)
5515 __malloc_ptr_t (*hook
) __MALLOC_PMT ((size_t, size_t,
5516 __const __malloc_ptr_t
)) =
5519 /* Test whether the SIZE argument is valid. It must be a power of
5520 two multiple of sizeof (void *). */
5521 if (alignment
% sizeof (void *) != 0
5522 || !powerof2 (alignment
/ sizeof (void *)) != 0
5526 /* Call the hook here, so that caller is posix_memalign's caller
5527 and not posix_memalign itself. */
5529 mem
= (*hook
)(alignment
, size
, RETURN_ADDRESS (0));
5531 mem
= public_mEMALIGn (alignment
, size
);
5540 weak_alias (__posix_memalign
, posix_memalign
)
5542 strong_alias (__libc_calloc
, __calloc
) weak_alias (__libc_calloc
, calloc
)
5543 strong_alias (__libc_free
, __cfree
) weak_alias (__libc_free
, cfree
)
5544 strong_alias (__libc_free
, __free
) strong_alias (__libc_free
, free
)
5545 strong_alias (__libc_malloc
, __malloc
) strong_alias (__libc_malloc
, malloc
)
5546 strong_alias (__libc_memalign
, __memalign
)
5547 weak_alias (__libc_memalign
, memalign
)
5548 strong_alias (__libc_realloc
, __realloc
) strong_alias (__libc_realloc
, realloc
)
5549 strong_alias (__libc_valloc
, __valloc
) weak_alias (__libc_valloc
, valloc
)
5550 strong_alias (__libc_pvalloc
, __pvalloc
) weak_alias (__libc_pvalloc
, pvalloc
)
5551 strong_alias (__libc_mallinfo
, __mallinfo
)
5552 weak_alias (__libc_mallinfo
, mallinfo
)
5553 strong_alias (__libc_mallopt
, __mallopt
) weak_alias (__libc_mallopt
, mallopt
)
5555 weak_alias (__malloc_stats
, malloc_stats
)
5556 weak_alias (__malloc_usable_size
, malloc_usable_size
)
5557 weak_alias (__malloc_trim
, malloc_trim
)
5558 weak_alias (__malloc_get_state
, malloc_get_state
)
5559 weak_alias (__malloc_set_state
, malloc_set_state
)
5563 /* ------------------------------------------------------------
5566 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]