[glibc.git] / malloc / malloc.c
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 * Version ptmalloc2-20011215
27 $Id$
28 based on:
29 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
31 Note: There may be an updated version of this malloc obtainable at
32 http://www.malloc.de/malloc/ptmalloc2.tar.gz
33 Check before installing!
35 * Quickstart
37 In order to compile this implementation, a Makefile is provided with
38 the ptmalloc2 distribution, which has pre-defined targets for some
39 popular systems (e.g. "make posix" for Posix threads). All that is
40 typically required with regard to compiler flags is the selection of
41 the thread package via defining one out of USE_PTHREADS, USE_THR or
42 USE_SPROC. Check the thread-m.h file for what effects this has.
43 Many/most systems will additionally require USE_TSD_DATA_HACK to be
44 defined, so this is the default for "make posix".
46 * Why use this malloc?
48 This is not the fastest, most space-conserving, most portable, or
49 most tunable malloc ever written. However it is among the fastest
50 while also being among the most space-conserving, portable and tunable.
51 Consistent balance across these factors results in a good general-purpose
52 allocator for malloc-intensive programs.
54 The main properties of the algorithms are:
55 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
56 with ties normally decided via FIFO (i.e. least recently used).
57 * For small (<= 64 bytes by default) requests, it is a caching
58 allocator, that maintains pools of quickly recycled chunks.
59 * In between, and for combinations of large and small requests, it does
60 the best it can trying to meet both goals at once.
61 * For very large requests (>= 128KB by default), it relies on system
62 memory mapping facilities, if supported.
64 For a longer but slightly out of date high-level description, see
65 http://gee.cs.oswego.edu/dl/html/malloc.html
67 You may already by default be using a C library containing a malloc
68 that is based on some version of this malloc (for example in
69 linux). You might still want to use the one in this file in order to
70 customize settings or to avoid overheads associated with library
71 versions.
73 * Contents, described in more detail in "description of public routines" below.
75 Standard (ANSI/SVID/...) functions:
76 malloc(size_t n);
77 calloc(size_t n_elements, size_t element_size);
78 free(Void_t* p);
79 realloc(Void_t* p, size_t n);
80 memalign(size_t alignment, size_t n);
81 valloc(size_t n);
82 mallinfo()
83 mallopt(int parameter_number, int parameter_value)
85 Additional functions:
86 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
87 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
88 pvalloc(size_t n);
89 cfree(Void_t* p);
90 malloc_trim(size_t pad);
91 malloc_usable_size(Void_t* p);
92 malloc_stats();
94 * Vital statistics:
96 Supported pointer representation: 4 or 8 bytes
97 Supported size_t representation: 4 or 8 bytes
98 Note that size_t is allowed to be 4 bytes even if pointers are 8.
99 You can adjust this by defining INTERNAL_SIZE_T
101 Alignment: 2 * sizeof(size_t) (default)
102 (i.e., 8 byte alignment with 4byte size_t). This suffices for
103 nearly all current machines and C compilers. However, you can
104 define MALLOC_ALIGNMENT to be wider than this if necessary.
106 Minimum overhead per allocated chunk: 4 or 8 bytes
107 Each malloced chunk has a hidden word of overhead holding size
108 and status information.
110 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
111                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)
113 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
114 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
115 needed; 4 (8) for a trailing size field and 8 (16) bytes for
116 free list pointers. Thus, the minimum allocatable size is
117 16/24/32 bytes.
119 Even a request for zero bytes (i.e., malloc(0)) returns a
120 pointer to something of the minimum allocatable size.
122 The maximum overhead wastage (i.e., number of extra bytes
123 allocated than were requested in malloc) is less than or equal
124 to the minimum size, except for requests >= mmap_threshold that
125 are serviced via mmap(), where the worst case wastage is 2 *
126 sizeof(size_t) bytes plus the remainder from a system page (the
127 minimal mmap unit); typically 4096 or 8192 bytes.
129 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
130 8-byte size_t: 2^64 minus about two pages
132 It is assumed that (possibly signed) size_t values suffice to
133 represent chunk sizes. `Possibly signed' is due to the fact
134 that `size_t' may be defined on a system as either a signed or
135 an unsigned type. The ISO C standard says that it must be
136 unsigned, but a few systems are known not to adhere to this.
137 Additionally, even when size_t is unsigned, sbrk (which is by
138 default used to obtain memory from system) accepts signed
139 arguments, and may not be able to handle size_t-wide arguments
140 with negative sign bit. Generally, values that would
141 appear as negative after accounting for overhead and alignment
142 are supported only via mmap(), which does not have this
143 limitation.
145 Requests for sizes outside the allowed range will perform an optional
146 failure action and then return null. (Requests may also
147   fail because a system is out of memory.)
149 Thread-safety: thread-safe unless NO_THREADS is defined
151 Compliance: I believe it is compliant with the 1997 Single Unix Specification
152 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
153 others as well.
155 * Synopsis of compile-time options:
157 People have reported using previous versions of this malloc on all
158 versions of Unix, sometimes by tweaking some of the defines
159 below. It has been tested most extensively on Solaris and
160 Linux. It is also reported to work on WIN32 platforms.
161 People also report using it in stand-alone embedded systems.
163 The implementation is in straight, hand-tuned ANSI C. It is not
164 at all modular. (Sorry!) It uses a lot of macros. To be at all
165 usable, this code should be compiled using an optimizing compiler
166 (for example gcc -O3) that can simplify expressions and control
167 paths. (FAQ: some macros import variables as arguments rather than
168 declare locals because people reported that some debuggers
169 otherwise get confused.)
171 OPTION DEFAULT VALUE
173 Compilation Environment options:
175 __STD_C derived from C compiler defines
176 WIN32 NOT defined
177 HAVE_MEMCPY defined
178 USE_MEMCPY 1 if HAVE_MEMCPY is defined
179 HAVE_MMAP defined as 1
180 MMAP_CLEARS 1
181 HAVE_MREMAP 0 unless linux defined
182 USE_ARENAS the same as HAVE_MMAP
183 malloc_getpagesize derived from system #includes, or 4096 if not
184 HAVE_USR_INCLUDE_MALLOC_H NOT defined
185 LACKS_UNISTD_H NOT defined unless WIN32
186 LACKS_SYS_PARAM_H NOT defined unless WIN32
187 LACKS_SYS_MMAN_H NOT defined unless WIN32
189 Changing default word sizes:
191 INTERNAL_SIZE_T size_t
192 MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
194 Configuration and functionality options:
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
202 TRIM_FASTBINS 0
204 Options for customizing MORECORE:
206 MORECORE sbrk
207 MORECORE_FAILURE -1
208 MORECORE_CONTIGUOUS 1
209 MORECORE_CANNOT_TRIM NOT defined
210 MORECORE_CLEARS 1
211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
213 Tuning options that are also dynamically changeable via mallopt:
215 DEFAULT_MXFAST 64
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
217 DEFAULT_TOP_PAD 0
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
227 with it.
230 #ifndef __STD_C
231 #if defined(__STDC__) || defined(__cplusplus)
232 #define __STD_C 1
233 #else
234 #define __STD_C 0
235 #endif
236 #endif /*__STD_C*/
240 Void_t* is the pointer type that malloc should say it returns
243 #ifndef Void_t
244 #if (__STD_C || defined(WIN32))
245 #define Void_t void
246 #else
247 #define Void_t char
248 #endif
249 #endif /*Void_t*/
251 #if __STD_C
252 #include <stddef.h> /* for size_t */
253 #include <stdlib.h> /* for getenv(), abort() */
254 #else
255 #include <sys/types.h>
256 #endif
258 #include <malloc-machine.h>
260 #ifdef _LIBC
261 #include <stdio-common/_itoa.h>
262 #endif
264 #ifdef __cplusplus
265 extern "C" {
266 #endif
268 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
270 /* #define LACKS_UNISTD_H */
272 #ifndef LACKS_UNISTD_H
273 #include <unistd.h>
274 #endif
276 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
278 /* #define LACKS_SYS_PARAM_H */
281 #include <stdio.h> /* needed for malloc_stats */
282 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
284 /* For uintptr_t. */
285 #include <stdint.h>
287 /* For va_arg, va_start, va_end. */
288 #include <stdarg.h>
290 /* For writev and struct iovec. */
291 #include <sys/uio.h>
292 /* For syslog. */
293 #include <sys/syslog.h>
295 /* For various dynamic linking things. */
296 #include <dlfcn.h>
300 Debugging:
302 Because freed chunks may be overwritten with bookkeeping fields, this
303 malloc will often die when freed memory is overwritten by user
304 programs. This can be very effective (albeit in an annoying way)
305 in helping track down dangling pointers.
307 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
308 enabled that will catch more memory errors. You probably won't be
309 able to make much sense of the actual assertion errors, but they
310 should help you locate incorrectly overwritten memory. The checking
311 is fairly extensive, and will slow down execution
312 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
313 will attempt to check every non-mmapped allocated and free chunk in
314   the course of computing the summaries. (By nature, mmapped regions
315 cannot be checked very much automatically.)
317 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
318 this code. The assertions in the check routines spell out in more
319 detail the assumptions and invariants underlying the algorithms.
321 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
322 checking that all accesses to malloced memory stay within their
323 bounds. However, there are several add-ons and adaptations of this
324 or other mallocs available that do this.
327 #if MALLOC_DEBUG
328 #include <assert.h>
329 #else
330 #undef assert
331 #define assert(x) ((void)0)
332 #endif
336 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
337 of chunk sizes.
339 The default version is the same as size_t.
341 While not strictly necessary, it is best to define this as an
342 unsigned type, even if size_t is a signed type. This may avoid some
343 artificial size limitations on some systems.
345 On a 64-bit machine, you may be able to reduce malloc overhead by
346 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
347 expense of not being able to handle more than 2^32 of malloced
348 space. If this limitation is acceptable, you are encouraged to set
349 this unless you are on a platform requiring 16byte alignments. In
350 this case the alignment requirements turn out to negate any
351 potential advantages of decreasing size_t word size.
353 Implementors: Beware of the possible combinations of:
354 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
355 and might be the same width as int or as long
356   - size_t might differ in width and signedness from INTERNAL_SIZE_T
357 - int and long might be 32 or 64 bits, and might be the same width
358 To deal with this, most comparisons and difference computations
359 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
360 aware of the fact that casting an unsigned int to a wider long does
361 not sign-extend. (This also makes checking for negative numbers
362 awkward.) Some of these casts result in harmless compiler warnings
363 on some systems.
366 #ifndef INTERNAL_SIZE_T
367 #define INTERNAL_SIZE_T size_t
368 #endif
370 /* The corresponding word size */
371 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
375 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
376 It must be a power of two at least 2 * SIZE_SZ, even on machines
377 for which smaller alignments would suffice. It may be defined as
378 larger than this though. Note however that code and data structures
379 are optimized for the case of 8-byte alignment.
383 #ifndef MALLOC_ALIGNMENT
384 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
385 #endif
387 /* The corresponding bit mask value */
388 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
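
  As an illustrative sketch (not the exact macro used later in this file),
  a request can be padded for the size word and rounded up to a multiple of
  MALLOC_ALIGNMENT with the usual mask idiom:

    /* Hypothetical helper: round a request up to an aligned chunk size,
       adding room for the size/status word. */
    #define ROUND_UP_REQUEST(req) \
      (((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)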
393 REALLOC_ZERO_BYTES_FREES should be set if a call to
394 realloc with zero bytes should be the same as a call to free.
395 This is required by the C standard. Otherwise, since this malloc
396 returns a unique pointer for malloc(0), so does realloc(p, 0).
399 #ifndef REALLOC_ZERO_BYTES_FREES
400 #define REALLOC_ZERO_BYTES_FREES 1
401 #endif
404 TRIM_FASTBINS controls whether free() of a very small chunk can
405 immediately lead to trimming. Setting to true (1) can reduce memory
406 footprint, but will almost always slow down programs that use a lot
407 of small chunks.
409 Define this only if you are willing to give up some speed to more
410 aggressively reduce system-level memory footprint when releasing
411 memory in programs that use many small chunks. You can get
412 essentially the same effect by setting MXFAST to 0, but this can
413 lead to even greater slowdowns in programs using many small chunks.
414   TRIM_FASTBINS is an in-between compile-time option that prevents
415   only those chunks bordering topmost memory from being placed in
416   fastbins.
419 #ifndef TRIM_FASTBINS
420 #define TRIM_FASTBINS 0
421 #endif
425 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
426 This is necessary when you only want to use this malloc in one part
427 of a program, using your regular system malloc elsewhere.
430 /* #define USE_DL_PREFIX */
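
  For example, a program built with USE_DL_PREFIX defined would call the
  prefixed entry points set up below (a usage sketch, not part of this file):

    char* buf = (char*) dlmalloc(128);   /* prefixed malloc */
    if (buf != 0) {
      /* ... use buf ... */
      dlfree(buf);                       /* prefixed free */
    }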
434 Two-phase name translation.
435 All of the actual routines are given mangled names.
436 When wrappers are used, they become the public callable versions.
437 When DL_PREFIX is used, the callable names are prefixed.
440 #ifdef USE_DL_PREFIX
441 #define public_cALLOc dlcalloc
442 #define public_fREe dlfree
443 #define public_cFREe dlcfree
444 #define public_mALLOc dlmalloc
445 #define public_mEMALIGn dlmemalign
446 #define public_rEALLOc dlrealloc
447 #define public_vALLOc dlvalloc
448 #define public_pVALLOc dlpvalloc
449 #define public_mALLINFo dlmallinfo
450 #define public_mALLOPt dlmallopt
451 #define public_mTRIm dlmalloc_trim
452 #define public_mSTATs dlmalloc_stats
453 #define public_mUSABLe dlmalloc_usable_size
454 #define public_iCALLOc dlindependent_calloc
455 #define public_iCOMALLOc dlindependent_comalloc
456 #define public_gET_STATe dlget_state
457 #define public_sET_STATe dlset_state
458 #else /* USE_DL_PREFIX */
459 #ifdef _LIBC
461 /* Special defines for the GNU C library. */
462 #define public_cALLOc __libc_calloc
463 #define public_fREe __libc_free
464 #define public_cFREe __libc_cfree
465 #define public_mALLOc __libc_malloc
466 #define public_mEMALIGn __libc_memalign
467 #define public_rEALLOc __libc_realloc
468 #define public_vALLOc __libc_valloc
469 #define public_pVALLOc __libc_pvalloc
470 #define public_mALLINFo __libc_mallinfo
471 #define public_mALLOPt __libc_mallopt
472 #define public_mTRIm __malloc_trim
473 #define public_mSTATs __malloc_stats
474 #define public_mUSABLe __malloc_usable_size
475 #define public_iCALLOc __libc_independent_calloc
476 #define public_iCOMALLOc __libc_independent_comalloc
477 #define public_gET_STATe __malloc_get_state
478 #define public_sET_STATe __malloc_set_state
479 #define malloc_getpagesize __getpagesize()
480 #define open __open
481 #define mmap __mmap
482 #define munmap __munmap
483 #define mremap __mremap
484 #define mprotect __mprotect
485 #define MORECORE (*__morecore)
486 #define MORECORE_FAILURE 0
488 Void_t * __default_morecore (ptrdiff_t);
489 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
491 #else /* !_LIBC */
492 #define public_cALLOc calloc
493 #define public_fREe free
494 #define public_cFREe cfree
495 #define public_mALLOc malloc
496 #define public_mEMALIGn memalign
497 #define public_rEALLOc realloc
498 #define public_vALLOc valloc
499 #define public_pVALLOc pvalloc
500 #define public_mALLINFo mallinfo
501 #define public_mALLOPt mallopt
502 #define public_mTRIm malloc_trim
503 #define public_mSTATs malloc_stats
504 #define public_mUSABLe malloc_usable_size
505 #define public_iCALLOc independent_calloc
506 #define public_iCOMALLOc independent_comalloc
507 #define public_gET_STATe malloc_get_state
508 #define public_sET_STATe malloc_set_state
509 #endif /* _LIBC */
510 #endif /* USE_DL_PREFIX */
512 #ifndef _LIBC
513 #define __builtin_expect(expr, val) (expr)
515 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
516 #endif
519 HAVE_MEMCPY should be defined if you are not otherwise using
520 ANSI STD C, but still have memcpy and memset in your C library
521 and want to use them in calloc and realloc. Otherwise simple
522 macro versions are defined below.
524 USE_MEMCPY should be defined as 1 if you actually want to
525 have memset and memcpy called. People report that the macro
526 versions are faster than libc versions on some systems.
528 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
529 (of <= 36 bytes) are manually unrolled in realloc and calloc.
532 #define HAVE_MEMCPY
534 #ifndef USE_MEMCPY
535 #ifdef HAVE_MEMCPY
536 #define USE_MEMCPY 1
537 #else
538 #define USE_MEMCPY 0
539 #endif
540 #endif
543 #if (__STD_C || defined(HAVE_MEMCPY))
545 #ifdef _LIBC
546 # include <string.h>
547 #else
548 #ifdef WIN32
549 /* On Win32 memset and memcpy are already declared in windows.h */
550 #else
551 #if __STD_C
552 void* memset(void*, int, size_t);
553 void* memcpy(void*, const void*, size_t);
554 #else
555 Void_t* memset();
556 Void_t* memcpy();
557 #endif
558 #endif
559 #endif
560 #endif
563 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
564 malloc fails to be able to return memory, either because memory is
565 exhausted or because of illegal arguments.
567 By default, sets errno if running on STD_C platform, else does nothing.
570 #ifndef MALLOC_FAILURE_ACTION
571 #if __STD_C
572 #define MALLOC_FAILURE_ACTION \
573 errno = ENOMEM;
575 #else
576 #define MALLOC_FAILURE_ACTION
577 #endif
578 #endif
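
  A build can substitute its own action; for example (an illustrative
  override, assuming write() from <unistd.h> is available):

    #define MALLOC_FAILURE_ACTION \
      do { errno = ENOMEM; write(2, "malloc: out of memory\n", 22); } while (0)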
581 MORECORE-related declarations. By default, rely on sbrk
585 #ifdef LACKS_UNISTD_H
586 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
587 #if __STD_C
588 extern Void_t* sbrk(ptrdiff_t);
589 #else
590 extern Void_t* sbrk();
591 #endif
592 #endif
593 #endif
596 MORECORE is the name of the routine to call to obtain more memory
597 from the system. See below for general guidance on writing
598 alternative MORECORE functions, as well as a version for WIN32 and a
599 sample version for pre-OSX macos.
602 #ifndef MORECORE
603 #define MORECORE sbrk
604 #endif
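
  A minimal sketch of an alternative MORECORE backed by a fixed static
  arena (names and sizes here are hypothetical; failure is reported with
  the MORECORE_FAILURE convention described just below, and trimming via
  negative increments is not supported):

    #define FIXED_ARENA_SIZE (8 * 1024 * 1024)
    static char   fixed_arena[FIXED_ARENA_SIZE];
    static size_t fixed_brk = 0;

    Void_t* fixed_morecore(ptrdiff_t increment)
    {
      Void_t* p;
      if (increment < 0)                              /* cannot trim */
        return (Void_t*)(-1);
      if (fixed_brk + (size_t) increment > FIXED_ARENA_SIZE)
        return (Void_t*)(-1);                         /* arena exhausted */
      p = fixed_arena + fixed_brk;
      fixed_brk += (size_t) increment;
      return p;                        /* increment == 0 returns current top */
    }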
607 MORECORE_FAILURE is the value returned upon failure of MORECORE
608 as well as mmap. Since it cannot be an otherwise valid memory address,
609 and must reflect values of standard sys calls, you probably ought not
610 try to redefine it.
613 #ifndef MORECORE_FAILURE
614 #define MORECORE_FAILURE (-1)
615 #endif
618 If MORECORE_CONTIGUOUS is true, take advantage of fact that
619 consecutive calls to MORECORE with positive arguments always return
620 contiguous increasing addresses. This is true of unix sbrk. Even
621 if not defined, when regions happen to be contiguous, malloc will
622 permit allocations spanning regions obtained from different
623 calls. But defining this when applicable enables some stronger
624 consistency checks and space efficiencies.
627 #ifndef MORECORE_CONTIGUOUS
628 #define MORECORE_CONTIGUOUS 1
629 #endif
632 Define MORECORE_CANNOT_TRIM if your version of MORECORE
633 cannot release space back to the system when given negative
634 arguments. This is generally necessary only if you are using
635 a hand-crafted MORECORE function that cannot handle negative arguments.
638 /* #define MORECORE_CANNOT_TRIM */
640 /* MORECORE_CLEARS (default 1)
641 The degree to which the routine mapped to MORECORE zeroes out
642 memory: never (0), only for newly allocated space (1) or always
643 (2). The distinction between (1) and (2) is necessary because on
644 some systems, if the application first decrements and then
645 increments the break value, the contents of the reallocated space
646 are unspecified.
649 #ifndef MORECORE_CLEARS
650 #define MORECORE_CLEARS 1
651 #endif
655 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
656 allocate very large blocks. These will be returned to the
657 operating system immediately after a free(). Also, if mmap
658 is available, it is used as a backup strategy in cases where
659 MORECORE fails to provide space from system.
661 This malloc is best tuned to work with mmap for large requests.
662 If you do not have mmap, operations involving very large chunks (1MB
663 or so) may be slower than you'd like.
666 #ifndef HAVE_MMAP
667 #define HAVE_MMAP 1
670 Standard unix mmap using /dev/zero clears memory so calloc doesn't
671 need to.
674 #ifndef MMAP_CLEARS
675 #define MMAP_CLEARS 1
676 #endif
678 #else /* no mmap */
679 #ifndef MMAP_CLEARS
680 #define MMAP_CLEARS 0
681 #endif
682 #endif
686 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
687 sbrk fails, and mmap is used as a backup (which is done only if
688 HAVE_MMAP). The value must be a multiple of page size. This
689 backup strategy generally applies only when systems have "holes" in
690 address space, so sbrk cannot perform contiguous expansion, but
691 there is still space available on system. On systems for which
692 this is known to be useful (i.e. most linux kernels), this occurs
693 only when programs allocate huge amounts of memory. Between this,
694 and the fact that mmap regions tend to be limited, the size should
695 be large, to avoid too many mmap calls and thus avoid running out
696 of kernel resources.
699 #ifndef MMAP_AS_MORECORE_SIZE
700 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
701 #endif
704 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
705 large blocks. This is currently only possible on Linux with
706 kernel versions newer than 1.3.77.
709 #ifndef HAVE_MREMAP
710 #ifdef linux
711 #define HAVE_MREMAP 1
712 #else
713 #define HAVE_MREMAP 0
714 #endif
716 #endif /* HAVE_MMAP */
718 /* Define USE_ARENAS to enable support for multiple `arenas'. These
719 are allocated using mmap(), are necessary for threads and
720 occasionally useful to overcome address space limitations affecting
721 sbrk(). */
723 #ifndef USE_ARENAS
724 #define USE_ARENAS HAVE_MMAP
725 #endif
729 The system page size. To the extent possible, this malloc manages
730 memory from the system in page-size units. Note that this value is
731 cached during initialization into a field of malloc_state. So even
732 if malloc_getpagesize is a function, it is only called once.
734 The following mechanics for getpagesize were adapted from bsd/gnu
735 getpagesize.h. If none of the system-probes here apply, a value of
736 4096 is used, which should be OK: If they don't apply, then using
737 the actual value probably doesn't impact performance.
741 #ifndef malloc_getpagesize
743 #ifndef LACKS_UNISTD_H
744 # include <unistd.h>
745 #endif
747 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
748 # ifndef _SC_PAGE_SIZE
749 # define _SC_PAGE_SIZE _SC_PAGESIZE
750 # endif
751 # endif
753 # ifdef _SC_PAGE_SIZE
754 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
755 # else
756 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
757 extern size_t getpagesize();
758 # define malloc_getpagesize getpagesize()
759 # else
760 # ifdef WIN32 /* use supplied emulation of getpagesize */
761 # define malloc_getpagesize getpagesize()
762 # else
763 # ifndef LACKS_SYS_PARAM_H
764 # include <sys/param.h>
765 # endif
766 # ifdef EXEC_PAGESIZE
767 # define malloc_getpagesize EXEC_PAGESIZE
768 # else
769 # ifdef NBPG
770 # ifndef CLSIZE
771 # define malloc_getpagesize NBPG
772 # else
773 # define malloc_getpagesize (NBPG * CLSIZE)
774 # endif
775 # else
776 # ifdef NBPC
777 # define malloc_getpagesize NBPC
778 # else
779 # ifdef PAGESIZE
780 # define malloc_getpagesize PAGESIZE
781 # else /* just guess */
782 # define malloc_getpagesize (4096)
783 # endif
784 # endif
785 # endif
786 # endif
787 # endif
788 # endif
789 # endif
790 #endif
793 This version of malloc supports the standard SVID/XPG mallinfo
794 routine that returns a struct containing usage properties and
795 statistics. It should work on any SVID/XPG compliant system that has
796 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
797 install such a thing yourself, cut out the preliminary declarations
798 as described above and below and save them in a malloc.h file. But
799 there's no compelling reason to bother to do this.)
801 The main declaration needed is the mallinfo struct that is returned
802   (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
803 bunch of fields that are not even meaningful in this version of
804   malloc. These fields are instead filled by mallinfo() with
805 other numbers that might be of interest.
807 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
808 /usr/include/malloc.h file that includes a declaration of struct
809 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
810 version is declared below. These must be precisely the same for
811 mallinfo() to work. The original SVID version of this struct,
812 defined on most systems with mallinfo, declares all fields as
813 ints. But some others define as unsigned long. If your system
814 defines the fields using a type of different width than listed here,
815 you must #include your system version and #define
816 HAVE_USR_INCLUDE_MALLOC_H.
819 /* #define HAVE_USR_INCLUDE_MALLOC_H */
821 #ifdef HAVE_USR_INCLUDE_MALLOC_H
822 #include "/usr/include/malloc.h"
823 #endif
826 /* ---------- description of public routines ------------ */
829 malloc(size_t n)
830 Returns a pointer to a newly allocated chunk of at least n bytes, or null
831 if no space is available. Additionally, on failure, errno is
832 set to ENOMEM on ANSI C systems.
834   If n is zero, malloc returns a minimum-sized chunk. (The minimum
835 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
836 systems.) On most systems, size_t is an unsigned type, so calls
837 with negative arguments are interpreted as requests for huge amounts
838 of space, which will often fail. The maximum supported value of n
839 differs across systems, but is in all cases less than the maximum
840 representable value of a size_t.
842 #if __STD_C
843 Void_t* public_mALLOc(size_t);
844 #else
845 Void_t* public_mALLOc();
846 #endif
847 #ifdef libc_hidden_proto
848 libc_hidden_proto (public_mALLOc)
849 #endif
852 free(Void_t* p)
853 Releases the chunk of memory pointed to by p, that had been previously
854 allocated using malloc or a related routine such as realloc.
855 It has no effect if p is null. It can have arbitrary (i.e., bad!)
856 effects if p has already been freed.
858 Unless disabled (using mallopt), freeing very large spaces will
859 when possible, automatically trigger operations that give
860 back unused memory to the system, thus reducing program footprint.
862 #if __STD_C
863 void public_fREe(Void_t*);
864 #else
865 void public_fREe();
866 #endif
867 #ifdef libc_hidden_proto
868 libc_hidden_proto (public_fREe)
869 #endif
872 calloc(size_t n_elements, size_t element_size);
873 Returns a pointer to n_elements * element_size bytes, with all locations
874 set to zero.
876 #if __STD_C
877 Void_t* public_cALLOc(size_t, size_t);
878 #else
879 Void_t* public_cALLOc();
880 #endif
883 realloc(Void_t* p, size_t n)
884 Returns a pointer to a chunk of size n that contains the same data
885 as does chunk p up to the minimum of (n, p's size) bytes, or null
886 if no space is available.
888 The returned pointer may or may not be the same as p. The algorithm
889 prefers extending p when possible, otherwise it employs the
890 equivalent of a malloc-copy-free sequence.
892 If p is null, realloc is equivalent to malloc.
894 If space is not available, realloc returns null, errno is set (if on
895 ANSI) and p is NOT freed.
897   If n is for fewer bytes than already held by p, the newly unused
898 space is lopped off and freed if possible. Unless the #define
899 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
900 zero (re)allocates a minimum-sized chunk.
902 Large chunks that were internally obtained via mmap will always
903 be reallocated using malloc-copy-free sequences unless
904 the system supports MREMAP (currently only linux).
906 The old unix realloc convention of allowing the last-free'd chunk
907 to be used as an argument to realloc is not supported.
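
  A usage sketch respecting the failure rule above (since the old block is
  NOT freed on failure, the original pointer must not be overwritten blindly;
  grow_buffer is a hypothetical helper):

    char* grow_buffer(char* old, size_t new_size)
    {
      char* p = (char*) realloc(old, new_size);
      if (p == 0)
        return old;      /* realloc failed: old block is still valid */
      return p;          /* may or may not equal old */
    }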
909 #if __STD_C
910 Void_t* public_rEALLOc(Void_t*, size_t);
911 #else
912 Void_t* public_rEALLOc();
913 #endif
914 #ifdef libc_hidden_proto
915 libc_hidden_proto (public_rEALLOc)
916 #endif
919 memalign(size_t alignment, size_t n);
920 Returns a pointer to a newly allocated chunk of n bytes, aligned
921 in accord with the alignment argument.
923 The alignment argument should be a power of two. If the argument is
924 not a power of two, the nearest greater power is used.
925 8-byte alignment is guaranteed by normal malloc calls, so don't
926 bother calling memalign with an argument of 8 or less.
928 Overreliance on memalign is a sure way to fragment space.
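
  Usage sketch (64 is an arbitrary example alignment):

    Void_t* p = memalign(64, 1000);
    if (p != 0)
      assert(((unsigned long) p & 63) == 0);   /* 64-byte aligned */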
930 #if __STD_C
931 Void_t* public_mEMALIGn(size_t, size_t);
932 #else
933 Void_t* public_mEMALIGn();
934 #endif
935 #ifdef libc_hidden_proto
936 libc_hidden_proto (public_mEMALIGn)
937 #endif
940 valloc(size_t n);
941 Equivalent to memalign(pagesize, n), where pagesize is the page
942 size of the system. If the pagesize is unknown, 4096 is used.
944 #if __STD_C
945 Void_t* public_vALLOc(size_t);
946 #else
947 Void_t* public_vALLOc();
948 #endif
953 mallopt(int parameter_number, int parameter_value)
954   Sets tunable parameters. The format is to provide a
955 (parameter-number, parameter-value) pair. mallopt then sets the
956 corresponding parameter to the argument value if it can (i.e., so
957 long as the value is meaningful), and returns 1 if successful else
958 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
959 normally defined in malloc.h. Only one of these (M_MXFAST) is used
960 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
961 so setting them has no effect. But this malloc also supports four
962 other options in mallopt. See below for details. Briefly, supported
963 parameters are as follows (listed defaults are for "typical"
964 configurations).
966 Symbol param # default allowed param values
967 M_MXFAST 1 64 0-80 (0 disables fastbins)
968 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
969 M_TOP_PAD -2 0 any
970 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
971 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
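
  A tuning sketch using the parameters above (the values are illustrative,
  not recommendations):

    mallopt(M_MXFAST, 32);                 /* fastbins for chunks <= 32 bytes */
    mallopt(M_TRIM_THRESHOLD, 256 * 1024); /* trim when 256K idle at the top  */
    mallopt(M_MMAP_THRESHOLD, 256 * 1024); /* mmap requests of 256K and up    */
    mallopt(M_MMAP_MAX, 1024);             /* at most 1024 mmapped regions    */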
973 #if __STD_C
974 int public_mALLOPt(int, int);
975 #else
976 int public_mALLOPt();
977 #endif
981 mallinfo()
982 Returns (by copy) a struct containing various summary statistics:
984 arena: current total non-mmapped bytes allocated from system
985 ordblks: the number of free chunks
986 smblks: the number of fastbin blocks (i.e., small chunks that
987            have been freed but not yet reused or consolidated)
988 hblks: current number of mmapped regions
989 hblkhd: total bytes held in mmapped regions
990 usmblks: the maximum total allocated space. This will be greater
991 than current total if trimming has occurred.
992 fsmblks: total bytes held in fastbin blocks
993 uordblks: current total allocated space (normal or mmapped)
994 fordblks: total free space
995 keepcost: the maximum number of bytes that could ideally be released
996 back to system via malloc_trim. ("ideally" means that
997 it ignores page restrictions etc.)
999 Because these fields are ints, but internal bookkeeping may
1000 be kept as longs, the reported values may wrap around zero and
1001 thus be inaccurate.
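
  Usage sketch printing a few of the fields described above:

    struct mallinfo mi = mallinfo();
    fprintf(stderr, "arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
            mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);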
1003 #if __STD_C
1004 struct mallinfo public_mALLINFo(void);
1005 #else
1006 struct mallinfo public_mALLINFo();
1007 #endif
1009 #ifndef _LIBC
1011 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1013 independent_calloc is similar to calloc, but instead of returning a
1014 single cleared space, it returns an array of pointers to n_elements
1015 independent elements that can hold contents of size elem_size, each
1016 of which starts out cleared, and can be independently freed,
1017 realloc'ed etc. The elements are guaranteed to be adjacently
1018 allocated (this is not guaranteed to occur with multiple callocs or
1019 mallocs), which may also improve cache locality in some
1020 applications.
1022 The "chunks" argument is optional (i.e., may be null, which is
1023 probably the most typical usage). If it is null, the returned array
1024 is itself dynamically allocated and should also be freed when it is
1025 no longer needed. Otherwise, the chunks array must be of at least
1026 n_elements in length. It is filled in with the pointers to the
1027 chunks.
1029 In either case, independent_calloc returns this pointer array, or
1030 null if the allocation failed. If n_elements is zero and "chunks"
1031 is null, it returns a chunk representing an array with zero elements
1032 (which should be freed if not wanted).
1034 Each element must be individually freed when it is no longer
1035 needed. If you'd like to instead be able to free all at once, you
1036 should instead use regular calloc and assign pointers into this
1037 space to represent elements. (In this case though, you cannot
1038 independently free elements.)
1040 independent_calloc simplifies and speeds up implementations of many
1041 kinds of pools. It may also be useful when constructing large data
1042 structures that initially have a fixed number of fixed-sized nodes,
1043 but the number is not known at compile time, and some of the nodes
1044 may later need to be freed. For example:
1046 struct Node { int item; struct Node* next; };
1048 struct Node* build_list() {
1049 struct Node** pool;
1050 int n = read_number_of_nodes_needed();
1051 if (n <= 0) return 0;
1052     pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1053 if (pool == 0) die();
1054 // organize into a linked list...
1055 struct Node* first = pool[0];
1056     for (int i = 0; i < n-1; ++i)
1057 pool[i]->next = pool[i+1];
1058 free(pool); // Can now free the array (or not, if it is needed later)
1059 return first;
1062 #if __STD_C
1063 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1064 #else
1065 Void_t** public_iCALLOc();
1066 #endif
1069 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1071 independent_comalloc allocates, all at once, a set of n_elements
1072 chunks with sizes indicated in the "sizes" array. It returns
1073 an array of pointers to these elements, each of which can be
1074 independently freed, realloc'ed etc. The elements are guaranteed to
1075 be adjacently allocated (this is not guaranteed to occur with
1076 multiple callocs or mallocs), which may also improve cache locality
1077 in some applications.
1079 The "chunks" argument is optional (i.e., may be null). If it is null
1080 the returned array is itself dynamically allocated and should also
1081 be freed when it is no longer needed. Otherwise, the chunks array
1082 must be of at least n_elements in length. It is filled in with the
1083 pointers to the chunks.
1085 In either case, independent_comalloc returns this pointer array, or
1086 null if the allocation failed. If n_elements is zero and chunks is
1087 null, it returns a chunk representing an array with zero elements
1088 (which should be freed if not wanted).
1090 Each element must be individually freed when it is no longer
1091 needed. If you'd like to instead be able to free all at once, you
1092 should instead use a single regular malloc, and assign pointers at
1093 particular offsets in the aggregate space. (In this case though, you
1094 cannot independently free elements.)
1096   independent_comalloc differs from independent_calloc in that each
1097 element may have a different size, and also that it does not
1098 automatically clear elements.
1100 independent_comalloc can be used to speed up allocation in cases
1101 where several structs or objects must always be allocated at the
1102 same time. For example:
1104 struct Head { ... }
1105 struct Foot { ... }
1107 void send_message(char* msg) {
1108 int msglen = strlen(msg);
1109 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1110 void* chunks[3];
1111 if (independent_comalloc(3, sizes, chunks) == 0)
1112 die();
1113 struct Head* head = (struct Head*)(chunks[0]);
1114 char* body = (char*)(chunks[1]);
1115 struct Foot* foot = (struct Foot*)(chunks[2]);
1116 // ...
1119 In general though, independent_comalloc is worth using only for
1120 larger values of n_elements. For small values, you probably won't
1121 detect enough difference from series of malloc calls to bother.
1123 Overuse of independent_comalloc can increase overall memory usage,
1124 since it cannot reuse existing noncontiguous small chunks that
1125 might be available for some of the elements.
1127 #if __STD_C
1128 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1129 #else
1130 Void_t** public_iCOMALLOc();
1131 #endif
1133 #endif /* _LIBC */
1137 pvalloc(size_t n);
1138 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1139 round up n to nearest pagesize.
1141 #if __STD_C
1142 Void_t* public_pVALLOc(size_t);
1143 #else
1144 Void_t* public_pVALLOc();
1145 #endif
1148 cfree(Void_t* p);
1149 Equivalent to free(p).
1151 cfree is needed/defined on some systems that pair it with calloc,
1152 for odd historical reasons (such as: cfree is used in example
1153 code in the first edition of K&R).
1155 #if __STD_C
1156 void public_cFREe(Void_t*);
1157 #else
1158 void public_cFREe();
1159 #endif
1162 malloc_trim(size_t pad);
1164 If possible, gives memory back to the system (via negative
1165 arguments to sbrk) if there is unused memory at the `high' end of
1166 the malloc pool. You can call this after freeing large blocks of
1167 memory to potentially reduce the system-level memory requirements
1168 of a program. However, it cannot guarantee to reduce memory. Under
1169 some allocation patterns, some large free blocks of memory will be
1170 locked between two used chunks, so they cannot be given back to
1171 the system.
1173 The `pad' argument to malloc_trim represents the amount of free
1174 trailing space to leave untrimmed. If this argument is zero,
1175 only the minimum amount of memory to maintain internal data
1176 structures will be left (one page or less). Non-zero arguments
1177 can be supplied to maintain enough trailing space to service
1178 future expected allocations without having to re-obtain memory
1179 from the system.
1181 Malloc_trim returns 1 if it actually released any memory, else 0.
1182 On systems that do not support "negative sbrks", it will always
1183   return 0.
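
  Usage sketch, keeping 64K of trailing slack (the pad value is illustrative):

    /* ... after freeing large blocks ... */
    int released = malloc_trim(64 * 1024);   /* 1 if memory was returned */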
1185 #if __STD_C
1186 int public_mTRIm(size_t);
1187 #else
1188 int public_mTRIm();
1189 #endif
1192 malloc_usable_size(Void_t* p);
1194 Returns the number of bytes you can actually use in
1195 an allocated chunk, which may be more than you requested (although
1196 often not) due to alignment and minimum size constraints.
1197 You can use this many bytes without worrying about
1198 overwriting other allocated objects. This is not a particularly great
1199 programming practice. malloc_usable_size can be more useful in
1200 debugging and assertions, for example:
1202 p = malloc(n);
1203 assert(malloc_usable_size(p) >= 256);
1206 #if __STD_C
1207 size_t public_mUSABLe(Void_t*);
1208 #else
1209 size_t public_mUSABLe();
1210 #endif
1213 malloc_stats();
1214 Prints on stderr the amount of space obtained from the system (both
1215 via sbrk and mmap), the maximum amount (which may be more than
1216 current if malloc_trim and/or munmap got called), and the current
1217 number of bytes allocated via malloc (or realloc, etc) but not yet
1218 freed. Note that this is the number of bytes allocated, not the
1219 number requested. It will be larger than the number requested
1220 because of alignment and bookkeeping overhead. Because it includes
1221 alignment wastage as being in use, this figure may be greater than
1222 zero even when no user-level chunks are allocated.
1224 The reported current and maximum system memory can be inaccurate if
1225 a program makes other calls to system memory allocation functions
1226 (normally sbrk) outside of malloc.
1228 malloc_stats prints only the most commonly interesting statistics.
1229 More information can be obtained by calling mallinfo.
1232 #if __STD_C
1233 void public_mSTATs(void);
1234 #else
1235 void public_mSTATs();
1236 #endif
1239 malloc_get_state(void);
1241 Returns the state of all malloc variables in an opaque data
1242 structure.
1244 #if __STD_C
1245 Void_t* public_gET_STATe(void);
1246 #else
1247 Void_t* public_gET_STATe();
1248 #endif
1251 malloc_set_state(Void_t* state);
1253 Restore the state of all malloc variables from data obtained with
1254 malloc_get_state().
1256 #if __STD_C
1257 int public_sET_STATe(Void_t*);
1258 #else
1259 int public_sET_STATe();
1260 #endif
1262 #ifdef _LIBC
1264 posix_memalign(void **memptr, size_t alignment, size_t size);
1266 POSIX wrapper like memalign(), checking for validity of size.
1268 int __posix_memalign(void **, size_t, size_t);
1269 #endif
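
  Usage sketch (per POSIX, alignment must be a power-of-two multiple of
  sizeof(void*), and the result is an error number rather than an errno
  setting):

    void* mem;
    int err = posix_memalign(&mem, 64, 1000);
    if (err == 0) {
      /* ... use mem ... */
      free(mem);
    }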
1271 /* mallopt tuning options */
1274 M_MXFAST is the maximum request size used for "fastbins", special bins
1275 that hold returned chunks without consolidating their spaces. This
1276 enables future requests for chunks of the same size to be handled
1277 very quickly, but can increase fragmentation, and thus increase the
1278 overall memory footprint of a program.
1280 This malloc manages fastbins very conservatively yet still
1281 efficiently, so fragmentation is rarely a problem for values less
1282 than or equal to the default. The maximum supported value of MXFAST
1283 is 80. You wouldn't want it any higher than this anyway. Fastbins
1284 are designed especially for use with many small structs, objects or
1285 strings -- the default handles structs/objects/arrays with sizes up
1286 to 8 4byte fields, or small strings representing words, tokens,
1287 etc. Using fastbins for larger objects normally worsens
1288 fragmentation without improving speed.
1290 M_MXFAST is set in REQUEST size units. It is internally used in
1291 chunksize units, which adds padding and alignment. You can reduce
1292 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1293 algorithm to be a closer approximation of fifo-best-fit in all cases,
1294 not just for larger requests, but will generally cause it to be
1295 slower.
1299 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1300 #ifndef M_MXFAST
1301 #define M_MXFAST 1
1302 #endif
1304 #ifndef DEFAULT_MXFAST
1305 #define DEFAULT_MXFAST 64
1306 #endif
1310 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1311 to keep before releasing via malloc_trim in free().
1313 Automatic trimming is mainly useful in long-lived programs.
1314 Because trimming via sbrk can be slow on some systems, and can
1315 sometimes be wasteful (in cases where programs immediately
1316 afterward allocate more large chunks) the value should be high
1317 enough so that your overall system performance would improve by
1318 releasing this much memory.
1320 The trim threshold and the mmap control parameters (see below)
1321 can be traded off with one another. Trimming and mmapping are
1322 two different ways of releasing unused memory back to the
1323 system. Between these two, it is often possible to keep
1324 system-level demands of a long-lived program down to a bare
1325 minimum. For example, in one test suite of sessions measuring
1326 the XF86 X server on Linux, using a trim threshold of 128K and a
1327 mmap threshold of 192K led to near-minimal long term resource
1328 consumption.
1330 If you are using this malloc in a long-lived program, it should
1331 pay to experiment with these values. As a rough guide, you
1332 might set to a value close to the average size of a process
1333 (program) running on your system. Releasing this much memory
1334 would allow such a process to run in memory. Generally, it's
1335       worth it to tune for trimming rather than memory mapping when a
1336 program undergoes phases where several large chunks are
1337 allocated and released in ways that can reuse each other's
1338 storage, perhaps mixed with phases where there are no such
1339 chunks at all. And in well-behaved long-lived programs,
1340 controlling release of large blocks via trimming versus mapping
1341 is usually faster.
1343 However, in most programs, these parameters serve mainly as
1344 protection against the system-level effects of carrying around
1345 massive amounts of unneeded memory. Since frequent calls to
1346 sbrk, mmap, and munmap otherwise degrade performance, the default
1347 parameters are set to relatively high values that serve only as
1348 safeguards.
1350       The trim value must be greater than page size to have any useful
1351 effect. To disable trimming completely, you can set to
1352 (unsigned long)(-1)
1354 Trim settings interact with fastbin (MXFAST) settings: Unless
1355 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1356 freeing a chunk with size less than or equal to MXFAST. Trimming is
1357 instead delayed until subsequent freeing of larger chunks. However,
1358 you can still force an attempted trim by calling malloc_trim.
1360 Also, trimming is not generally possible in cases where
1361 the main arena is obtained via mmap.
1363 Note that the trick some people use of mallocing a huge space and
1364 then freeing it at program startup, in an attempt to reserve system
1365 memory, doesn't have the intended effect under automatic trimming,
1366 since that memory will immediately be returned to the system.
1369 #define M_TRIM_THRESHOLD -1
1371 #ifndef DEFAULT_TRIM_THRESHOLD
1372 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1373 #endif
1376 M_TOP_PAD is the amount of extra `padding' space to allocate or
1377 retain whenever sbrk is called. It is used in two ways internally:
1379 * When sbrk is called to extend the top of the arena to satisfy
1380 a new malloc request, this much padding is added to the sbrk
1381 request.
1383 * When malloc_trim is called automatically from free(),
1384 it is used as the `pad' argument.
1386 In both cases, the actual amount of padding is rounded
1387 so that the end of the arena is always a system page boundary.
1389 The main reason for using padding is to avoid calling sbrk so
1390 often. Having even a small pad greatly reduces the likelihood
1391 that nearly every malloc request during program start-up (or
1392 after trimming) will invoke sbrk, which needlessly wastes
1393 time.
1395 Automatic rounding-up to page-size units is normally sufficient
1396 to avoid measurable overhead, so the default is 0. However, in
1397 systems where sbrk is relatively slow, it can pay to increase
1398 this value, at the expense of carrying around more memory than
1399 the program needs.
1402 #define M_TOP_PAD -2
1404 #ifndef DEFAULT_TOP_PAD
1405 #define DEFAULT_TOP_PAD (0)
1406 #endif
1409 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1410 to service a request. Requests of at least this size that cannot
1411 be allocated using already-existing space will be serviced via mmap.
1412 (If enough normal freed space already exists it is used instead.)
1414 Using mmap segregates relatively large chunks of memory so that
1415 they can be individually obtained and released from the host
1416 system. A request serviced through mmap is never reused by any
1417 other request (at least not directly; the system may just so
1418 happen to remap successive requests to the same locations).
1420 Segregating space in this way has the benefits that:
1422 1. Mmapped space can ALWAYS be individually released back
1423 to the system, which helps keep the system level memory
1424 demands of a long-lived program low.
1425 2. Mapped memory can never become `locked' between
1426 other chunks, as can happen with normally allocated chunks, which
1427 means that even trimming via malloc_trim would not release them.
1428 3. On some systems with "holes" in address spaces, mmap can obtain
1429 memory that sbrk cannot.
1431 However, it has the disadvantages that:
1433 1. The space cannot be reclaimed, consolidated, and then
1434 used to service later requests, as happens with normal chunks.
1435 2. It can lead to more wastage because of mmap page alignment
1436 requirements
1437 3. It causes malloc performance to be more dependent on host
1438 system memory management support routines which may vary in
1439 implementation quality and may impose arbitrary
1440 limitations. Generally, servicing a request via normal
1441 malloc steps is faster than going through a system's mmap.
1443 The advantages of mmap nearly always outweigh disadvantages for
1444 "large" chunks, but the value of "large" varies across systems. The
1445 default is an empirically derived value that works well in most
1446 systems.
1449 #define M_MMAP_THRESHOLD -3
1451 #ifndef DEFAULT_MMAP_THRESHOLD
1452 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1453 #endif
1456 M_MMAP_MAX is the maximum number of requests to simultaneously
1457 service using mmap. This parameter exists because
1458 some systems have a limited number of internal tables for
1459 use by mmap, and using more than a few of them may degrade
1460 performance.
1462 The default is set to a value that serves only as a safeguard.
1463 Setting to 0 disables use of mmap for servicing large requests. If
1464 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1465 to non-zero values in mallopt will fail.
1468 #define M_MMAP_MAX -4
1470 #ifndef DEFAULT_MMAP_MAX
1471 #if HAVE_MMAP
1472 #define DEFAULT_MMAP_MAX (65536)
1473 #else
1474 #define DEFAULT_MMAP_MAX (0)
1475 #endif
1476 #endif
1478 #ifdef __cplusplus
1479 } /* end of extern "C" */
1480 #endif
1482 #include <malloc.h>
1484 #ifndef BOUNDED_N
1485 #define BOUNDED_N(ptr, sz) (ptr)
1486 #endif
1487 #ifndef RETURN_ADDRESS
1488 #define RETURN_ADDRESS(X_) (NULL)
1489 #endif
1491 /* On some platforms we can compile internal, not exported functions better.
1492 Let the environment provide a macro and define it to be empty if it
1493 is not available. */
1494 #ifndef internal_function
1495 # define internal_function
1496 #endif
1498 /* Forward declarations. */
1499 struct malloc_chunk;
1500 typedef struct malloc_chunk* mchunkptr;
1502 /* Internal routines. */
1504 #if __STD_C
1506 Void_t* _int_malloc(mstate, size_t);
1507 void _int_free(mstate, Void_t*);
1508 Void_t* _int_realloc(mstate, Void_t*, size_t);
1509 Void_t* _int_memalign(mstate, size_t, size_t);
1510 Void_t* _int_valloc(mstate, size_t);
1511 static Void_t* _int_pvalloc(mstate, size_t);
1512 /*static Void_t* cALLOc(size_t, size_t);*/
1513 #ifndef _LIBC
1514 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1515 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1516 #endif
1517 static int mTRIm(size_t);
1518 static size_t mUSABLe(Void_t*);
1519 static void mSTATs(void);
1520 static int mALLOPt(int, int);
1521 static struct mallinfo mALLINFo(mstate);
1522 static void malloc_printerr(int action, const char *str, void *ptr);
1524 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1525 static int internal_function top_check(void);
1526 static void internal_function munmap_chunk(mchunkptr p);
1527 #if HAVE_MREMAP
1528 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1529 #endif
1531 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1532 static void free_check(Void_t* mem, const Void_t *caller);
1533 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1534 const Void_t *caller);
1535 static Void_t* memalign_check(size_t alignment, size_t bytes,
1536 const Void_t *caller);
1537 #ifndef NO_THREADS
1538 # ifdef _LIBC
1539 # if USE___THREAD || (defined USE_TLS && !defined SHARED)
1540 /* These routines are never needed in this configuration. */
1541 # define NO_STARTER
1542 # endif
1543 # endif
1544 # ifdef NO_STARTER
1545 # undef NO_STARTER
1546 # else
1547 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1548 static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1549 static void free_starter(Void_t* mem, const Void_t *caller);
1550 # endif
1551 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1552 static void free_atfork(Void_t* mem, const Void_t *caller);
1553 #endif
1555 #else
1557 Void_t* _int_malloc();
1558 void _int_free();
1559 Void_t* _int_realloc();
1560 Void_t* _int_memalign();
1561 Void_t* _int_valloc();
1562 Void_t* _int_pvalloc();
1563 /*static Void_t* cALLOc();*/
1564 static Void_t** _int_icalloc();
1565 static Void_t** _int_icomalloc();
1566 static int mTRIm();
1567 static size_t mUSABLe();
1568 static void mSTATs();
1569 static int mALLOPt();
1570 static struct mallinfo mALLINFo();
1572 #endif
1577 /* ------------- Optional versions of memcopy ---------------- */
1580 #if USE_MEMCPY
1583 Note: memcpy is ONLY invoked with non-overlapping regions,
1584 so the (usually slower) memmove is not needed.
1587 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1588 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1590 #else /* !USE_MEMCPY */
1592 /* Use Duff's device for good zeroing/copying performance. */
1594 #define MALLOC_ZERO(charp, nbytes) \
1595 do { \
1596 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1597 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1598 long mcn; \
1599 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1600 switch (mctmp) { \
1601 case 0: for(;;) { *mzp++ = 0; \
1602 case 7: *mzp++ = 0; \
1603 case 6: *mzp++ = 0; \
1604 case 5: *mzp++ = 0; \
1605 case 4: *mzp++ = 0; \
1606 case 3: *mzp++ = 0; \
1607 case 2: *mzp++ = 0; \
1608 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1610 } while(0)
1612 #define MALLOC_COPY(dest,src,nbytes) \
1613 do { \
1614 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1615 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1616 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1617 long mcn; \
1618 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1619 switch (mctmp) { \
1620 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1621 case 7: *mcdst++ = *mcsrc++; \
1622 case 6: *mcdst++ = *mcsrc++; \
1623 case 5: *mcdst++ = *mcsrc++; \
1624 case 4: *mcdst++ = *mcsrc++; \
1625 case 3: *mcdst++ = *mcsrc++; \
1626 case 2: *mcdst++ = *mcsrc++; \
1627 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1629 } while(0)
1631 #endif
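/*
  Editorial sketch (not part of the original source): MALLOC_ZERO above is
  simply an 8-way unrolled version of the word-at-a-time loop below, and
  MALLOC_COPY is the same pattern with an assignment from the source word.
  Both assume nbytes is a nonzero multiple of sizeof(INTERNAL_SIZE_T), which
  holds for the chunk sizes this malloc passes in.

    INTERNAL_SIZE_T* zp = (INTERNAL_SIZE_T*)(charp);
    unsigned long n = (nbytes) / sizeof(INTERNAL_SIZE_T);
    while (n-- > 0)
      *zp++ = 0;
*/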
1633 /* ------------------ MMAP support ------------------ */
1636 #if HAVE_MMAP
1638 #include <fcntl.h>
1639 #ifndef LACKS_SYS_MMAN_H
1640 #include <sys/mman.h>
1641 #endif
1643 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1644 # define MAP_ANONYMOUS MAP_ANON
1645 #endif
1646 #if !defined(MAP_FAILED)
1647 # define MAP_FAILED ((char*)-1)
1648 #endif
1650 #ifndef MAP_NORESERVE
1651 # ifdef MAP_AUTORESRV
1652 # define MAP_NORESERVE MAP_AUTORESRV
1653 # else
1654 # define MAP_NORESERVE 0
1655 # endif
1656 #endif
1659 Nearly all versions of mmap support MAP_ANONYMOUS,
1660 so the following is unlikely to be needed, but is
1661 supplied just in case.
1664 #ifndef MAP_ANONYMOUS
1666 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1668 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1669 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1670 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1671 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1673 #else
1675 #define MMAP(addr, size, prot, flags) \
1676 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1678 #endif
1681 #endif /* HAVE_MMAP */
1685 ----------------------- Chunk representations -----------------------
1690 This struct declaration is misleading (but accurate and necessary).
1691 It declares a "view" into memory allowing access to necessary
1692 fields at known offsets from a given base. See explanation below.
1695 struct malloc_chunk {
1697 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1698 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1700 struct malloc_chunk* fd; /* double links -- used only if free. */
1701 struct malloc_chunk* bk;
1706 malloc_chunk details:
1708 (The following includes lightly edited explanations by Colin Plumb.)
1710 Chunks of memory are maintained using a `boundary tag' method as
1711 described in e.g., Knuth or Standish. (See the paper by Paul
1712 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1713 survey of such techniques.) Sizes of free chunks are stored both
1714 in the front of each chunk and at the end. This makes
1715 consolidating fragmented chunks into bigger chunks very fast. The
1716 size fields also hold bits representing whether chunks are free or
1717 in use.
1719 An allocated chunk looks like this:
1722 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1723 | Size of previous chunk, if allocated | |
1724 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1725 | Size of chunk, in bytes |M|P|
1726 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1727 | User data starts here... .
1729 . (malloc_usable_space() bytes) .
1731 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1732 | Size of chunk |
1733 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1736 Where "chunk" is the front of the chunk for the purpose of most of
1737 the malloc code, but "mem" is the pointer that is returned to the
1738 user. "Nextchunk" is the beginning of the next contiguous chunk.
1740 Chunks always begin on even word boundaries, so the mem portion
1741 (which is returned to the user) is also on an even word boundary, and
1742 thus at least double-word aligned.
1744 Free chunks are stored in circular doubly-linked lists, and look like this:
1746 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1747 | Size of previous chunk |
1748 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1749 `head:' | Size of chunk, in bytes |P|
1750 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1751 | Forward pointer to next chunk in list |
1752 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1753 | Back pointer to previous chunk in list |
1754 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1755 | Unused space (may be 0 bytes long) .
1758 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1759 `foot:' | Size of chunk, in bytes |
1760 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1762 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1763 chunk size (which is always a multiple of two words), is an in-use
1764 bit for the *previous* chunk. If that bit is *clear*, then the
1765 word before the current chunk size contains the previous chunk
1766 size, and can be used to find the front of the previous chunk.
1767 The very first chunk allocated always has this bit set,
1768 preventing access to non-existent (or non-owned) memory. If
1769 prev_inuse is set for any given chunk, then you CANNOT determine
1770 the size of the previous chunk, and might even get a memory
1771 addressing fault when trying to do so.
1773 Note that the `foot' of the current chunk is actually represented
1774 as the prev_size of the NEXT chunk. This makes it easier to
1775 deal with alignments etc but can be very confusing when trying
1776 to extend or adapt this code.
1778 The two exceptions to all this are
1780 1. The special chunk `top' doesn't bother using the
1781 trailing size field since there is no next contiguous chunk
1782 that would have to index off it. After initialization, `top'
1783 is forced to always exist. If it would become less than
1784 MINSIZE bytes long, it is replenished.
1786 2. Chunks allocated via mmap, which have the second-lowest-order
1787 bit M (IS_MMAPPED) set in their size fields. Because they are
1788 allocated one-by-one, each must contain its own trailing size field.
1793 ---------- Size and alignment checks and conversions ----------
1796 /* conversion from malloc headers to user pointers, and back */
1798 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1799 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1801 /* The smallest possible chunk */
1802 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
1804 /* The smallest size we can malloc is an aligned minimal chunk */
1806 #define MINSIZE \
1807 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1809 /* Check if m has acceptable alignment */
1811 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
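/*
  Illustrative sketch (editorial addition, not in the original source),
  assuming 4-byte SIZE_SZ: the user pointer lies 2*SIZE_SZ == 8 bytes past
  the chunk header, and the two conversions invert each other.

    Void_t* mem = chunk2mem(p);      for any chunk p this malloc manages
    assert(mem2chunk(mem) == p);     the round trip recovers the chunk
    assert(aligned_OK(mem));         and the user pointer is suitably aligned
*/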
1815 Check if a request is so large that it would wrap around zero when
1816 padded and aligned. To simplify some other code, the bound is made
1817 low enough so that adding MINSIZE will also not wrap around zero.
1820 #define REQUEST_OUT_OF_RANGE(req) \
1821 ((unsigned long)(req) >= \
1822 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1824 /* pad request bytes into a usable size -- internal version */
1826 #define request2size(req) \
1827 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1828 MINSIZE : \
1829 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1831 /* Same, except also perform argument check */
1833 #define checked_request2size(req, sz) \
1834 if (REQUEST_OUT_OF_RANGE(req)) { \
1835 MALLOC_FAILURE_ACTION; \
1836 return 0; \
1838 (sz) = request2size(req);
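/*
  Worked examples (editorial addition), assuming SIZE_SZ == 4 and
  MALLOC_ALIGNMENT == 8, so MINSIZE == 16:

    request2size(0)   == 16     below MINSIZE, rounded up
    request2size(12)  == 16     12 bytes + 4 bytes overhead fits the minimum
    request2size(13)  == 24     next 8-byte multiple
    request2size(100) == 104

  checked_request2size additionally fails the enclosing function (via
  MALLOC_FAILURE_ACTION and "return 0") for requests caught by
  REQUEST_OUT_OF_RANGE before performing the same conversion.
*/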
1841 --------------- Physical chunk operations ---------------
1845 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1846 #define PREV_INUSE 0x1
1848 /* extract inuse bit of previous chunk */
1849 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1852 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1853 #define IS_MMAPPED 0x2
1855 /* check for mmap()'ed chunk */
1856 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1859 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1860 from a non-main arena. This is only set immediately before handing
1861 the chunk to the user, if necessary. */
1862 #define NON_MAIN_ARENA 0x4
1864 /* check for chunk from non-main arena */
1865 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1869 Bits to mask off when extracting size
1871 Note: IS_MMAPPED is intentionally not masked off from size field in
1872 macros for which mmapped chunks should never be seen. This should
1873 cause helpful core dumps to occur if it is tried by accident by
1874 people extending or adapting this malloc.
1876 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1878 /* Get size, ignoring use bits */
1879 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1882 /* Ptr to next physical malloc_chunk. */
1883 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1885 /* Ptr to previous physical malloc_chunk */
1886 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1888 /* Treat space at ptr + offset as a chunk */
1889 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1891 /* extract p's inuse bit */
1892 #define inuse(p)\
1893 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1895 /* set/clear chunk as being inuse without otherwise disturbing */
1896 #define set_inuse(p)\
1897 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1899 #define clear_inuse(p)\
1900 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1903 /* check/set/clear inuse bits in known places */
1904 #define inuse_bit_at_offset(p, s)\
1905 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1907 #define set_inuse_bit_at_offset(p, s)\
1908 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1910 #define clear_inuse_bit_at_offset(p, s)\
1911 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1914 /* Set size at head, without disturbing its use bit */
1915 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1917 /* Set size/use field */
1918 #define set_head(p, s) ((p)->size = (s))
1920 /* Set size at footer (only when chunk is not in use) */
1921 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
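/*
  Illustrative sketch (editorial, not part of the original source) of how
  these macros cooperate when splitting a free chunk p of size `size' into
  an allocated piece of `nb' bytes plus a free remainder, in the style of
  the splits performed later in this file (all variable names are
  hypothetical):

    mchunkptr remainder = chunk_at_offset(p, nb);
    unsigned long remainder_size = size - nb;
    set_head(p, nb | PREV_INUSE);                      the piece handed out
    set_head(remainder, remainder_size | PREV_INUSE);  p, just before it, is in use
    set_foot(remainder, remainder_size);               footer valid while free
    assert(next_chunk(p) == remainder);
    assert(inuse(p));
*/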
1925 -------------------- Internal data structures --------------------
1927 All internal state is held in an instance of malloc_state defined
1928 below. There are no other static variables, except in two optional
1929 cases:
1930 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1931 * If HAVE_MMAP is true, but mmap doesn't support
1932 MAP_ANONYMOUS, a dummy file descriptor for mmap.
1934 Beware of lots of tricks that minimize the total bookkeeping space
1935 requirements. The result is a little over 1K bytes (for 4-byte
1936 pointers and size_t).
1940 Bins
1942 An array of bin headers for free chunks. Each bin is doubly
1943 linked. The bins are approximately proportionally (log) spaced.
1944 There are a lot of these bins (128). This may look excessive, but
1945 works very well in practice. Most bins hold sizes that are
1946 unusual as malloc request sizes, but are more usual for fragments
1947 and consolidated sets of chunks, which is what these bins hold, so
1948 they can be found quickly. All procedures maintain the invariant
1949 that no consolidated chunk physically borders another one, so each
1950 chunk in a list is known to be preceded and followed by either
1951 inuse chunks or the ends of memory.
1953 Chunks in bins are kept in size order, with ties going to the
1954 approximately least recently used chunk. Ordering isn't needed
1955 for the small bins, which all contain the same-sized chunks, but
1956 facilitates best-fit allocation for larger chunks. These lists
1957 are just sequential. Keeping them in order almost never requires
1958 enough traversal to warrant using fancier ordered data
1959 structures.
1961 Chunks of the same size are linked with the most
1962 recently freed at the front, and allocations are taken from the
1963 back. This results in LRU (FIFO) allocation order, which tends
1964 to give each chunk an equal opportunity to be consolidated with
1965 adjacent freed chunks, resulting in larger free chunks and less
1966 fragmentation.
1968 To simplify use in double-linked lists, each bin header acts
1969 as a malloc_chunk. This avoids special-casing for headers.
1970 But to conserve space and improve locality, we allocate
1971 only the fd/bk pointers of bins, and then use repositioning tricks
1972 to treat these as the fields of a malloc_chunk*.
1975 typedef struct malloc_chunk* mbinptr;
1977 /* addressing -- note that bin_at(0) does not exist */
1978 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1980 /* analog of ++bin */
1981 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1983 /* Reminders about list directionality within bins */
1984 #define first(b) ((b)->fd)
1985 #define last(b) ((b)->bk)
1987 /* Take a chunk off a bin list */
1988 #define unlink(P, BK, FD) { \
1989 FD = P->fd; \
1990 BK = P->bk; \
1991 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1992 malloc_printerr (check_action, "corrupted double-linked list", P); \
1993 else { \
1994 FD->bk = BK; \
1995 BK->fd = FD; \
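/*
  Illustrative use (editorial addition; `victim', `bck' and `fwd' are
  hypothetical local names): taking a chunk off whichever bin list it is on.

    mchunkptr bck, fwd;
    unlink(victim, bck, fwd);

  On success, bck and fwd are victim's former list neighbours, now linked
  directly to each other.  If the neighbours did not point back at victim,
  the chunk is left alone and "corrupted double-linked list" is reported
  via malloc_printerr.
*/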
2000 Indexing
2002 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2003 8 bytes apart. Larger bins are approximately logarithmically spaced:
2005 64 bins of size 8
2006 32 bins of size 64
2007 16 bins of size 512
2008 8 bins of size 4096
2009 4 bins of size 32768
2010 2 bins of size 262144
2011 1 bin of size what's left
2013 There is actually a little bit of slop in the numbers in bin_index
2014 for the sake of speed. This makes no difference elsewhere.
2016 The bins top out around 1MB because we expect to service large
2017 requests via mmap.
2020 #define NBINS 128
2021 #define NSMALLBINS 64
2022 #define SMALLBIN_WIDTH 8
2023 #define MIN_LARGE_SIZE 512
2025 #define in_smallbin_range(sz) \
2026 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2028 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2030 #define largebin_index(sz) \
2031 (((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2032 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2033 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2034 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2035 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2036 126)
2038 #define bin_index(sz) \
2039 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
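/*
  Worked index values (editorial addition; these are chunk sizes, and the
  indices follow directly from the shifts above):

    bin_index(16)    ==  2     smallest regular chunk size
    bin_index(504)   == 63     largest small bin
    bin_index(512)   == 64     first large bin: 56 + (512 >> 6)
    bin_index(1024)  == 72
    bin_index(65536) == 121
*/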
2043 Unsorted chunks
2045 All remainders from chunk splits, as well as all returned chunks,
2046 are first placed in the "unsorted" bin. They are then placed
2047 in regular bins after malloc gives them ONE chance to be used before
2048 binning. So, basically, the unsorted_chunks list acts as a queue,
2049 with chunks being placed on it in free (and malloc_consolidate),
2050 and taken off (to be either used or placed in bins) in malloc.
2052 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2053 does not have to be taken into account in size comparisons.
2056 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2057 #define unsorted_chunks(M) (bin_at(M, 1))
2062 The top-most available chunk (i.e., the one bordering the end of
2063 available memory) is treated specially. It is never included in
2064 any bin, is used only if no other chunk is available, and is
2065 released back to the system if it is very large (see
2066 M_TRIM_THRESHOLD). Because top initially
2067 points to its own bin with initial zero size, thus forcing
2068 extension on the first malloc request, we avoid having any special
2069 code in malloc to check whether it even exists yet. But we still
2070 need to do so when getting memory from system, so we make
2071 initial_top treat the bin as a legal but unusable chunk during the
2072 interval between initialization and the first call to
2073 sYSMALLOc. (This is somewhat delicate, since it relies on
2074 the 2 preceding words to be zero during this interval as well.)
2077 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2078 #define initial_top(M) (unsorted_chunks(M))
2081 Binmap
2083 To help compensate for the large number of bins, a one-level index
2084 structure is used for bin-by-bin searching. `binmap' is a
2085 bitvector recording whether bins are definitely empty so they can
2086 be skipped over during traversals. The bits are NOT always
2087 cleared as soon as bins are empty, but instead only
2088 when they are noticed to be empty during traversal in malloc.
2091 /* Conservatively use 32 bits per map word, even if on 64bit system */
2092 #define BINMAPSHIFT 5
2093 #define BITSPERMAP (1U << BINMAPSHIFT)
2094 #define BINMAPSIZE (NBINS / BITSPERMAP)
2096 #define idx2block(i) ((i) >> BINMAPSHIFT)
2097 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2099 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2100 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2101 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
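/*
  Illustrative sketch (editorial addition): with BINMAPSHIFT == 5, bin 70
  lives in map word idx2block(70) == 2 as bit idx2bit(70) == 1U << 6, so:

    mark_bin(av, 70);           noted as possibly non-empty when binning
    if (get_binmap(av, 70))     worth scanning bin 70 during a search
      ... ;
    unmark_bin(av, 70);         cleared once malloc finds the bin empty
*/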
2104 Fastbins
2106 An array of lists holding recently freed small chunks. Fastbins
2107 are not doubly linked. It is faster to single-link them, and
2108 since chunks are never removed from the middles of these lists,
2109 double linking is not necessary. Also, unlike regular bins, they
2110 are not even processed in FIFO order (they use faster LIFO) since
2111 ordering doesn't much matter in the transient contexts in which
2112 fastbins are normally used.
2114 Chunks in fastbins keep their inuse bit set, so they cannot
2115 be consolidated with other free chunks. malloc_consolidate
2116 releases all chunks in fastbins and consolidates them with
2117 other free chunks.
2120 typedef struct malloc_chunk* mfastbinptr;
2122 /* offset 2 to use otherwise unindexable first 2 bins */
2123 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2125 /* The maximum fastbin request size we support */
2126 #define MAX_FAST_SIZE 80
2128 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
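/*
  Worked values (editorial addition), assuming SIZE_SZ == 4: chunk sizes
  16, 24, ..., 80 map to fastbin_index 0 .. 8, request2size(MAX_FAST_SIZE)
  == 88 maps to index 9, and therefore NFASTBINS == 10.
*/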
2131 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2132 that triggers automatic consolidation of possibly-surrounding
2133 fastbin chunks. This is a heuristic, so the exact value should not
2134 matter too much. It is defined at half the default trim threshold as a
2135 compromise heuristic to only attempt consolidation if it is likely
2136 to lead to trimming. However, it is not dynamically tunable, since
2137 consolidation reduces fragmentation surrounding large chunks even
2138 if trimming is not used.
2141 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2144 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2145 they are used as flags.
2149 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2150 some fastbin chunks. It is set true on entering a chunk into any
2151 fastbin, and cleared only in malloc_consolidate.
2153 The truth value is inverted so that have_fastchunks will be true
2154 upon startup (since statics are zero-filled), simplifying
2155 initialization checks.
2158 #define FASTCHUNKS_BIT (1U)
2160 #define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
2161 #define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
2162 #define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
2165 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2166 regions. Otherwise, contiguity is exploited in merging together,
2167 when possible, results from consecutive MORECORE calls.
2169 The initial value comes from MORECORE_CONTIGUOUS, but is
2170 changed dynamically if mmap is ever used as an sbrk substitute.
2173 #define NONCONTIGUOUS_BIT (2U)
2175 #define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
2176 #define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
2177 #define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
2178 #define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
2181 Set value of max_fast.
2182 Use impossibly small value if 0.
2183 Precondition: there are no existing fastbin chunks.
2184 Setting the value clears fastchunk bit but preserves noncontiguous bit.
2187 #define set_max_fast(M, s) \
2188 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2189 FASTCHUNKS_BIT | \
2190 ((M)->max_fast & NONCONTIGUOUS_BIT)
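/*
  Illustrative values (editorial addition; DEFAULT_MXFAST is typically 64
  and SIZE_SZ is assumed to be 4, so request2size(64) == 72):

    set_max_fast(av, DEFAULT_MXFAST);   max_fast == 72 | FASTCHUNKS_BIT
    have_fastchunks(av)                 false: the inverted bit is set
    set_fastchunks(av);                 done on each fastbin insertion
    have_fastchunks(av)                 now true, until malloc_consolidate
                                        calls clear_fastchunks(av)

  Any NONCONTIGUOUS_BIT already present in max_fast is preserved.
*/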
2194 ----------- Internal state representation and initialization -----------
2197 struct malloc_state {
2198 /* Serialize access. */
2199 mutex_t mutex;
2200 /* Should we have padding to move the mutex to its own cache line? */
2202 #if THREAD_STATS
2203 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2204 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2205 #endif
2207 /* The maximum chunk size to be eligible for fastbin */
2208 INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2210 /* Fastbins */
2211 mfastbinptr fastbins[NFASTBINS];
2213 /* Base of the topmost chunk -- not otherwise kept in a bin */
2214 mchunkptr top;
2216 /* The remainder from the most recent split of a small request */
2217 mchunkptr last_remainder;
2219 /* Normal bins packed as described above */
2220 mchunkptr bins[NBINS * 2];
2222 /* Bitmap of bins */
2223 unsigned int binmap[BINMAPSIZE];
2225 /* Linked list */
2226 struct malloc_state *next;
2228 /* Memory allocated from the system in this arena. */
2229 INTERNAL_SIZE_T system_mem;
2230 INTERNAL_SIZE_T max_system_mem;
2233 struct malloc_par {
2234 /* Tunable parameters */
2235 unsigned long trim_threshold;
2236 INTERNAL_SIZE_T top_pad;
2237 INTERNAL_SIZE_T mmap_threshold;
2239 /* Memory map support */
2240 int n_mmaps;
2241 int n_mmaps_max;
2242 int max_n_mmaps;
2244 /* Cache malloc_getpagesize */
2245 unsigned int pagesize;
2247 /* Statistics */
2248 INTERNAL_SIZE_T mmapped_mem;
2249 /*INTERNAL_SIZE_T sbrked_mem;*/
2250 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2251 INTERNAL_SIZE_T max_mmapped_mem;
2252 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2254 /* First address handed out by MORECORE/sbrk. */
2255 char* sbrk_base;
2258 /* There are several instances of this struct ("arenas") in this
2259 malloc. If you are adapting this malloc in a way that does NOT use
2260 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2261 before using. This malloc relies on the property that malloc_state
2262 is initialized to all zeroes (as is true of C statics). */
2264 static struct malloc_state main_arena;
2266 /* There is only one instance of the malloc parameters. */
2268 static struct malloc_par mp_;
2271 Initialize a malloc_state struct.
2273 This is called only from within malloc_consolidate, which needs to
2274 be called in the same contexts anyway. It is never called directly
2275 outside of malloc_consolidate because some optimizing compilers try
2276 to inline it at all call points, which turns out not to be an
2277 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2280 #if __STD_C
2281 static void malloc_init_state(mstate av)
2282 #else
2283 static void malloc_init_state(av) mstate av;
2284 #endif
2286 int i;
2287 mbinptr bin;
2289 /* Establish circular links for normal bins */
2290 for (i = 1; i < NBINS; ++i) {
2291 bin = bin_at(av,i);
2292 bin->fd = bin->bk = bin;
2295 #if MORECORE_CONTIGUOUS
2296 if (av != &main_arena)
2297 #endif
2298 set_noncontiguous(av);
2300 set_max_fast(av, DEFAULT_MXFAST);
2302 av->top = initial_top(av);
2306 Other internal utilities operating on mstates
2309 #if __STD_C
2310 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2311 static int sYSTRIm(size_t, mstate);
2312 static void malloc_consolidate(mstate);
2313 #ifndef _LIBC
2314 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2315 #endif
2316 #else
2317 static Void_t* sYSMALLOc();
2318 static int sYSTRIm();
2319 static void malloc_consolidate();
2320 static Void_t** iALLOc();
2321 #endif
2324 /* -------------- Early definitions for debugging hooks ---------------- */
2326 /* Define and initialize the hook variables. These weak definitions must
2327 appear before any use of the variables in a function (arena.c uses one). */
2328 #ifndef weak_variable
2329 #ifndef _LIBC
2330 #define weak_variable /**/
2331 #else
2332 /* In GNU libc we want the hook variables to be weak definitions to
2333 avoid a problem with Emacs. */
2334 #define weak_variable weak_function
2335 #endif
2336 #endif
2338 /* Forward declarations. */
2339 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2340 const __malloc_ptr_t caller));
2341 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2342 const __malloc_ptr_t caller));
2343 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2344 const __malloc_ptr_t caller));
2346 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2347 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2348 const __malloc_ptr_t) = NULL;
2349 __malloc_ptr_t weak_variable (*__malloc_hook)
2350 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
2351 __malloc_ptr_t weak_variable (*__realloc_hook)
2352 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
2353 = realloc_hook_ini;
2354 __malloc_ptr_t weak_variable (*__memalign_hook)
2355 (size_t __alignment, size_t __size, const __malloc_ptr_t)
2356 = memalign_hook_ini;
2357 void weak_variable (*__after_morecore_hook) (void) = NULL;
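/*
  Editorial sketch of how an application might use these hooks, modeled on
  the example in the GNU C Library manual (all names below are hypothetical
  and not part of this file):

    static __malloc_ptr_t (*old_malloc_hook) (size_t, const __malloc_ptr_t);

    static __malloc_ptr_t
    my_malloc_hook (size_t size, const __malloc_ptr_t caller)
    {
      __malloc_ptr_t result;
      __malloc_hook = old_malloc_hook;    restore, so the nested call is normal
      result = malloc (size);
      old_malloc_hook = __malloc_hook;    save and re-install this hook
      __malloc_hook = my_malloc_hook;
      return result;
    }

    static void
    my_init_hook (void)
    {
      old_malloc_hook = __malloc_hook;
      __malloc_hook = my_malloc_hook;
    }

    void (*__malloc_initialize_hook) (void) = my_init_hook;
*/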
2360 /* ---------------- Error behavior ------------------------------------ */
2362 #ifndef DEFAULT_CHECK_ACTION
2363 #define DEFAULT_CHECK_ACTION 3
2364 #endif
2366 static int check_action = DEFAULT_CHECK_ACTION;
2369 /* ------------------ Testing support ----------------------------------*/
2371 static int perturb_byte;
2373 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
2374 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
2377 /* ------------------- Support for multiple arenas -------------------- */
2378 #include "arena.c"
2381 Debugging support
2383 These routines make a number of assertions about the states
2384 of data structures that should be true at all times. If any
2385 are not true, it's very likely that a user program has somehow
2386 trashed memory. (It's also possible that there is a coding error
2387 in malloc. In which case, please report it!)
2390 #if ! MALLOC_DEBUG
2392 #define check_chunk(A,P)
2393 #define check_free_chunk(A,P)
2394 #define check_inuse_chunk(A,P)
2395 #define check_remalloced_chunk(A,P,N)
2396 #define check_malloced_chunk(A,P,N)
2397 #define check_malloc_state(A)
2399 #else
2401 #define check_chunk(A,P) do_check_chunk(A,P)
2402 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2403 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2404 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2405 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2406 #define check_malloc_state(A) do_check_malloc_state(A)
2409 Properties of all chunks
2412 #if __STD_C
2413 static void do_check_chunk(mstate av, mchunkptr p)
2414 #else
2415 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2416 #endif
2418 unsigned long sz = chunksize(p);
2419 /* min and max possible addresses assuming contiguous allocation */
2420 char* max_address = (char*)(av->top) + chunksize(av->top);
2421 char* min_address = max_address - av->system_mem;
2423 if (!chunk_is_mmapped(p)) {
2425 /* Has legal address ... */
2426 if (p != av->top) {
2427 if (contiguous(av)) {
2428 assert(((char*)p) >= min_address);
2429 assert(((char*)p + sz) <= ((char*)(av->top)));
2432 else {
2433 /* top size is always at least MINSIZE */
2434 assert((unsigned long)(sz) >= MINSIZE);
2435 /* top predecessor always marked inuse */
2436 assert(prev_inuse(p));
2440 else {
2441 #if HAVE_MMAP
2442 /* address is outside main heap */
2443 if (contiguous(av) && av->top != initial_top(av)) {
2444 assert(((char*)p) < min_address || ((char*)p) > max_address);
2446 /* chunk is page-aligned */
2447 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2448 /* mem is aligned */
2449 assert(aligned_OK(chunk2mem(p)));
2450 #else
2451 /* force an appropriate assert violation if debug set */
2452 assert(!chunk_is_mmapped(p));
2453 #endif
2458 Properties of free chunks
2461 #if __STD_C
2462 static void do_check_free_chunk(mstate av, mchunkptr p)
2463 #else
2464 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2465 #endif
2467 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2468 mchunkptr next = chunk_at_offset(p, sz);
2470 do_check_chunk(av, p);
2472 /* Chunk must claim to be free ... */
2473 assert(!inuse(p));
2474 assert (!chunk_is_mmapped(p));
2476 /* Unless a special marker, must have OK fields */
2477 if ((unsigned long)(sz) >= MINSIZE)
2479 assert((sz & MALLOC_ALIGN_MASK) == 0);
2480 assert(aligned_OK(chunk2mem(p)));
2481 /* ... matching footer field */
2482 assert(next->prev_size == sz);
2483 /* ... and is fully consolidated */
2484 assert(prev_inuse(p));
2485 assert (next == av->top || inuse(next));
2487 /* ... and has minimally sane links */
2488 assert(p->fd->bk == p);
2489 assert(p->bk->fd == p);
2491 else /* markers are always of size SIZE_SZ */
2492 assert(sz == SIZE_SZ);
2496 Properties of inuse chunks
2499 #if __STD_C
2500 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2501 #else
2502 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2503 #endif
2505 mchunkptr next;
2507 do_check_chunk(av, p);
2509 if (chunk_is_mmapped(p))
2510 return; /* mmapped chunks have no next/prev */
2512 /* Check whether it claims to be in use ... */
2513 assert(inuse(p));
2515 next = next_chunk(p);
2517 /* ... and is surrounded by OK chunks.
2518 Since more things can be checked with free chunks than inuse ones,
2519 if an inuse chunk borders them and debug is on, it's worth doing them.
2521 if (!prev_inuse(p)) {
2522 /* Note that we cannot even look at prev unless it is not inuse */
2523 mchunkptr prv = prev_chunk(p);
2524 assert(next_chunk(prv) == p);
2525 do_check_free_chunk(av, prv);
2528 if (next == av->top) {
2529 assert(prev_inuse(next));
2530 assert(chunksize(next) >= MINSIZE);
2532 else if (!inuse(next))
2533 do_check_free_chunk(av, next);
2537 Properties of chunks recycled from fastbins
2540 #if __STD_C
2541 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2542 #else
2543 static void do_check_remalloced_chunk(av, p, s)
2544 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2545 #endif
2547 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2549 if (!chunk_is_mmapped(p)) {
2550 assert(av == arena_for_chunk(p));
2551 if (chunk_non_main_arena(p))
2552 assert(av != &main_arena);
2553 else
2554 assert(av == &main_arena);
2557 do_check_inuse_chunk(av, p);
2559 /* Legal size ... */
2560 assert((sz & MALLOC_ALIGN_MASK) == 0);
2561 assert((unsigned long)(sz) >= MINSIZE);
2562 /* ... and alignment */
2563 assert(aligned_OK(chunk2mem(p)));
2564 /* chunk is less than MINSIZE more than request */
2565 assert((long)(sz) - (long)(s) >= 0);
2566 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2570 Properties of nonrecycled chunks at the point they are malloced
2573 #if __STD_C
2574 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2575 #else
2576 static void do_check_malloced_chunk(av, p, s)
2577 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2578 #endif
2580 /* same as recycled case ... */
2581 do_check_remalloced_chunk(av, p, s);
2584 ... plus, must obey implementation invariant that prev_inuse is
2585 always true of any allocated chunk; i.e., that each allocated
2586 chunk borders either a previously allocated and still in-use
2587 chunk, or the base of its memory arena. This is ensured
2588 by making all allocations from the `lowest' part of any found
2589 chunk. This does not necessarily hold however for chunks
2590 recycled via fastbins.
2593 assert(prev_inuse(p));
2598 Properties of malloc_state.
2600 This may be useful for debugging malloc, as well as detecting user
2601 programmer errors that somehow write into malloc_state.
2603 If you are extending or experimenting with this malloc, you can
2604 probably figure out how to hack this routine to print out or
2605 display chunk addresses, sizes, bins, and other instrumentation.
2608 static void do_check_malloc_state(mstate av)
2610 int i;
2611 mchunkptr p;
2612 mchunkptr q;
2613 mbinptr b;
2614 unsigned int binbit;
2615 int empty;
2616 unsigned int idx;
2617 INTERNAL_SIZE_T size;
2618 unsigned long total = 0;
2619 int max_fast_bin;
2621 /* internal size_t must be no wider than pointer type */
2622 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2624 /* alignment is a power of 2 */
2625 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2627 /* cannot run remaining checks until fully initialized */
2628 if (av->top == 0 || av->top == initial_top(av))
2629 return;
2631 /* pagesize is a power of 2 */
2632 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2634 /* A contiguous main_arena is consistent with sbrk_base. */
2635 if (av == &main_arena && contiguous(av))
2636 assert((char*)mp_.sbrk_base + av->system_mem ==
2637 (char*)av->top + chunksize(av->top));
2639 /* properties of fastbins */
2641 /* max_fast is in allowed range */
2642 assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
2644 max_fast_bin = fastbin_index(av->max_fast);
2646 for (i = 0; i < NFASTBINS; ++i) {
2647 p = av->fastbins[i];
2649 /* all bins past max_fast are empty */
2650 if (i > max_fast_bin)
2651 assert(p == 0);
2653 while (p != 0) {
2654 /* each chunk claims to be inuse */
2655 do_check_inuse_chunk(av, p);
2656 total += chunksize(p);
2657 /* chunk belongs in this bin */
2658 assert(fastbin_index(chunksize(p)) == i);
2659 p = p->fd;
2663 if (total != 0)
2664 assert(have_fastchunks(av));
2665 else if (!have_fastchunks(av))
2666 assert(total == 0);
2668 /* check normal bins */
2669 for (i = 1; i < NBINS; ++i) {
2670 b = bin_at(av,i);
2672 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2673 if (i >= 2) {
2674 binbit = get_binmap(av,i);
2675 empty = last(b) == b;
2676 if (!binbit)
2677 assert(empty);
2678 else if (!empty)
2679 assert(binbit);
2682 for (p = last(b); p != b; p = p->bk) {
2683 /* each chunk claims to be free */
2684 do_check_free_chunk(av, p);
2685 size = chunksize(p);
2686 total += size;
2687 if (i >= 2) {
2688 /* chunk belongs in bin */
2689 idx = bin_index(size);
2690 assert(idx == i);
2691 /* lists are sorted */
2692 assert(p->bk == b ||
2693 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2695 /* chunk is followed by a legal chain of inuse chunks */
2696 for (q = next_chunk(p);
2697 (q != av->top && inuse(q) &&
2698 (unsigned long)(chunksize(q)) >= MINSIZE);
2699 q = next_chunk(q))
2700 do_check_inuse_chunk(av, q);
2704 /* top chunk is OK */
2705 check_chunk(av, av->top);
2707 /* sanity checks for statistics */
2709 #ifdef NO_THREADS
2710 assert(total <= (unsigned long)(mp_.max_total_mem));
2711 assert(mp_.n_mmaps >= 0);
2712 #endif
2713 assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2714 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2716 assert((unsigned long)(av->system_mem) <=
2717 (unsigned long)(av->max_system_mem));
2719 assert((unsigned long)(mp_.mmapped_mem) <=
2720 (unsigned long)(mp_.max_mmapped_mem));
2722 #ifdef NO_THREADS
2723 assert((unsigned long)(mp_.max_total_mem) >=
2724 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2725 #endif
2727 #endif
2730 /* ----------------- Support for debugging hooks -------------------- */
2731 #include "hooks.c"
2734 /* ----------- Routines dealing with system allocation -------------- */
2737 sysmalloc handles malloc cases requiring more memory from the system.
2738 On entry, it is assumed that av->top does not have enough
2739 space to service request for nb bytes, thus requiring that av->top
2740 be extended or replaced.
2743 #if __STD_C
2744 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2745 #else
2746 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2747 #endif
2749 mchunkptr old_top; /* incoming value of av->top */
2750 INTERNAL_SIZE_T old_size; /* its size */
2751 char* old_end; /* its end address */
2753 long size; /* arg to first MORECORE or mmap call */
2754 char* brk; /* return value from MORECORE */
2756 long correction; /* arg to 2nd MORECORE call */
2757 char* snd_brk; /* 2nd return val */
2759 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2760 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2761 char* aligned_brk; /* aligned offset into brk */
2763 mchunkptr p; /* the allocated/returned chunk */
2764 mchunkptr remainder; /* remainder from allocation */
2765 unsigned long remainder_size; /* its size */
2767 unsigned long sum; /* for updating stats */
2769 size_t pagemask = mp_.pagesize - 1;
2772 #if HAVE_MMAP
2775 If have mmap, and the request size meets the mmap threshold, and
2776 the system supports mmap, and there are few enough currently
2777 allocated mmapped regions, try to directly map this request
2778 rather than expanding top.
2781 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2782 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2784 char* mm; /* return value from mmap call*/
2787 Round up size to nearest page. For mmapped chunks, the overhead
2788 is one SIZE_SZ unit larger than for normal chunks, because there
2789 is no following chunk whose prev_size field could be used.
2791 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
2793 /* Don't try if size wraps around 0 */
2794 if ((unsigned long)(size) > (unsigned long)(nb)) {
2796 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2798 if (mm != MAP_FAILED) {
2801 The offset to the start of the mmapped region is stored
2802 in the prev_size field of the chunk. This allows us to adjust
2803 returned start address to meet alignment requirements here
2804 and in memalign(), and still be able to compute proper
2805 address argument for later munmap in free() and realloc().
2808 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2809 if (front_misalign > 0) {
2810 correction = MALLOC_ALIGNMENT - front_misalign;
2811 p = (mchunkptr)(mm + correction);
2812 p->prev_size = correction;
2813 set_head(p, (size - correction) |IS_MMAPPED);
2815 else {
2816 p = (mchunkptr)mm;
2817 set_head(p, size|IS_MMAPPED);
2820 /* update statistics */
2822 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2823 mp_.max_n_mmaps = mp_.n_mmaps;
2825 sum = mp_.mmapped_mem += size;
2826 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2827 mp_.max_mmapped_mem = sum;
2828 #ifdef NO_THREADS
2829 sum += av->system_mem;
2830 if (sum > (unsigned long)(mp_.max_total_mem))
2831 mp_.max_total_mem = sum;
2832 #endif
2834 check_chunk(av, p);
2836 return chunk2mem(p);
2840 #endif
2842 /* Record incoming configuration of top */
2844 old_top = av->top;
2845 old_size = chunksize(old_top);
2846 old_end = (char*)(chunk_at_offset(old_top, old_size));
2848 brk = snd_brk = (char*)(MORECORE_FAILURE);
2851 If not the first time through, we require old_size to be
2852 at least MINSIZE and to have prev_inuse set.
2855 assert((old_top == initial_top(av) && old_size == 0) ||
2856 ((unsigned long) (old_size) >= MINSIZE &&
2857 prev_inuse(old_top) &&
2858 ((unsigned long)old_end & pagemask) == 0));
2860 /* Precondition: not enough current space to satisfy nb request */
2861 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2863 /* Precondition: all fastbins are consolidated */
2864 assert(!have_fastchunks(av));
2867 if (av != &main_arena) {
2869 heap_info *old_heap, *heap;
2870 size_t old_heap_size;
2872 /* First try to extend the current heap. */
2873 old_heap = heap_for_ptr(old_top);
2874 old_heap_size = old_heap->size;
2875 if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2876 av->system_mem += old_heap->size - old_heap_size;
2877 arena_mem += old_heap->size - old_heap_size;
2878 #if 0
2879 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2880 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2881 #endif
2882 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2883 | PREV_INUSE);
2885 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2886 /* Use a newly allocated heap. */
2887 heap->ar_ptr = av;
2888 heap->prev = old_heap;
2889 av->system_mem += heap->size;
2890 arena_mem += heap->size;
2891 #if 0
2892 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2893 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2894 #endif
2895 /* Set up the new top. */
2896 top(av) = chunk_at_offset(heap, sizeof(*heap));
2897 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2899 /* Setup fencepost and free the old top chunk. */
2900 /* The fencepost takes at least MINSIZE bytes, because it might
2901 become the top chunk again later. Note that a footer is set
2902 up, too, although the chunk is marked in use. */
2903 old_size -= MINSIZE;
2904 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2905 if (old_size >= MINSIZE) {
2906 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2907 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2908 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2909 _int_free(av, chunk2mem(old_top));
2910 } else {
2911 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2912 set_foot(old_top, (old_size + 2*SIZE_SZ));
2916 } else { /* av == main_arena */
2919 /* Request enough space for nb + pad + overhead */
2921 size = nb + mp_.top_pad + MINSIZE;
2924 If contiguous, we can subtract out existing space that we hope to
2925 combine with new space. We add it back later only if
2926 we don't actually get contiguous space.
2929 if (contiguous(av))
2930 size -= old_size;
2933 Round to a multiple of page size.
2934 If MORECORE is not contiguous, this ensures that we only call it
2935 with whole-page arguments. And if MORECORE is contiguous and
2936 this is not first time through, this preserves page-alignment of
2937 previous calls. Otherwise, we correct to page-align below.
2940 size = (size + pagemask) & ~pagemask;
2943 Don't try to call MORECORE if argument is so big as to appear
2944 negative. Note that since mmap takes size_t arg, it may succeed
2945 below even if we cannot call MORECORE.
2948 if (size > 0)
2949 brk = (char*)(MORECORE(size));
2951 if (brk != (char*)(MORECORE_FAILURE)) {
2952 /* Call the `morecore' hook if necessary. */
2953 if (__after_morecore_hook)
2954 (*__after_morecore_hook) ();
2955 } else {
2957 If have mmap, try using it as a backup when MORECORE fails or
2958 cannot be used. This is worth doing on systems that have "holes" in
2959 address space, so sbrk cannot extend to give contiguous space, but
2960 space is available elsewhere. Note that we ignore mmap max count
2961 and threshold limits, since the space will not be used as a
2962 segregated mmap region.
2965 #if HAVE_MMAP
2966 /* Cannot merge with old top, so add its size back in */
2967 if (contiguous(av))
2968 size = (size + old_size + pagemask) & ~pagemask;
2970 /* If we are relying on mmap as backup, then use larger units */
2971 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2972 size = MMAP_AS_MORECORE_SIZE;
2974 /* Don't try if size wraps around 0 */
2975 if ((unsigned long)(size) > (unsigned long)(nb)) {
2977 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2979 if (mbrk != MAP_FAILED) {
2981 /* We do not need, and cannot use, another sbrk call to find end */
2982 brk = mbrk;
2983 snd_brk = brk + size;
2986 Record that we no longer have a contiguous sbrk region.
2987 After the first time mmap is used as backup, we do not
2988 ever rely on contiguous space since this could incorrectly
2989 bridge regions.
2991 set_noncontiguous(av);
2994 #endif
2997 if (brk != (char*)(MORECORE_FAILURE)) {
2998 if (mp_.sbrk_base == 0)
2999 mp_.sbrk_base = brk;
3000 av->system_mem += size;
3003 If MORECORE extends previous space, we can likewise extend top size.
3006 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3007 set_head(old_top, (size + old_size) | PREV_INUSE);
3009 else if (contiguous(av) && old_size && brk < old_end) {
3010 /* Oops! Someone else killed our space. Can't touch anything. */
3011 assert(0);
3015 Otherwise, make adjustments:
3017 * If the first time through or noncontiguous, we need to call sbrk
3018 just to find out where the end of memory lies.
3020 * We need to ensure that all returned chunks from malloc will meet
3021 MALLOC_ALIGNMENT
3023 * If there was an intervening foreign sbrk, we need to adjust sbrk
3024 request size to account for fact that we will not be able to
3025 combine new space with existing space in old_top.
3027 * Almost all systems internally allocate whole pages at a time, in
3028 which case we might as well use the whole last page of request.
3029 So we allocate enough more memory to hit a page boundary now,
3030 which in turn causes future contiguous calls to page-align.
3033 else {
3034 front_misalign = 0;
3035 end_misalign = 0;
3036 correction = 0;
3037 aligned_brk = brk;
3039 /* handle contiguous cases */
3040 if (contiguous(av)) {
3042 /* Count foreign sbrk as system_mem. */
3043 if (old_size)
3044 av->system_mem += brk - old_end;
3046 /* Guarantee alignment of first new chunk made from this space */
3048 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3049 if (front_misalign > 0) {
3052 Skip over some bytes to arrive at an aligned position.
3053 We don't need to specially mark these wasted front bytes.
3054 They will never be accessed anyway because
3055 prev_inuse of av->top (and any chunk created from its start)
3056 is always true after initialization.
3059 correction = MALLOC_ALIGNMENT - front_misalign;
3060 aligned_brk += correction;
3064 If this isn't adjacent to existing space, then we will not
3065 be able to merge with old_top space, so must add to 2nd request.
3068 correction += old_size;
3070 /* Extend the end address to hit a page boundary */
3071 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3072 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3074 assert(correction >= 0);
3075 snd_brk = (char*)(MORECORE(correction));
3078 If can't allocate correction, try to at least find out current
3079 brk. It might be enough to proceed without failing.
3081 Note that if second sbrk did NOT fail, we assume that space
3082 is contiguous with first sbrk. This is a safe assumption unless
3083 program is multithreaded but doesn't use locks and a foreign sbrk
3084 occurred between our first and second calls.
3087 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3088 correction = 0;
3089 snd_brk = (char*)(MORECORE(0));
3090 } else
3091 /* Call the `morecore' hook if necessary. */
3092 if (__after_morecore_hook)
3093 (*__after_morecore_hook) ();
3096 /* handle non-contiguous cases */
3097 else {
3098 /* MORECORE/mmap must correctly align */
3099 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3101 /* Find out current end of memory */
3102 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3103 snd_brk = (char*)(MORECORE(0));
3107 /* Adjust top based on results of second sbrk */
3108 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3109 av->top = (mchunkptr)aligned_brk;
3110 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3111 av->system_mem += correction;
3114 If not the first time through, we either have a
3115 gap due to foreign sbrk or a non-contiguous region. Insert a
3116 double fencepost at old_top to prevent consolidation with space
3117 we don't own. These fenceposts are artificial chunks that are
3118 marked as inuse and are in any case too small to use. We need
3119 two to make sizes and alignments work out.
3122 if (old_size != 0) {
3124 Shrink old_top to insert fenceposts, keeping size a
3125 multiple of MALLOC_ALIGNMENT. We know there is at least
3126 enough space in old_top to do this.
3128 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3129 set_head(old_top, old_size | PREV_INUSE);
3132 Note that the following assignments completely overwrite
3133 old_top when old_size was previously MINSIZE. This is
3134 intentional. We need the fencepost, even if old_top otherwise gets
3135 lost.
3137 chunk_at_offset(old_top, old_size )->size =
3138 (2*SIZE_SZ)|PREV_INUSE;
3140 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3141 (2*SIZE_SZ)|PREV_INUSE;
3143 /* If possible, release the rest. */
3144 if (old_size >= MINSIZE) {
3145 _int_free(av, chunk2mem(old_top));
3152 /* Update statistics */
3153 #ifdef NO_THREADS
3154 sum = av->system_mem + mp_.mmapped_mem;
3155 if (sum > (unsigned long)(mp_.max_total_mem))
3156 mp_.max_total_mem = sum;
3157 #endif
3161 } /* if (av != &main_arena) */
3163 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3164 av->max_system_mem = av->system_mem;
3165 check_malloc_state(av);
3167 /* finally, do the allocation */
3168 p = av->top;
3169 size = chunksize(p);
3171 /* check that one of the above allocation paths succeeded */
3172 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3173 remainder_size = size - nb;
3174 remainder = chunk_at_offset(p, nb);
3175 av->top = remainder;
3176 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3177 set_head(remainder, remainder_size | PREV_INUSE);
3178 check_malloced_chunk(av, p, nb);
3179 return chunk2mem(p);
3182 /* catch all failure paths */
3183 MALLOC_FAILURE_ACTION;
3184 return 0;
3189 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3190 to the system (via negative arguments to sbrk) if there is unused
3191 memory at the `high' end of the malloc pool. It is called
3192 automatically by free() when top space exceeds the trim
3193 threshold. It is also called by the public malloc_trim routine. It
3194 returns 1 if it actually released any memory, else 0.
3197 #if __STD_C
3198 static int sYSTRIm(size_t pad, mstate av)
3199 #else
3200 static int sYSTRIm(pad, av) size_t pad; mstate av;
3201 #endif
3203 long top_size; /* Amount of top-most memory */
3204 long extra; /* Amount to release */
3205 long released; /* Amount actually released */
3206 char* current_brk; /* address returned by pre-check sbrk call */
3207 char* new_brk; /* address returned by post-check sbrk call */
3208 size_t pagesz;
3210 pagesz = mp_.pagesize;
3211 top_size = chunksize(av->top);
3213 /* Release in pagesize units, keeping at least one page */
3214 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3216 if (extra > 0) {
3219 Only proceed if end of memory is where we last set it.
3220 This avoids problems if there were foreign sbrk calls.
3222 current_brk = (char*)(MORECORE(0));
3223 if (current_brk == (char*)(av->top) + top_size) {
3226 Attempt to release memory. We ignore MORECORE return value,
3227 and instead call again to find out where new end of memory is.
3228 This avoids problems if first call releases less than we asked,
3229 or if failure somehow altered brk value. (We could still
3230 encounter problems if it altered brk in some very bad way,
3231 but the only thing we can do is adjust anyway, which will cause
3232 some downstream failure.)
3235 MORECORE(-extra);
3236 /* Call the `morecore' hook if necessary. */
3237 if (__after_morecore_hook)
3238 (*__after_morecore_hook) ();
3239 new_brk = (char*)(MORECORE(0));
3241 if (new_brk != (char*)MORECORE_FAILURE) {
3242 released = (long)(current_brk - new_brk);
3244 if (released != 0) {
3245 /* Success. Adjust top. */
3246 av->system_mem -= released;
3247 set_head(av->top, (top_size - released) | PREV_INUSE);
3248 check_malloc_state(av);
3249 return 1;
3254 return 0;
3257 #if HAVE_MMAP
3259 static void
3260 internal_function
3261 #if __STD_C
3262 munmap_chunk(mchunkptr p)
3263 #else
3264 munmap_chunk(p) mchunkptr p;
3265 #endif
3267 INTERNAL_SIZE_T size = chunksize(p);
3268 int ret;
3270 assert (chunk_is_mmapped(p));
3271 #if 0
3272 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3273 assert((mp_.n_mmaps > 0));
3274 #endif
3275 assert(((p->prev_size + size) & (mp_.pagesize-1)) == 0);
3277 mp_.n_mmaps--;
3278 mp_.mmapped_mem -= (size + p->prev_size);
3280 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
3282 /* munmap returns non-zero on failure */
3283 assert(ret == 0);
3286 #if HAVE_MREMAP
3288 static mchunkptr
3289 internal_function
3290 #if __STD_C
3291 mremap_chunk(mchunkptr p, size_t new_size)
3292 #else
3293 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3294 #endif
3296 size_t page_mask = mp_.pagesize - 1;
3297 INTERNAL_SIZE_T offset = p->prev_size;
3298 INTERNAL_SIZE_T size = chunksize(p);
3299 char *cp;
3301 assert (chunk_is_mmapped(p));
3302 #if 0
3303 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3304 assert((mp_.n_mmaps > 0));
3305 #endif
3306 assert(((size + offset) & (mp_.pagesize-1)) == 0);
3308 /* Note the extra SIZE_SZ overhead, as for mmapped chunks in sYSMALLOc above. */
3309 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3311 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3312 MREMAP_MAYMOVE);
3314 if (cp == MAP_FAILED) return 0;
3316 p = (mchunkptr)(cp + offset);
3318 assert(aligned_OK(chunk2mem(p)));
3320 assert((p->prev_size == offset));
3321 set_head(p, (new_size - offset)|IS_MMAPPED);
3323 mp_.mmapped_mem -= size + offset;
3324 mp_.mmapped_mem += new_size;
3325 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3326 mp_.max_mmapped_mem = mp_.mmapped_mem;
3327 #ifdef NO_THREADS
3328 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3329 mp_.max_total_mem)
3330 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3331 #endif
3332 return p;
3335 #endif /* HAVE_MREMAP */
3337 #endif /* HAVE_MMAP */
3339 /*------------------------ Public wrappers. --------------------------------*/
3341 Void_t*
3342 public_mALLOc(size_t bytes)
3344 mstate ar_ptr;
3345 Void_t *victim;
3347 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
3348 if (hook != NULL)
3349 return (*hook)(bytes, RETURN_ADDRESS (0));
3351 arena_get(ar_ptr, bytes);
3352 if(!ar_ptr)
3353 return 0;
3354 victim = _int_malloc(ar_ptr, bytes);
3355 if(!victim) {
3356 /* Maybe the failure is due to running out of mmapped areas. */
3357 if(ar_ptr != &main_arena) {
3358 (void)mutex_unlock(&ar_ptr->mutex);
3359 (void)mutex_lock(&main_arena.mutex);
3360 victim = _int_malloc(&main_arena, bytes);
3361 (void)mutex_unlock(&main_arena.mutex);
3362 } else {
3363 #if USE_ARENAS
3364 /* ... or sbrk() has failed and there is still a chance to mmap() */
3365 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3366 (void)mutex_unlock(&main_arena.mutex);
3367 if(ar_ptr) {
3368 victim = _int_malloc(ar_ptr, bytes);
3369 (void)mutex_unlock(&ar_ptr->mutex);
3371 #endif
3373 } else
3374 (void)mutex_unlock(&ar_ptr->mutex);
3375 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3376 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3377 return victim;
3379 #ifdef libc_hidden_def
3380 libc_hidden_def(public_mALLOc)
3381 #endif
3383 void
3384 public_fREe(Void_t* mem)
3386 mstate ar_ptr;
3387 mchunkptr p; /* chunk corresponding to mem */
3389 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
3390 if (hook != NULL) {
3391 (*hook)(mem, RETURN_ADDRESS (0));
3392 return;
3395 if (mem == 0) /* free(0) has no effect */
3396 return;
3398 p = mem2chunk(mem);
3400 #if HAVE_MMAP
3401 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3403 munmap_chunk(p);
3404 return;
3406 #endif
3408 ar_ptr = arena_for_chunk(p);
3409 #if THREAD_STATS
3410 if(!mutex_trylock(&ar_ptr->mutex))
3411 ++(ar_ptr->stat_lock_direct);
3412 else {
3413 (void)mutex_lock(&ar_ptr->mutex);
3414 ++(ar_ptr->stat_lock_wait);
3416 #else
3417 (void)mutex_lock(&ar_ptr->mutex);
3418 #endif
3419 _int_free(ar_ptr, mem);
3420 (void)mutex_unlock(&ar_ptr->mutex);
3422 #ifdef libc_hidden_def
3423 libc_hidden_def (public_fREe)
3424 #endif
3426 Void_t*
3427 public_rEALLOc(Void_t* oldmem, size_t bytes)
3429 mstate ar_ptr;
3430 INTERNAL_SIZE_T nb; /* padded request size */
3432 mchunkptr oldp; /* chunk corresponding to oldmem */
3433 INTERNAL_SIZE_T oldsize; /* its size */
3435 Void_t* newp; /* chunk to return */
3437 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
3438 __realloc_hook;
3439 if (hook != NULL)
3440 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3442 #if REALLOC_ZERO_BYTES_FREES
3443 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3444 #endif
3446 /* realloc of null is supposed to be same as malloc */
3447 if (oldmem == 0) return public_mALLOc(bytes);
3449 oldp = mem2chunk(oldmem);
3450 oldsize = chunksize(oldp);
3452 /* Little security check which won't hurt performance: the
3453 allocator never wraps around at the end of the address space.
3454 Therefore we can exclude some size values which might appear
3455 here by accident or by "design" from some intruder. */
3456 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3457 || __builtin_expect ((uintptr_t) oldp & MALLOC_ALIGN_MASK, 0))
3459 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3460 return NULL;
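/*
  To see how the check above rejects a corrupted header, assume (purely
  for illustration) a 32-bit address space, an oldp of 0x08050010, and a
  size field overwritten with 0xfffff000.  Then (uintptr_t) -oldsize is
  0x00001000, oldp compares greater, and realloc refuses the request
  before the bogus size can be used.
*/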
3463 checked_request2size(bytes, nb);
3465 #if HAVE_MMAP
3466 if (chunk_is_mmapped(oldp))
3468 Void_t* newmem;
3470 #if HAVE_MREMAP
3471 newp = mremap_chunk(oldp, nb);
3472 if(newp) return chunk2mem(newp);
3473 #endif
3474 /* Note the extra SIZE_SZ overhead. */
3475 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3476 /* Must alloc, copy, free. */
3477 newmem = public_mALLOc(bytes);
3478 if (newmem == 0) return 0; /* propagate failure */
3479 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3480 munmap_chunk(oldp);
3481 return newmem;
3483 #endif
3485 ar_ptr = arena_for_chunk(oldp);
3486 #if THREAD_STATS
3487 if(!mutex_trylock(&ar_ptr->mutex))
3488 ++(ar_ptr->stat_lock_direct);
3489 else {
3490 (void)mutex_lock(&ar_ptr->mutex);
3491 ++(ar_ptr->stat_lock_wait);
3493 #else
3494 (void)mutex_lock(&ar_ptr->mutex);
3495 #endif
3497 #ifndef NO_THREADS
3498 /* As in malloc(), remember this arena for the next allocation. */
3499 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3500 #endif
3502 newp = _int_realloc(ar_ptr, oldmem, bytes);
3504 (void)mutex_unlock(&ar_ptr->mutex);
3505 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3506 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3507 return newp;
3509 #ifdef libc_hidden_def
3510 libc_hidden_def (public_rEALLOc)
3511 #endif
3513 Void_t*
3514 public_mEMALIGn(size_t alignment, size_t bytes)
3516 mstate ar_ptr;
3517 Void_t *p;
3519 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3520 __const __malloc_ptr_t)) =
3521 __memalign_hook;
3522 if (hook != NULL)
3523 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3525 /* If we need less alignment than we give anyway, just relay to malloc */
3526 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3528 /* Otherwise, ensure that it is at least a minimum chunk size */
3529 if (alignment < MINSIZE) alignment = MINSIZE;
3531 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3532 if(!ar_ptr)
3533 return 0;
3534 p = _int_memalign(ar_ptr, alignment, bytes);
3535 (void)mutex_unlock(&ar_ptr->mutex);
3536 if(!p) {
3537 /* Maybe the failure is due to running out of mmapped areas. */
3538 if(ar_ptr != &main_arena) {
3539 (void)mutex_lock(&main_arena.mutex);
3540 p = _int_memalign(&main_arena, alignment, bytes);
3541 (void)mutex_unlock(&main_arena.mutex);
3542 } else {
3543 #if USE_ARENAS
3544 /* ... or sbrk() has failed and there is still a chance to mmap() */
3545 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3546 if(ar_ptr) {
3547 p = _int_memalign(ar_ptr, alignment, bytes);
3548 (void)mutex_unlock(&ar_ptr->mutex);
3550 #endif
3553 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3554 ar_ptr == arena_for_chunk(mem2chunk(p)));
3555 return p;
3557 #ifdef libc_hidden_def
3558 libc_hidden_def (public_mEMALIGn)
3559 #endif
3561 Void_t*
3562 public_vALLOc(size_t bytes)
3564 mstate ar_ptr;
3565 Void_t *p;
3567 if(__malloc_initialized < 0)
3568 ptmalloc_init ();
3570 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3571 __const __malloc_ptr_t)) =
3572 __memalign_hook;
3573 if (hook != NULL)
3574 return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0));
3576 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3577 if(!ar_ptr)
3578 return 0;
3579 p = _int_valloc(ar_ptr, bytes);
3580 (void)mutex_unlock(&ar_ptr->mutex);
3581 return p;
3584 Void_t*
3585 public_pVALLOc(size_t bytes)
3587 mstate ar_ptr;
3588 Void_t *p;
3590 if(__malloc_initialized < 0)
3591 ptmalloc_init ();
3593 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3594 __const __malloc_ptr_t)) =
3595 __memalign_hook;
3596 if (hook != NULL)
3597 return (*hook)(mp_.pagesize,
3598 (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1),
3599 RETURN_ADDRESS (0));
3601 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3602 p = _int_pvalloc(ar_ptr, bytes);
3603 (void)mutex_unlock(&ar_ptr->mutex);
3604 return p;
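/*
  The masking above (and again in _int_pvalloc) simply pads the request
  to a whole number of pages.  For illustration, with a 4096-byte page:
  (1 + 4095) & ~4095 == 4096, and (4097 + 4095) & ~4095 == 8192.
*/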
3607 Void_t*
3608 public_cALLOc(size_t n, size_t elem_size)
3610 mstate av;
3611 mchunkptr oldtop, p;
3612 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3613 Void_t* mem;
3614 unsigned long clearsize;
3615 unsigned long nclears;
3616 INTERNAL_SIZE_T* d;
3617 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3618 __malloc_hook;
3620 /* size_t is unsigned so the behavior on overflow is defined. */
3621 bytes = n * elem_size;
3622 #define HALF_INTERNAL_SIZE_T \
3623 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3624 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3625 if (elem_size != 0 && bytes / elem_size != n) {
3626 MALLOC_FAILURE_ACTION;
3627 return 0;
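/*
  The guard above only pays for a division when overflow is possible:
  if both n and elem_size are below HALF_INTERNAL_SIZE_T, their product
  cannot wrap.  For illustration, with a 32-bit INTERNAL_SIZE_T
  HALF_INTERNAL_SIZE_T is 0x10000, so calloc(0x20000, 0x20000) wraps
  bytes to 0, the division test sees 0 / 0x20000 != 0x20000, and the
  call fails instead of returning a too-small block.
*/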
3631 if (hook != NULL) {
3632 sz = bytes;
3633 mem = (*hook)(sz, RETURN_ADDRESS (0));
3634 if(mem == 0)
3635 return 0;
3636 #ifdef HAVE_MEMCPY
3637 return memset(mem, 0, sz);
3638 #else
3639 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3640 return mem;
3641 #endif
3644 sz = bytes;
3646 arena_get(av, sz);
3647 if(!av)
3648 return 0;
3650 /* Check if we hand out the top chunk, in which case there may be no
3651 need to clear. */
3652 #if MORECORE_CLEARS
3653 oldtop = top(av);
3654 oldtopsize = chunksize(top(av));
3655 #if MORECORE_CLEARS < 2
3656 /* Only newly allocated memory is guaranteed to be cleared. */
3657 if (av == &main_arena &&
3658 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3659 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3660 #endif
3661 #endif
3662 mem = _int_malloc(av, sz);
3664 /* Only clearing follows, so we can unlock early. */
3665 (void)mutex_unlock(&av->mutex);
3667 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3668 av == arena_for_chunk(mem2chunk(mem)));
3670 if (mem == 0) {
3671 /* Maybe the failure is due to running out of mmapped areas. */
3672 if(av != &main_arena) {
3673 (void)mutex_lock(&main_arena.mutex);
3674 mem = _int_malloc(&main_arena, sz);
3675 (void)mutex_unlock(&main_arena.mutex);
3676 } else {
3677 #if USE_ARENAS
3678 /* ... or sbrk() has failed and there is still a chance to mmap() */
3679 (void)mutex_lock(&main_arena.mutex);
3680 av = arena_get2(av->next ? av : 0, sz);
3681 (void)mutex_unlock(&main_arena.mutex);
3682 if(av) {
3683 mem = _int_malloc(av, sz);
3684 (void)mutex_unlock(&av->mutex);
3686 #endif
3688 if (mem == 0) return 0;
3690 p = mem2chunk(mem);
3692 /* Two optional cases in which clearing not necessary */
3693 #if HAVE_MMAP
3694 if (perturb_byte == 0 && chunk_is_mmapped(p))
3695 return mem;
3696 #endif
3698 csz = chunksize(p);
3700 #if MORECORE_CLEARS
3701 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
3702 /* clear only the bytes from non-freshly-sbrked memory */
3703 csz = oldtopsize;
3705 #endif
3707 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
3708 contents have an odd number of INTERNAL_SIZE_T-sized words;
3709 minimally 3. */
3710 d = (INTERNAL_SIZE_T*)mem;
3711 clearsize = csz - SIZE_SZ;
3712 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3713 assert(nclears >= 3);
3715 if (nclears > 9)
3716 MALLOC_ZERO(d, clearsize);
3718 else {
3719 *(d+0) = 0;
3720 *(d+1) = 0;
3721 *(d+2) = 0;
3722 if (nclears > 4) {
3723 *(d+3) = 0;
3724 *(d+4) = 0;
3725 if (nclears > 6) {
3726 *(d+5) = 0;
3727 *(d+6) = 0;
3728 if (nclears > 8) {
3729 *(d+7) = 0;
3730 *(d+8) = 0;
3736 return mem;
3739 #ifndef _LIBC
3741 Void_t**
3742 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3744 mstate ar_ptr;
3745 Void_t** m;
3747 arena_get(ar_ptr, n*elem_size);
3748 if(!ar_ptr)
3749 return 0;
3751 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3752 (void)mutex_unlock(&ar_ptr->mutex);
3753 return m;
3756 Void_t**
3757 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3759 mstate ar_ptr;
3760 Void_t** m;
3762 arena_get(ar_ptr, 0);
3763 if(!ar_ptr)
3764 return 0;
3766 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3767 (void)mutex_unlock(&ar_ptr->mutex);
3768 return m;
3771 void
3772 public_cFREe(Void_t* m)
3774 public_fREe(m);
3777 #endif /* _LIBC */
3779 int
3780 public_mTRIm(size_t s)
3782 int result;
3784 if(__malloc_initialized < 0)
3785 ptmalloc_init ();
3786 (void)mutex_lock(&main_arena.mutex);
3787 result = mTRIm(s);
3788 (void)mutex_unlock(&main_arena.mutex);
3789 return result;
3792 size_t
3793 public_mUSABLe(Void_t* m)
3795 size_t result;
3797 result = mUSABLe(m);
3798 return result;
3801 void
3802 public_mSTATs()
3804 mSTATs();
3807 struct mallinfo public_mALLINFo()
3809 struct mallinfo m;
3811 if(__malloc_initialized < 0)
3812 ptmalloc_init ();
3813 (void)mutex_lock(&main_arena.mutex);
3814 m = mALLINFo(&main_arena);
3815 (void)mutex_unlock(&main_arena.mutex);
3816 return m;
3819 int
3820 public_mALLOPt(int p, int v)
3822 int result;
3823 result = mALLOPt(p, v);
3824 return result;
3828 ------------------------------ malloc ------------------------------
3831 Void_t*
3832 _int_malloc(mstate av, size_t bytes)
3834 INTERNAL_SIZE_T nb; /* normalized request size */
3835 unsigned int idx; /* associated bin index */
3836 mbinptr bin; /* associated bin */
3837 mfastbinptr* fb; /* associated fastbin */
3839 mchunkptr victim; /* inspected/selected chunk */
3840 INTERNAL_SIZE_T size; /* its size */
3841 int victim_index; /* its bin index */
3843 mchunkptr remainder; /* remainder from a split */
3844 unsigned long remainder_size; /* its size */
3846 unsigned int block; /* bit map traverser */
3847 unsigned int bit; /* bit map traverser */
3848 unsigned int map; /* current word of binmap */
3850 mchunkptr fwd; /* misc temp for linking */
3851 mchunkptr bck; /* misc temp for linking */
3854 Convert request size to internal form by adding SIZE_SZ bytes
3855 overhead plus possibly more to obtain necessary alignment and/or
3856 to obtain a size of at least MINSIZE, the smallest allocatable
3857 size. Also, checked_request2size traps (returning 0) request sizes
3858 that are so large that they wrap around zero when padded and
3859 aligned.
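/*
  For illustration, the conversion behaves roughly like the sketch
  below (assuming SIZE_SZ == 4 and MALLOC_ALIGNMENT == 8, i.e. a
  typical 32-bit build):

    nb = (bytes + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
    if (nb < MINSIZE) nb = MINSIZE;

  so a 13-byte request becomes a 24-byte chunk and a 0-byte request
  becomes MINSIZE.
*/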
3862 checked_request2size(bytes, nb);
3865 If the size qualifies as a fastbin, first check corresponding bin.
3866 This code is safe to execute even if av is not yet initialized, so we
3867 can try it without checking, which saves some time on this fast path.
3870 if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
3871 long int idx = fastbin_index(nb);
3872 fb = &(av->fastbins[idx]);
3873 if ( (victim = *fb) != 0) {
3874 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3875 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
3876 chunk2mem (victim));
3877 *fb = victim->fd;
3878 check_remalloced_chunk(av, victim, nb);
3879 void *p = chunk2mem(victim);
3880 if (__builtin_expect (perturb_byte, 0))
3881 alloc_perturb (p, bytes);
3882 return p;
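/*
  For illustration of the indexing used above: fastbin_index spaces
  bins 8 bytes apart, so (assuming SIZE_SZ == 4) a 16-byte chunk maps
  to bin (16 >> 3) - 2 == 0, a 24-byte chunk to bin 1, and so on up to
  the max_fast limit (request sizes of 64 bytes by default).
*/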
3887 If a small request, check regular bin. Since these "smallbins"
3888 hold one size each, no searching within bins is necessary.
3889 (For a large request, we need to wait until unsorted chunks are
3890 processed to find best fit. But for small ones, fits are exact
3891 anyway, so we can check now, which is faster.)
3894 if (in_smallbin_range(nb)) {
3895 idx = smallbin_index(nb);
3896 bin = bin_at(av,idx);
3898 if ( (victim = last(bin)) != bin) {
3899 if (victim == 0) /* initialization check */
3900 malloc_consolidate(av);
3901 else {
3902 bck = victim->bk;
3903 set_inuse_bit_at_offset(victim, nb);
3904 bin->bk = bck;
3905 bck->fd = bin;
3907 if (av != &main_arena)
3908 victim->size |= NON_MAIN_ARENA;
3909 check_malloced_chunk(av, victim, nb);
3910 void *p = chunk2mem(victim);
3911 if (__builtin_expect (perturb_byte, 0))
3912 alloc_perturb (p, bytes);
3913 return p;
3919 If this is a large request, consolidate fastbins before continuing.
3920 While it might look excessive to kill all fastbins before
3921 even seeing if there is space available, this avoids
3922 fragmentation problems normally associated with fastbins.
3923 Also, in practice, programs tend to have runs of either small or
3924 large requests, but less often mixtures, so consolidation is not
3925 invoked all that often in most programs. And the programs in which
3926 it is called frequently otherwise tend to fragment.
3929 else {
3930 idx = largebin_index(nb);
3931 if (have_fastchunks(av))
3932 malloc_consolidate(av);
3936 Process recently freed or remaindered chunks, taking one only if
3937 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3938 the most recent non-exact fit. Place other traversed chunks in
3939 bins. Note that this step is the only place in any routine where
3940 chunks are placed in bins.
3942 The outer loop here is needed because we might not realize until
3943 near the end of malloc that we should have consolidated, so must
3944 do so and retry. This happens at most once, and only when we would
3945 otherwise need to expand memory to service a "small" request.
3948 for(;;) {
3950 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3951 bck = victim->bk;
3952 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3953 || __builtin_expect (victim->size > av->system_mem, 0))
3954 malloc_printerr (check_action, "malloc(): memory corruption",
3955 chunk2mem (victim));
3956 size = chunksize(victim);
3959 If a small request, try to use last remainder if it is the
3960 only chunk in unsorted bin. This helps promote locality for
3961 runs of consecutive small requests. This is the only
3962 exception to best-fit, and applies only when there is
3963 no exact fit for a small chunk.
3966 if (in_smallbin_range(nb) &&
3967 bck == unsorted_chunks(av) &&
3968 victim == av->last_remainder &&
3969 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3971 /* split and reattach remainder */
3972 remainder_size = size - nb;
3973 remainder = chunk_at_offset(victim, nb);
3974 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3975 av->last_remainder = remainder;
3976 remainder->bk = remainder->fd = unsorted_chunks(av);
3978 set_head(victim, nb | PREV_INUSE |
3979 (av != &main_arena ? NON_MAIN_ARENA : 0));
3980 set_head(remainder, remainder_size | PREV_INUSE);
3981 set_foot(remainder, remainder_size);
3983 check_malloced_chunk(av, victim, nb);
3984 void *p = chunk2mem(victim);
3985 if (__builtin_expect (perturb_byte, 0))
3986 alloc_perturb (p, bytes);
3987 return p;
3990 /* remove from unsorted list */
3991 unsorted_chunks(av)->bk = bck;
3992 bck->fd = unsorted_chunks(av);
3994 /* Take now instead of binning if exact fit */
3996 if (size == nb) {
3997 set_inuse_bit_at_offset(victim, size);
3998 if (av != &main_arena)
3999 victim->size |= NON_MAIN_ARENA;
4000 check_malloced_chunk(av, victim, nb);
4001 void *p = chunk2mem(victim);
4002 if (__builtin_expect (perturb_byte, 0))
4003 alloc_perturb (p, bytes);
4004 return p;
4007 /* place chunk in bin */
4009 if (in_smallbin_range(size)) {
4010 victim_index = smallbin_index(size);
4011 bck = bin_at(av, victim_index);
4012 fwd = bck->fd;
4014 else {
4015 victim_index = largebin_index(size);
4016 bck = bin_at(av, victim_index);
4017 fwd = bck->fd;
4019 /* maintain large bins in sorted order */
4020 if (fwd != bck) {
4021 /* Or with inuse bit to speed comparisons */
4022 size |= PREV_INUSE;
4023 /* if smaller than smallest, bypass loop below */
4024 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
4025 if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
4026 fwd = bck;
4027 bck = bck->bk;
4029 else {
4030 assert((fwd->size & NON_MAIN_ARENA) == 0);
4031 while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
4032 fwd = fwd->fd;
4033 assert((fwd->size & NON_MAIN_ARENA) == 0);
4035 bck = fwd->bk;
4040 mark_bin(av, victim_index);
4041 victim->bk = bck;
4042 victim->fd = fwd;
4043 fwd->bk = victim;
4044 bck->fd = victim;
4048 If a large request, scan through the chunks of current bin in
4049 sorted order to find smallest that fits. This is the only step
4050 where an unbounded number of chunks might be scanned without doing
4051 anything useful with them. However the lists tend to be short.
4054 if (!in_smallbin_range(nb)) {
4055 bin = bin_at(av, idx);
4057 /* skip scan if empty or largest chunk is too small */
4058 if ((victim = last(bin)) != bin &&
4059 (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
4061 while (((unsigned long)(size = chunksize(victim)) <
4062 (unsigned long)(nb)))
4063 victim = victim->bk;
4065 remainder_size = size - nb;
4066 unlink(victim, bck, fwd);
4068 /* Exhaust */
4069 if (remainder_size < MINSIZE) {
4070 set_inuse_bit_at_offset(victim, size);
4071 if (av != &main_arena)
4072 victim->size |= NON_MAIN_ARENA;
4074 /* Split */
4075 else {
4076 remainder = chunk_at_offset(victim, nb);
4077 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4078 remainder->bk = remainder->fd = unsorted_chunks(av);
4079 set_head(victim, nb | PREV_INUSE |
4080 (av != &main_arena ? NON_MAIN_ARENA : 0));
4081 set_head(remainder, remainder_size | PREV_INUSE);
4082 set_foot(remainder, remainder_size);
4084 check_malloced_chunk(av, victim, nb);
4085 void *p = chunk2mem(victim);
4086 if (__builtin_expect (perturb_byte, 0))
4087 alloc_perturb (p, bytes);
4088 return p;
4093 Search for a chunk by scanning bins, starting with next largest
4094 bin. This search is strictly by best-fit; i.e., the smallest
4095 (with ties going to approximately the least recently used) chunk
4096 that fits is selected.
4098 The bitmap avoids needing to check that most blocks are nonempty.
4099 The particular case of skipping all bins during warm-up phases
4100 when no chunks have been returned yet is faster than it might look.
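/*
  For illustration of the binmap bookkeeping that follows: with
  BINMAPSHIFT == 5 each map word covers 32 bins, so bin index 68 is
  tracked by block 68 >> 5 == 2 and bit 1 << (68 & 31) == 1 << 4.  A
  set bit only promises that the bin may be non-empty; bins found empty
  during the scan have their bit cleared below.
*/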
4103 ++idx;
4104 bin = bin_at(av,idx);
4105 block = idx2block(idx);
4106 map = av->binmap[block];
4107 bit = idx2bit(idx);
4109 for (;;) {
4111 /* Skip rest of block if there are no more set bits in this block. */
4112 if (bit > map || bit == 0) {
4113 do {
4114 if (++block >= BINMAPSIZE) /* out of bins */
4115 goto use_top;
4116 } while ( (map = av->binmap[block]) == 0);
4118 bin = bin_at(av, (block << BINMAPSHIFT));
4119 bit = 1;
4122 /* Advance to bin with set bit. There must be one. */
4123 while ((bit & map) == 0) {
4124 bin = next_bin(bin);
4125 bit <<= 1;
4126 assert(bit != 0);
4129 /* Inspect the bin. It is likely to be non-empty */
4130 victim = last(bin);
4132 /* If a false alarm (empty bin), clear the bit. */
4133 if (victim == bin) {
4134 av->binmap[block] = map &= ~bit; /* Write through */
4135 bin = next_bin(bin);
4136 bit <<= 1;
4139 else {
4140 size = chunksize(victim);
4142 /* We know the first chunk in this bin is big enough to use. */
4143 assert((unsigned long)(size) >= (unsigned long)(nb));
4145 remainder_size = size - nb;
4147 /* unlink */
4148 bck = victim->bk;
4149 bin->bk = bck;
4150 bck->fd = bin;
4152 /* Exhaust */
4153 if (remainder_size < MINSIZE) {
4154 set_inuse_bit_at_offset(victim, size);
4155 if (av != &main_arena)
4156 victim->size |= NON_MAIN_ARENA;
4159 /* Split */
4160 else {
4161 remainder = chunk_at_offset(victim, nb);
4163 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4164 remainder->bk = remainder->fd = unsorted_chunks(av);
4165 /* advertise as last remainder */
4166 if (in_smallbin_range(nb))
4167 av->last_remainder = remainder;
4169 set_head(victim, nb | PREV_INUSE |
4170 (av != &main_arena ? NON_MAIN_ARENA : 0));
4171 set_head(remainder, remainder_size | PREV_INUSE);
4172 set_foot(remainder, remainder_size);
4174 check_malloced_chunk(av, victim, nb);
4175 void *p = chunk2mem(victim);
4176 if (__builtin_expect (perturb_byte, 0))
4177 alloc_perturb (p, bytes);
4178 return p;
4182 use_top:
4184 If large enough, split off the chunk bordering the end of memory
4185 (held in av->top). Note that this is in accord with the best-fit
4186 search rule. In effect, av->top is treated as larger (and thus
4187 less well fitting) than any other available chunk since it can
4188 be extended to be as large as necessary (up to system
4189 limitations).
4191 We require that av->top always exists (i.e., has size >=
4192 MINSIZE) after initialization, so if it would otherwise be
4193 exhausted by current request, it is replenished. (The main
4194 reason for ensuring it exists is that we may need MINSIZE space
4195 to put in fenceposts in sysmalloc.)
4198 victim = av->top;
4199 size = chunksize(victim);
4201 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4202 remainder_size = size - nb;
4203 remainder = chunk_at_offset(victim, nb);
4204 av->top = remainder;
4205 set_head(victim, nb | PREV_INUSE |
4206 (av != &main_arena ? NON_MAIN_ARENA : 0));
4207 set_head(remainder, remainder_size | PREV_INUSE);
4209 check_malloced_chunk(av, victim, nb);
4210 void *p = chunk2mem(victim);
4211 if (__builtin_expect (perturb_byte, 0))
4212 alloc_perturb (p, bytes);
4213 return p;
4217 If there is space available in fastbins, consolidate and retry,
4218 to possibly avoid expanding memory. This can occur only if nb is
4219 in smallbin range so we didn't consolidate upon entry.
4222 else if (have_fastchunks(av)) {
4223 assert(in_smallbin_range(nb));
4224 malloc_consolidate(av);
4225 idx = smallbin_index(nb); /* restore original bin index */
4229 Otherwise, relay to handle system-dependent cases
4231 else {
4232 void *p = sYSMALLOc(nb, av);
4233 if (__builtin_expect (perturb_byte, 0))
4234 alloc_perturb (p, bytes);
4235 return p;
4241 ------------------------------ free ------------------------------
4244 void
4245 _int_free(mstate av, Void_t* mem)
4247 mchunkptr p; /* chunk corresponding to mem */
4248 INTERNAL_SIZE_T size; /* its size */
4249 mfastbinptr* fb; /* associated fastbin */
4250 mchunkptr nextchunk; /* next contiguous chunk */
4251 INTERNAL_SIZE_T nextsize; /* its size */
4252 int nextinuse; /* true if nextchunk is used */
4253 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4254 mchunkptr bck; /* misc temp for linking */
4255 mchunkptr fwd; /* misc temp for linking */
4257 const char *errstr = NULL;
4259 p = mem2chunk(mem);
4260 size = chunksize(p);
4262 /* Little security check which won't hurt performance: the
4263 allocator never wraps around at the end of the address space.
4264 Therefore we can exclude some size values which might appear
4265 here by accident or by "design" from some intruder. */
4266 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4267 || __builtin_expect ((uintptr_t) p & MALLOC_ALIGN_MASK, 0))
4269 errstr = "free(): invalid pointer";
4270 errout:
4271 malloc_printerr (check_action, errstr, mem);
4272 return;
4275 check_inuse_chunk(av, p);
4278 If eligible, place chunk on a fastbin so it can be found
4279 and used quickly in malloc.
4282 if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
4284 #if TRIM_FASTBINS
4286 If TRIM_FASTBINS set, don't place chunks
4287 bordering top into fastbins
4289 && (chunk_at_offset(p, size) != av->top)
4290 #endif
4293 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
4294 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4295 >= av->system_mem, 0))
4297 errstr = "free(): invalid next size (fast)";
4298 goto errout;
4301 set_fastchunks(av);
4302 fb = &(av->fastbins[fastbin_index(size)]);
4303 /* Another simple check: make sure the top of the bin is not the
4304 record we are going to add (i.e., double free). */
4305 if (__builtin_expect (*fb == p, 0))
4307 errstr = "double free or corruption (fasttop)";
4308 goto errout;
4311 if (__builtin_expect (perturb_byte, 0))
4312 free_perturb (mem, size - SIZE_SZ);
4314 p->fd = *fb;
4315 *fb = p;
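/*
  The fasttop check above catches the most common double-free pattern,
  for example:

    void *q = malloc (20);   // small enough for a fastbin
    free (q);
    free (q);                // "double free or corruption (fasttop)"

  It is deliberately cheap: if a free of another chunk in the same bin
  happens in between, the duplicate is not detected here.
*/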
4319 Consolidate other non-mmapped chunks as they arrive.
4322 else if (!chunk_is_mmapped(p)) {
4323 nextchunk = chunk_at_offset(p, size);
4325 /* Lightweight tests: check whether the block is already the
4326 top block. */
4327 if (__builtin_expect (p == av->top, 0))
4329 errstr = "double free or corruption (top)";
4330 goto errout;
4332 /* Or whether the next chunk is beyond the boundaries of the arena. */
4333 if (__builtin_expect (contiguous (av)
4334 && (char *) nextchunk
4335 >= ((char *) av->top + chunksize(av->top)), 0))
4337 errstr = "double free or corruption (out)";
4338 goto errout;
4340 /* Or whether the block is actually not marked used. */
4341 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4343 errstr = "double free or corruption (!prev)";
4344 goto errout;
4347 nextsize = chunksize(nextchunk);
4348 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4349 || __builtin_expect (nextsize >= av->system_mem, 0))
4351 errstr = "free(): invalid next size (normal)";
4352 goto errout;
4355 if (__builtin_expect (perturb_byte, 0))
4356 free_perturb (mem, size - SIZE_SZ);
4358 /* consolidate backward */
4359 if (!prev_inuse(p)) {
4360 prevsize = p->prev_size;
4361 size += prevsize;
4362 p = chunk_at_offset(p, -((long) prevsize));
4363 unlink(p, bck, fwd);
4366 if (nextchunk != av->top) {
4367 /* get and clear inuse bit */
4368 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4370 /* consolidate forward */
4371 if (!nextinuse) {
4372 unlink(nextchunk, bck, fwd);
4373 size += nextsize;
4374 } else
4375 clear_inuse_bit_at_offset(nextchunk, 0);
4378 Place the chunk in unsorted chunk list. Chunks are
4379 not placed into regular bins until after they have
4380 been given one chance to be used in malloc.
4383 bck = unsorted_chunks(av);
4384 fwd = bck->fd;
4385 p->bk = bck;
4386 p->fd = fwd;
4387 bck->fd = p;
4388 fwd->bk = p;
4390 set_head(p, size | PREV_INUSE);
4391 set_foot(p, size);
4393 check_free_chunk(av, p);
4397 If the chunk borders the current high end of memory,
4398 consolidate into top
4401 else {
4402 size += nextsize;
4403 set_head(p, size | PREV_INUSE);
4404 av->top = p;
4405 check_chunk(av, p);
4409 If freeing a large space, consolidate possibly-surrounding
4410 chunks. Then, if the total unused topmost memory exceeds trim
4411 threshold, ask malloc_trim to reduce top.
4413 Unless max_fast is 0, we don't know if there are fastbins
4414 bordering top, so we cannot tell for sure whether threshold
4415 has been reached unless fastbins are consolidated. But we
4416 don't want to consolidate on each free. As a compromise,
4417 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4418 is reached.
4421 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4422 if (have_fastchunks(av))
4423 malloc_consolidate(av);
4425 if (av == &main_arena) {
4426 #ifndef MORECORE_CANNOT_TRIM
4427 if ((unsigned long)(chunksize(av->top)) >=
4428 (unsigned long)(mp_.trim_threshold))
4429 sYSTRIm(mp_.top_pad, av);
4430 #endif
4431 } else {
4432 /* Always try heap_trim(), even if the top chunk is not
4433 large, because the corresponding heap might go away. */
4434 heap_info *heap = heap_for_ptr(top(av));
4436 assert(heap->ar_ptr == av);
4437 heap_trim(heap, mp_.top_pad);
4443 If the chunk was allocated via mmap, release via munmap(). Note
4444 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4445 user must have overwritten memory. There's nothing we can do to
4446 catch this error unless MALLOC_DEBUG is set, in which case
4447 check_inuse_chunk (above) will have triggered an error.
4450 else {
4451 #if HAVE_MMAP
4452 munmap_chunk (p);
4453 #endif
4458 ------------------------- malloc_consolidate -------------------------
4460 malloc_consolidate is a specialized version of free() that tears
4461 down chunks held in fastbins. Free itself cannot be used for this
4462 purpose since, among other things, it might place chunks back onto
4463 fastbins. So, instead, we need to use a minor variant of the same
4464 code.
4466 Also, because this routine needs to be called the first time through
4467 malloc anyway, it turns out to be the perfect place to trigger
4468 initialization code.
4471 #if __STD_C
4472 static void malloc_consolidate(mstate av)
4473 #else
4474 static void malloc_consolidate(av) mstate av;
4475 #endif
4477 mfastbinptr* fb; /* current fastbin being consolidated */
4478 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4479 mchunkptr p; /* current chunk being consolidated */
4480 mchunkptr nextp; /* next chunk to consolidate */
4481 mchunkptr unsorted_bin; /* bin header */
4482 mchunkptr first_unsorted; /* chunk to link to */
4484 /* These have same use as in free() */
4485 mchunkptr nextchunk;
4486 INTERNAL_SIZE_T size;
4487 INTERNAL_SIZE_T nextsize;
4488 INTERNAL_SIZE_T prevsize;
4489 int nextinuse;
4490 mchunkptr bck;
4491 mchunkptr fwd;
4494 If max_fast is 0, we know that av hasn't
4495 yet been initialized, in which case do so below
4498 if (av->max_fast != 0) {
4499 clear_fastchunks(av);
4501 unsorted_bin = unsorted_chunks(av);
4504 Remove each chunk from fast bin and consolidate it, then place it
4505 in the unsorted bin. Among other reasons for doing this,
4506 placing in unsorted bin avoids needing to calculate actual bins
4507 until malloc is sure that chunks aren't immediately going to be
4508 reused anyway.
4511 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
4512 fb = &(av->fastbins[0]);
4513 do {
4514 if ( (p = *fb) != 0) {
4515 *fb = 0;
4517 do {
4518 check_inuse_chunk(av, p);
4519 nextp = p->fd;
4521 /* Slightly streamlined version of consolidation code in free() */
4522 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4523 nextchunk = chunk_at_offset(p, size);
4524 nextsize = chunksize(nextchunk);
4526 if (!prev_inuse(p)) {
4527 prevsize = p->prev_size;
4528 size += prevsize;
4529 p = chunk_at_offset(p, -((long) prevsize));
4530 unlink(p, bck, fwd);
4533 if (nextchunk != av->top) {
4534 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4536 if (!nextinuse) {
4537 size += nextsize;
4538 unlink(nextchunk, bck, fwd);
4539 } else
4540 clear_inuse_bit_at_offset(nextchunk, 0);
4542 first_unsorted = unsorted_bin->fd;
4543 unsorted_bin->fd = p;
4544 first_unsorted->bk = p;
4546 set_head(p, size | PREV_INUSE);
4547 p->bk = unsorted_bin;
4548 p->fd = first_unsorted;
4549 set_foot(p, size);
4552 else {
4553 size += nextsize;
4554 set_head(p, size | PREV_INUSE);
4555 av->top = p;
4558 } while ( (p = nextp) != 0);
4561 } while (fb++ != maxfb);
4563 else {
4564 malloc_init_state(av);
4565 check_malloc_state(av);
4570 ------------------------------ realloc ------------------------------
4573 Void_t*
4574 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4576 INTERNAL_SIZE_T nb; /* padded request size */
4578 mchunkptr oldp; /* chunk corresponding to oldmem */
4579 INTERNAL_SIZE_T oldsize; /* its size */
4581 mchunkptr newp; /* chunk to return */
4582 INTERNAL_SIZE_T newsize; /* its size */
4583 Void_t* newmem; /* corresponding user mem */
4585 mchunkptr next; /* next contiguous chunk after oldp */
4587 mchunkptr remainder; /* extra space at end of newp */
4588 unsigned long remainder_size; /* its size */
4590 mchunkptr bck; /* misc temp for linking */
4591 mchunkptr fwd; /* misc temp for linking */
4593 unsigned long copysize; /* bytes to copy */
4594 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4595 INTERNAL_SIZE_T* s; /* copy source */
4596 INTERNAL_SIZE_T* d; /* copy destination */
4598 const char *errstr = NULL;
4601 checked_request2size(bytes, nb);
4603 oldp = mem2chunk(oldmem);
4604 oldsize = chunksize(oldp);
4606 /* Simple tests for old block integrity. */
4607 if (__builtin_expect ((uintptr_t) oldp & MALLOC_ALIGN_MASK, 0))
4609 errstr = "realloc(): invalid pointer";
4610 errout:
4611 malloc_printerr (check_action, errstr, oldmem);
4612 return NULL;
4614 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4615 || __builtin_expect (oldsize >= av->system_mem, 0))
4617 errstr = "realloc(): invalid size";
4618 goto errout;
4621 check_inuse_chunk(av, oldp);
4623 if (!chunk_is_mmapped(oldp)) {
4625 next = chunk_at_offset(oldp, oldsize);
4626 INTERNAL_SIZE_T nextsize = chunksize(next);
4627 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4628 || __builtin_expect (nextsize >= av->system_mem, 0))
4630 errstr = "realloc(): invalid next size";
4631 goto errout;
4634 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4635 /* already big enough; split below */
4636 newp = oldp;
4637 newsize = oldsize;
4640 else {
4641 /* Try to expand forward into top */
4642 if (next == av->top &&
4643 (unsigned long)(newsize = oldsize + nextsize) >=
4644 (unsigned long)(nb + MINSIZE)) {
4645 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4646 av->top = chunk_at_offset(oldp, nb);
4647 set_head(av->top, (newsize - nb) | PREV_INUSE);
4648 check_inuse_chunk(av, oldp);
4649 return chunk2mem(oldp);
4652 /* Try to expand forward into next chunk; split off remainder below */
4653 else if (next != av->top &&
4654 !inuse(next) &&
4655 (unsigned long)(newsize = oldsize + nextsize) >=
4656 (unsigned long)(nb)) {
4657 newp = oldp;
4658 unlink(next, bck, fwd);
4661 /* allocate, copy, free */
4662 else {
4663 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4664 if (newmem == 0)
4665 return 0; /* propagate failure */
4667 newp = mem2chunk(newmem);
4668 newsize = chunksize(newp);
4671 Avoid copy if newp is next chunk after oldp.
4673 if (newp == next) {
4674 newsize += oldsize;
4675 newp = oldp;
4677 else {
4679 Unroll copy of <= 36 bytes (72 if 8byte sizes)
4680 We know that contents have an odd number of
4681 INTERNAL_SIZE_T-sized words; minimally 3.
4684 copysize = oldsize - SIZE_SZ;
4685 s = (INTERNAL_SIZE_T*)(oldmem);
4686 d = (INTERNAL_SIZE_T*)(newmem);
4687 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4688 assert(ncopies >= 3);
4690 if (ncopies > 9)
4691 MALLOC_COPY(d, s, copysize);
4693 else {
4694 *(d+0) = *(s+0);
4695 *(d+1) = *(s+1);
4696 *(d+2) = *(s+2);
4697 if (ncopies > 4) {
4698 *(d+3) = *(s+3);
4699 *(d+4) = *(s+4);
4700 if (ncopies > 6) {
4701 *(d+5) = *(s+5);
4702 *(d+6) = *(s+6);
4703 if (ncopies > 8) {
4704 *(d+7) = *(s+7);
4705 *(d+8) = *(s+8);
4711 _int_free(av, oldmem);
4712 check_inuse_chunk(av, newp);
4713 return chunk2mem(newp);
4718 /* If possible, free extra space in old or extended chunk */
4720 assert((unsigned long)(newsize) >= (unsigned long)(nb));
4722 remainder_size = newsize - nb;
4724 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4725 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4726 set_inuse_bit_at_offset(newp, newsize);
4728 else { /* split remainder */
4729 remainder = chunk_at_offset(newp, nb);
4730 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4731 set_head(remainder, remainder_size | PREV_INUSE |
4732 (av != &main_arena ? NON_MAIN_ARENA : 0));
4733 /* Mark remainder as inuse so free() won't complain */
4734 set_inuse_bit_at_offset(remainder, remainder_size);
4735 _int_free(av, chunk2mem(remainder));
4738 check_inuse_chunk(av, newp);
4739 return chunk2mem(newp);
4743 Handle mmap cases
4746 else {
4747 #if HAVE_MMAP
4749 #if HAVE_MREMAP
4750 INTERNAL_SIZE_T offset = oldp->prev_size;
4751 size_t pagemask = mp_.pagesize - 1;
4752 char *cp;
4753 unsigned long sum;
4755 /* Note the extra SIZE_SZ overhead */
4756 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4758 /* no need to remap if the page-rounded size is unchanged */
4759 if (oldsize == newsize - offset)
4760 return oldmem;
4762 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
4764 if (cp != MAP_FAILED) {
4766 newp = (mchunkptr)(cp + offset);
4767 set_head(newp, (newsize - offset)|IS_MMAPPED);
4769 assert(aligned_OK(chunk2mem(newp)));
4770 assert((newp->prev_size == offset));
4772 /* update statistics */
4773 sum = mp_.mmapped_mem += newsize - oldsize;
4774 if (sum > (unsigned long)(mp_.max_mmapped_mem))
4775 mp_.max_mmapped_mem = sum;
4776 #ifdef NO_THREADS
4777 sum += main_arena.system_mem;
4778 if (sum > (unsigned long)(mp_.max_total_mem))
4779 mp_.max_total_mem = sum;
4780 #endif
4782 return chunk2mem(newp);
4784 #endif
4786 /* Note the extra SIZE_SZ overhead. */
4787 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
4788 newmem = oldmem; /* do nothing */
4789 else {
4790 /* Must alloc, copy, free. */
4791 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4792 if (newmem != 0) {
4793 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4794 _int_free(av, oldmem);
4797 return newmem;
4799 #else
4800 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4801 check_malloc_state(av);
4802 MALLOC_FAILURE_ACTION;
4803 return 0;
4804 #endif
4809 ------------------------------ memalign ------------------------------
4812 Void_t*
4813 _int_memalign(mstate av, size_t alignment, size_t bytes)
4815 INTERNAL_SIZE_T nb; /* padded request size */
4816 char* m; /* memory returned by malloc call */
4817 mchunkptr p; /* corresponding chunk */
4818 char* brk; /* alignment point within p */
4819 mchunkptr newp; /* chunk to return */
4820 INTERNAL_SIZE_T newsize; /* its size */
4821 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4822 mchunkptr remainder; /* spare room at end to split off */
4823 unsigned long remainder_size; /* its size */
4824 INTERNAL_SIZE_T size;
4826 /* If we need less alignment than we give anyway, just relay to malloc */
4828 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4830 /* Otherwise, ensure that it is at least a minimum chunk size */
4832 if (alignment < MINSIZE) alignment = MINSIZE;
4834 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4835 if ((alignment & (alignment - 1)) != 0) {
4836 size_t a = MALLOC_ALIGNMENT * 2;
4837 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4838 alignment = a;
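/*
  For illustration of the rounding above (assuming MALLOC_ALIGNMENT ==
  8): a request for 48-byte alignment, which is not a power of two, is
  raised to 64, since a doubles 16 -> 32 -> 64 before it first reaches
  or exceeds 48.
*/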
4841 checked_request2size(bytes, nb);
4844 Strategy: find a spot within that chunk that meets the alignment
4845 request, and then possibly free the leading and trailing space.
4849 /* Call malloc with worst case padding to hit alignment. */
4851 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4853 if (m == 0) return 0; /* propagate failure */
4855 p = mem2chunk(m);
4857 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4860 Find an aligned spot inside chunk. Since we need to give back
4861 leading space in a chunk of at least MINSIZE, if the first
4862 calculation places us at a spot with less than MINSIZE leader,
4863 we can move to the next aligned spot -- we've allocated enough
4864 total room so that this is always possible.
4867 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4868 -((signed long) alignment));
4869 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4870 brk += alignment;
4872 newp = (mchunkptr)brk;
4873 leadsize = brk - (char*)(p);
4874 newsize = chunksize(p) - leadsize;
4876 /* For mmapped chunks, just adjust offset */
4877 if (chunk_is_mmapped(p)) {
4878 newp->prev_size = p->prev_size + leadsize;
4879 set_head(newp, newsize|IS_MMAPPED);
4880 return chunk2mem(newp);
4883 /* Otherwise, give back leader, use the rest */
4884 set_head(newp, newsize | PREV_INUSE |
4885 (av != &main_arena ? NON_MAIN_ARENA : 0));
4886 set_inuse_bit_at_offset(newp, newsize);
4887 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4888 _int_free(av, chunk2mem(p));
4889 p = newp;
4891 assert (newsize >= nb &&
4892 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4895 /* Also give back spare room at the end */
4896 if (!chunk_is_mmapped(p)) {
4897 size = chunksize(p);
4898 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4899 remainder_size = size - nb;
4900 remainder = chunk_at_offset(p, nb);
4901 set_head(remainder, remainder_size | PREV_INUSE |
4902 (av != &main_arena ? NON_MAIN_ARENA : 0));
4903 set_head_size(p, nb);
4904 _int_free(av, chunk2mem(remainder));
4908 check_inuse_chunk(av, p);
4909 return chunk2mem(p);
4912 #if 0
4914 ------------------------------ calloc ------------------------------
4917 #if __STD_C
4918 Void_t* cALLOc(size_t n_elements, size_t elem_size)
4919 #else
4920 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4921 #endif
4923 mchunkptr p;
4924 unsigned long clearsize;
4925 unsigned long nclears;
4926 INTERNAL_SIZE_T* d;
4928 Void_t* mem = mALLOc(n_elements * elem_size);
4930 if (mem != 0) {
4931 p = mem2chunk(mem);
4933 #if MMAP_CLEARS
4934 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
4935 #endif
4938 Unroll clear of <= 36 bytes (72 if 8byte sizes)
4939 We know that contents have an odd number of
4940 INTERNAL_SIZE_T-sized words; minimally 3.
4943 d = (INTERNAL_SIZE_T*)mem;
4944 clearsize = chunksize(p) - SIZE_SZ;
4945 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4946 assert(nclears >= 3);
4948 if (nclears > 9)
4949 MALLOC_ZERO(d, clearsize);
4951 else {
4952 *(d+0) = 0;
4953 *(d+1) = 0;
4954 *(d+2) = 0;
4955 if (nclears > 4) {
4956 *(d+3) = 0;
4957 *(d+4) = 0;
4958 if (nclears > 6) {
4959 *(d+5) = 0;
4960 *(d+6) = 0;
4961 if (nclears > 8) {
4962 *(d+7) = 0;
4963 *(d+8) = 0;
4970 return mem;
4972 #endif /* 0 */
4974 #ifndef _LIBC
4976 ------------------------- independent_calloc -------------------------
4979 Void_t**
4980 #if __STD_C
4981 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
4982 #else
4983 _int_icalloc(av, n_elements, elem_size, chunks)
4984 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
4985 #endif
4987 size_t sz = elem_size; /* serves as 1-element array */
4988 /* opts arg of 3 means all elements are same size, and should be cleared */
4989 return iALLOc(av, n_elements, &sz, 3, chunks);
4993 ------------------------- independent_comalloc -------------------------
4996 Void_t**
4997 #if __STD_C
4998 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
4999 #else
5000 _int_icomalloc(av, n_elements, sizes, chunks)
5001 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
5002 #endif
5004 return iALLOc(av, n_elements, sizes, 0, chunks);
5009 ------------------------------ ialloc ------------------------------
5010 ialloc provides common support for independent_X routines, handling all of
5011 the combinations that can result.
5013 The opts arg has:
5014 bit 0 set if all elements are same size (using sizes[0])
5015 bit 1 set if elements should be zeroed
5019 static Void_t**
5020 #if __STD_C
5021 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
5022 #else
5023 iALLOc(av, n_elements, sizes, opts, chunks)
5024 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
5025 #endif
5027 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
5028 INTERNAL_SIZE_T contents_size; /* total size of elements */
5029 INTERNAL_SIZE_T array_size; /* request size of pointer array */
5030 Void_t* mem; /* malloced aggregate space */
5031 mchunkptr p; /* corresponding chunk */
5032 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
5033 Void_t** marray; /* either "chunks" or malloced ptr array */
5034 mchunkptr array_chunk; /* chunk for malloced ptr array */
5035 int mmx; /* to disable mmap */
5036 INTERNAL_SIZE_T size;
5037 INTERNAL_SIZE_T size_flags;
5038 size_t i;
5040 /* Ensure initialization/consolidation */
5041 if (have_fastchunks(av)) malloc_consolidate(av);
5043 /* compute array length, if needed */
5044 if (chunks != 0) {
5045 if (n_elements == 0)
5046 return chunks; /* nothing to do */
5047 marray = chunks;
5048 array_size = 0;
5050 else {
5051 /* if empty req, must still return chunk representing empty array */
5052 if (n_elements == 0)
5053 return (Void_t**) _int_malloc(av, 0);
5054 marray = 0;
5055 array_size = request2size(n_elements * (sizeof(Void_t*)));
5058 /* compute total element size */
5059 if (opts & 0x1) { /* all-same-size */
5060 element_size = request2size(*sizes);
5061 contents_size = n_elements * element_size;
5063 else { /* add up all the sizes */
5064 element_size = 0;
5065 contents_size = 0;
5066 for (i = 0; i != n_elements; ++i)
5067 contents_size += request2size(sizes[i]);
5070 /* subtract out alignment bytes from total to minimize overallocation */
5071 size = contents_size + array_size - MALLOC_ALIGN_MASK;
5074 Allocate the aggregate chunk.
5075 But first disable mmap so malloc won't use it, since
5076 we would not be able to later free/realloc space internal
5077 to a segregated mmap region.
5079 mmx = mp_.n_mmaps_max; /* disable mmap */
5080 mp_.n_mmaps_max = 0;
5081 mem = _int_malloc(av, size);
5082 mp_.n_mmaps_max = mmx; /* reset mmap */
5083 if (mem == 0)
5084 return 0;
5086 p = mem2chunk(mem);
5087 assert(!chunk_is_mmapped(p));
5088 remainder_size = chunksize(p);
5090 if (opts & 0x2) { /* optionally clear the elements */
5091 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
5094 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
5096 /* If not provided, allocate the pointer array as final part of chunk */
5097 if (marray == 0) {
5098 array_chunk = chunk_at_offset(p, contents_size);
5099 marray = (Void_t**) (chunk2mem(array_chunk));
5100 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
5101 remainder_size = contents_size;
5104 /* split out elements */
5105 for (i = 0; ; ++i) {
5106 marray[i] = chunk2mem(p);
5107 if (i != n_elements-1) {
5108 if (element_size != 0)
5109 size = element_size;
5110 else
5111 size = request2size(sizes[i]);
5112 remainder_size -= size;
5113 set_head(p, size | size_flags);
5114 p = chunk_at_offset(p, size);
5116 else { /* the final element absorbs any overallocation slop */
5117 set_head(p, remainder_size | size_flags);
5118 break;
5122 #if MALLOC_DEBUG
5123 if (marray != chunks) {
5124 /* final element must have exactly exhausted chunk */
5125 if (element_size != 0)
5126 assert(remainder_size == element_size);
5127 else
5128 assert(remainder_size == request2size(sizes[i]));
5129 check_inuse_chunk(av, mem2chunk(marray));
5132 for (i = 0; i != n_elements; ++i)
5133 check_inuse_chunk(av, mem2chunk(marray[i]));
5134 #endif
5136 return marray;
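/*
  In a standalone (non-_LIBC) build the wrappers above are exported
  under the user-visible names independent_calloc and
  independent_comalloc (see the public_* name mapping near the top of
  this file).  A minimal usage sketch of the comalloc variant, with
  illustrative sizes:

    size_t sizes[3] = { 32, 64, 128 };
    void  *ptrs[3];

    if (independent_comalloc (3, sizes, ptrs) != 0)
      {
        // each ptrs[i] points to sizes[i] usable bytes, carved out of
        // one underlying chunk but individually freeable, in any order
        free (ptrs[1]);
        free (ptrs[0]);
        free (ptrs[2]);
      }
*/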
5138 #endif /* _LIBC */
5142 ------------------------------ valloc ------------------------------
5145 Void_t*
5146 #if __STD_C
5147 _int_valloc(mstate av, size_t bytes)
5148 #else
5149 _int_valloc(av, bytes) mstate av; size_t bytes;
5150 #endif
5152 /* Ensure initialization/consolidation */
5153 if (have_fastchunks(av)) malloc_consolidate(av);
5154 return _int_memalign(av, mp_.pagesize, bytes);
5158 ------------------------------ pvalloc ------------------------------
5162 Void_t*
5163 #if __STD_C
5164 _int_pvalloc(mstate av, size_t bytes)
5165 #else
5166 _int_pvalloc(av, bytes) mstate av; size_t bytes;
5167 #endif
5169 size_t pagesz;
5171 /* Ensure initialization/consolidation */
5172 if (have_fastchunks(av)) malloc_consolidate(av);
5173 pagesz = mp_.pagesize;
5174 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5179 ------------------------------ malloc_trim ------------------------------
5182 #if __STD_C
5183 int mTRIm(size_t pad)
5184 #else
5185 int mTRIm(pad) size_t pad;
5186 #endif
5188 mstate av = &main_arena; /* already locked */
5190 /* Ensure initialization/consolidation */
5191 malloc_consolidate(av);
5193 #ifndef MORECORE_CANNOT_TRIM
5194 return sYSTRIm(pad, av);
5195 #else
5196 return 0;
5197 #endif
5202 ------------------------- malloc_usable_size -------------------------
5205 #if __STD_C
5206 size_t mUSABLe(Void_t* mem)
5207 #else
5208 size_t mUSABLe(mem) Void_t* mem;
5209 #endif
5211 mchunkptr p;
5212 if (mem != 0) {
5213 p = mem2chunk(mem);
5214 if (chunk_is_mmapped(p))
5215 return chunksize(p) - 2*SIZE_SZ;
5216 else if (inuse(p))
5217 return chunksize(p) - SIZE_SZ;
5219 return 0;
5223 ------------------------------ mallinfo ------------------------------
5226 struct mallinfo mALLINFo(mstate av)
5228 struct mallinfo mi;
5229 size_t i;
5230 mbinptr b;
5231 mchunkptr p;
5232 INTERNAL_SIZE_T avail;
5233 INTERNAL_SIZE_T fastavail;
5234 int nblocks;
5235 int nfastblocks;
5237 /* Ensure initialization */
5238 if (av->top == 0) malloc_consolidate(av);
5240 check_malloc_state(av);
5242 /* Account for top */
5243 avail = chunksize(av->top);
5244 nblocks = 1; /* top always exists */
5246 /* traverse fastbins */
5247 nfastblocks = 0;
5248 fastavail = 0;
5250 for (i = 0; i < NFASTBINS; ++i) {
5251 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5252 ++nfastblocks;
5253 fastavail += chunksize(p);
5257 avail += fastavail;
5259 /* traverse regular bins */
5260 for (i = 1; i < NBINS; ++i) {
5261 b = bin_at(av, i);
5262 for (p = last(b); p != b; p = p->bk) {
5263 ++nblocks;
5264 avail += chunksize(p);
5268 mi.smblks = nfastblocks;
5269 mi.ordblks = nblocks;
5270 mi.fordblks = avail;
5271 mi.uordblks = av->system_mem - avail;
5272 mi.arena = av->system_mem;
5273 mi.hblks = mp_.n_mmaps;
5274 mi.hblkhd = mp_.mmapped_mem;
5275 mi.fsmblks = fastavail;
5276 mi.keepcost = chunksize(av->top);
5277 mi.usmblks = mp_.max_total_mem;
5278 return mi;
5282 ------------------------------ malloc_stats ------------------------------
5285 void mSTATs()
5287 int i;
5288 mstate ar_ptr;
5289 struct mallinfo mi;
5290 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5291 #if THREAD_STATS
5292 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5293 #endif
5295 if(__malloc_initialized < 0)
5296 ptmalloc_init ();
5297 #ifdef _LIBC
5298 _IO_flockfile (stderr);
5299 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5300 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5301 #endif
5302 for (i=0, ar_ptr = &main_arena;; i++) {
5303 (void)mutex_lock(&ar_ptr->mutex);
5304 mi = mALLINFo(ar_ptr);
5305 fprintf(stderr, "Arena %d:\n", i);
5306 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5307 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
5308 #if MALLOC_DEBUG > 1
5309 if (i > 0)
5310 dump_heap(heap_for_ptr(top(ar_ptr)));
5311 #endif
5312 system_b += mi.arena;
5313 in_use_b += mi.uordblks;
5314 #if THREAD_STATS
5315 stat_lock_direct += ar_ptr->stat_lock_direct;
5316 stat_lock_loop += ar_ptr->stat_lock_loop;
5317 stat_lock_wait += ar_ptr->stat_lock_wait;
5318 #endif
5319 (void)mutex_unlock(&ar_ptr->mutex);
5320 ar_ptr = ar_ptr->next;
5321 if(ar_ptr == &main_arena) break;
5323 #if HAVE_MMAP
5324 fprintf(stderr, "Total (incl. mmap):\n");
5325 #else
5326 fprintf(stderr, "Total:\n");
5327 #endif
5328 fprintf(stderr, "system bytes = %10u\n", system_b);
5329 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5330 #ifdef NO_THREADS
5331 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
5332 #endif
5333 #if HAVE_MMAP
5334 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5335 fprintf(stderr, "max mmap bytes = %10lu\n",
5336 (unsigned long)mp_.max_mmapped_mem);
5337 #endif
5338 #if THREAD_STATS
5339 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
5340 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5341 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
5342 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5343 fprintf(stderr, "locked total = %10ld\n",
5344 stat_lock_direct + stat_lock_loop + stat_lock_wait);
5345 #endif
5346 #ifdef _LIBC
5347 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5348 _IO_funlockfile (stderr);
5349 #endif
5354 ------------------------------ mallopt ------------------------------
5357 #if __STD_C
5358 int mALLOPt(int param_number, int value)
5359 #else
5360 int mALLOPt(param_number, value) int param_number; int value;
5361 #endif
5363 mstate av = &main_arena;
5364 int res = 1;
5366 if(__malloc_initialized < 0)
5367 ptmalloc_init ();
5368 (void)mutex_lock(&av->mutex);
5369 /* Ensure initialization/consolidation */
5370 malloc_consolidate(av);
5372 switch(param_number) {
5373 case M_MXFAST:
5374 if (value >= 0 && value <= MAX_FAST_SIZE) {
5375 set_max_fast(av, value);
5377 else
5378 res = 0;
5379 break;
5381 case M_TRIM_THRESHOLD:
5382 mp_.trim_threshold = value;
5383 break;
5385 case M_TOP_PAD:
5386 mp_.top_pad = value;
5387 break;
5389 case M_MMAP_THRESHOLD:
5390 #if USE_ARENAS
5391 /* Forbid setting the threshold too high. */
5392 if((unsigned long)value > HEAP_MAX_SIZE/2)
5393 res = 0;
5394 else
5395 #endif
5396 mp_.mmap_threshold = value;
5397 break;
5399 case M_MMAP_MAX:
5400 #if !HAVE_MMAP
5401 if (value != 0)
5402 res = 0;
5403 else
5404 #endif
5405 mp_.n_mmaps_max = value;
5406 break;
5408 case M_CHECK_ACTION:
5409 check_action = value;
5410 break;
5412 case M_PERTURB:
5413 perturb_byte = value;
5414 break;
5416 (void)mutex_unlock(&av->mutex);
5417 return res;
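/*
  Typical uses of the knobs handled above, with illustrative values:

    mallopt (M_MXFAST, 0);                    // disable fastbin caching
    mallopt (M_TRIM_THRESHOLD, 64 * 1024);    // return memory to the OS sooner
    mallopt (M_MMAP_THRESHOLD, 1024 * 1024);  // mmap only requests >= 1MB
    mallopt (M_PERTURB, 0xaa);                // poison allocated/freed memory

  mallopt returns 1 on success and 0 if the value is rejected.
*/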
5422 -------------------- Alternative MORECORE functions --------------------
5427 General Requirements for MORECORE.
5429 The MORECORE function must have the following properties:
5431 If MORECORE_CONTIGUOUS is false:
5433 * MORECORE must allocate in multiples of pagesize. It will
5434 only be called with arguments that are multiples of pagesize.
5436 * MORECORE(0) must return an address that is at least
5437 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5439 else (i.e. If MORECORE_CONTIGUOUS is true):
5441 * Consecutive calls to MORECORE with positive arguments
5442 return increasing addresses, indicating that space has been
5443 contiguously extended.
5445 * MORECORE need not allocate in multiples of pagesize.
5446 Calls to MORECORE need not have args of multiples of pagesize.
5448 * MORECORE need not page-align.
5450 In either case:
5452 * MORECORE may allocate more memory than requested. (Or even less,
5453 but this will generally result in a malloc failure.)
5455 * MORECORE must not allocate memory when given argument zero, but
5456 instead return one past the end address of memory from previous
5457 nonzero call. This malloc does NOT call MORECORE(0)
5458 until at least one call with positive arguments is made, so
5459 the initial value returned is not important.
5461 * Even though consecutive calls to MORECORE need not return contiguous
5462 addresses, it must be OK for malloc'ed chunks to span multiple
5463 regions in those cases where they do happen to be contiguous.
5465 * MORECORE need not handle negative arguments -- it may instead
5466 just return MORECORE_FAILURE when given negative arguments.
5467 Negative arguments are always multiples of pagesize. MORECORE
5468 must not misinterpret negative args as large positive unsigned
5469 args. You can suppress all such calls from even occurring by defining
5470 MORECORE_CANNOT_TRIM.
5472 There is some variation across systems about the type of the
5473 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
5474 actually be size_t, because sbrk supports negative args, so it is
5475 normally the signed type of the same width as size_t (sometimes
5476 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
5477 matter though. Internally, we use "long" as arguments, which should
5478 work across all reasonable possibilities.
5480 Additionally, if MORECORE ever returns failure for a positive
5481 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
5482 system allocator. This is a useful backup strategy for systems with
5483 holes in address spaces -- in this case sbrk cannot contiguously
5484 expand the heap, but mmap may be able to map noncontiguous space.
5486 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
5487 a function that always returns MORECORE_FAILURE.
5489 If you are using this malloc with something other than sbrk (or its
5490 emulation) to supply memory regions, you probably want to set
5491 MORECORE_CONTIGUOUS as false. As an example, here is a custom
5492 allocator kindly contributed for pre-OSX macOS. It uses virtually
5493 but not necessarily physically contiguous non-paged memory (locked
5494 in, present and won't get swapped out). You can use it by
5495 uncommenting this section, adding some #includes, and setting up the
5496 appropriate defines above:
5498 #define MORECORE osMoreCore
5499 #define MORECORE_CONTIGUOUS 0
5501 There is also a shutdown routine that should somehow be called for
5502 cleanup upon program exit.
5504 #define MAX_POOL_ENTRIES 100
5505 #define MINIMUM_MORECORE_SIZE (64 * 1024)
5506 static int next_os_pool;
5507 void *our_os_pools[MAX_POOL_ENTRIES];
5509 void *osMoreCore(int size)
5511 void *ptr = 0;
5512 static void *sbrk_top = 0;
5514 if (size > 0)
5516 if (size < MINIMUM_MORECORE_SIZE)
5517 size = MINIMUM_MORECORE_SIZE;
5518 if (CurrentExecutionLevel() == kTaskLevel)
5519 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5520 if (ptr == 0)
5522 return (void *) MORECORE_FAILURE;
5524 // save ptrs so they can be freed during cleanup
5525 our_os_pools[next_os_pool] = ptr;
5526 next_os_pool++;
5527 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5528 sbrk_top = (char *) ptr + size;
5529 return ptr;
5531 else if (size < 0)
5533 // we don't currently support shrink behavior
5534 return (void *) MORECORE_FAILURE;
5536 else
5538 return sbrk_top;
5542 // cleanup any allocated memory pools
5543 // called as last thing before shutting down driver
5545 void osCleanupMem(void)
5547 void **ptr;
5549 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5550 if (*ptr)
5552 PoolDeallocate(*ptr);
5553 *ptr = 0;
5560 /* Helper code. */
5562 extern char **__libc_argv attribute_hidden;
5564 static void
5565 malloc_printerr(int action, const char *str, void *ptr)
5567 if ((action & 5) == 5)
5568 __libc_message (action & 2, "%s\n", str);
5569 else if (action & 1)
5571 char buf[2 * sizeof (uintptr_t) + 1];
5573 buf[sizeof (buf) - 1] = '\0';
5574 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
5575 while (cp > buf)
5576 *--cp = '0';
5578 __libc_message (action & 2,
5579 "*** glibc detected *** %s: %s: 0x%s ***\n",
5580 __libc_argv[0] ?: "<unknown>", str, cp);
5582 else if (action & 2)
5583 abort ();
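/*
  The action bits tested above come from check_action (settable via
  mallopt(M_CHECK_ACTION) or the MALLOC_CHECK_ environment variable):
  bit 0 asks for a diagnostic to be printed, bit 1 asks for abort(),
  and bit 2 together with bit 0 selects the terse one-line form of the
  message instead of the "glibc detected" report.
*/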
5586 #ifdef _LIBC
5587 # include <sys/param.h>
5589 /* We need a wrapper function for one of the additions of POSIX. */
5590 int
5591 __posix_memalign (void **memptr, size_t alignment, size_t size)
5593 void *mem;
5594 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
5595 __const __malloc_ptr_t)) =
5596 __memalign_hook;
5598 /* Test whether the SIZE argument is valid. It must be a power of
5599 two multiple of sizeof (void *). */
5600 if (alignment % sizeof (void *) != 0
5601 || !powerof2 (alignment / sizeof (void *))
5602 || alignment == 0)
5603 return EINVAL;
5605 /* Call the hook here, so that caller is posix_memalign's caller
5606 and not posix_memalign itself. */
5607 if (hook != NULL)
5608 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
5609 else
5610 mem = public_mEMALIGn (alignment, size);
5612 if (mem != NULL) {
5613 *memptr = mem;
5614 return 0;
5617 return ENOMEM;
5619 weak_alias (__posix_memalign, posix_memalign)
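/*
  Usage sketch for the wrapper above:

    void *buf;
    int err = posix_memalign (&buf, 64, 4096);  // 64: power-of-two multiple
                                                //   of sizeof (void *)
    if (err == 0)
      {
        // buf is 64-byte aligned; release it with free ()
        free (buf);
      }
    else
      {
        // err is EINVAL or ENOMEM; buf was not modified
      }
*/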
5621 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5622 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5623 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5624 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5625 strong_alias (__libc_memalign, __memalign)
5626 weak_alias (__libc_memalign, memalign)
5627 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5628 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5629 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5630 strong_alias (__libc_mallinfo, __mallinfo)
5631 weak_alias (__libc_mallinfo, mallinfo)
5632 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5634 weak_alias (__malloc_stats, malloc_stats)
5635 weak_alias (__malloc_usable_size, malloc_usable_size)
5636 weak_alias (__malloc_trim, malloc_trim)
5637 weak_alias (__malloc_get_state, malloc_get_state)
5638 weak_alias (__malloc_set_state, malloc_set_state)
5640 #endif /* _LIBC */
5642 /* ------------------------------------------------------------
5643 History:
5645 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5649 * Local variables:
5650 * c-basic-offset: 2
5651 * End: