Optimizations from GMP.
[glibc.git] / malloc / malloc.c
blob17e4e03bac34e0ad909e9acbe96dd4a914b6eb4f
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 * Version ptmalloc2-20011215
27 based on:
28 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
30 * Quickstart
32 In order to compile this implementation, a Makefile is provided with
33 the ptmalloc2 distribution, which has pre-defined targets for some
34 popular systems (e.g. "make posix" for Posix threads). All that is
35 typically required with regard to compiler flags is the selection of
36 the thread package via defining one out of USE_PTHREADS, USE_THR or
37 USE_SPROC. Check the thread-m.h file for what effects this has.
38 Many/most systems will additionally require USE_TSD_DATA_HACK to be
39 defined, so this is the default for "make posix".
41 * Why use this malloc?
43 This is not the fastest, most space-conserving, most portable, or
44 most tunable malloc ever written. However it is among the fastest
45 while also being among the most space-conserving, portable and tunable.
46 Consistent balance across these factors results in a good general-purpose
47 allocator for malloc-intensive programs.
49 The main properties of the algorithms are:
50 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
51 with ties normally decided via FIFO (i.e. least recently used).
52 * For small (<= 64 bytes by default) requests, it is a caching
53 allocator, that maintains pools of quickly recycled chunks.
54 * In between, and for combinations of large and small requests, it does
55 the best it can trying to meet both goals at once.
56 * For very large requests (>= 128KB by default), it relies on system
57 memory mapping facilities, if supported.
59 For a longer but slightly out of date high-level description, see
60 http://gee.cs.oswego.edu/dl/html/malloc.html
62 You may already by default be using a C library containing a malloc
63 that is based on some version of this malloc (for example in
64 linux). You might still want to use the one in this file in order to
65 customize settings or to avoid overheads associated with library
66 versions.
68 * Contents, described in more detail in "description of public routines" below.
70 Standard (ANSI/SVID/...) functions:
71 malloc(size_t n);
72 calloc(size_t n_elements, size_t element_size);
73 free(Void_t* p);
74 realloc(Void_t* p, size_t n);
75 memalign(size_t alignment, size_t n);
76 valloc(size_t n);
77 mallinfo()
78 mallopt(int parameter_number, int parameter_value)
80 Additional functions:
81 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
82 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
83 pvalloc(size_t n);
84 cfree(Void_t* p);
85 malloc_trim(size_t pad);
86 malloc_usable_size(Void_t* p);
87 malloc_stats();
89 * Vital statistics:
91 Supported pointer representation: 4 or 8 bytes
92 Supported size_t representation: 4 or 8 bytes
93 Note that size_t is allowed to be 4 bytes even if pointers are 8.
94 You can adjust this by defining INTERNAL_SIZE_T
96 Alignment: 2 * sizeof(size_t) (default)
97 (i.e., 8 byte alignment with 4byte size_t). This suffices for
98 nearly all current machines and C compilers. However, you can
99 define MALLOC_ALIGNMENT to be wider than this if necessary.
101 Minimum overhead per allocated chunk: 4 or 8 bytes
102 Each malloced chunk has a hidden word of overhead holding size
103 and status information.
105 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
106 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
108 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
109 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
110 needed; 4 (8) for a trailing size field and 8 (16) bytes for
111 free list pointers. Thus, the minimum allocatable size is
112 16/24/32 bytes.
114 Even a request for zero bytes (i.e., malloc(0)) returns a
115 pointer to something of the minimum allocatable size.
117 The maximum overhead wastage (i.e., number of extra bytes
118 allocated than were requested in malloc) is less than or equal
119 to the minimum size, except for requests >= mmap_threshold that
120 are serviced via mmap(), where the worst case wastage is 2 *
121 sizeof(size_t) bytes plus the remainder from a system page (the
122 minimal mmap unit); typically 4096 or 8192 bytes.
124 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
125 8-byte size_t: 2^64 minus about two pages
127 It is assumed that (possibly signed) size_t values suffice to
128 represent chunk sizes. `Possibly signed' is due to the fact
129 that `size_t' may be defined on a system as either a signed or
130 an unsigned type. The ISO C standard says that it must be
131 unsigned, but a few systems are known not to adhere to this.
132 Additionally, even when size_t is unsigned, sbrk (which is by
133 default used to obtain memory from system) accepts signed
134 arguments, and may not be able to handle size_t-wide arguments
135 with negative sign bit. Generally, values that would
136 appear as negative after accounting for overhead and alignment
137 are supported only via mmap(), which does not have this
138 limitation.
140 Requests for sizes outside the allowed range will perform an optional
141 failure action and then return null. (Requests may also
142 also fail because a system is out of memory.)
144 Thread-safety: thread-safe unless NO_THREADS is defined
146 Compliance: I believe it is compliant with the 1997 Single Unix Specification
147 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
148 others as well.
150 * Synopsis of compile-time options:
152 People have reported using previous versions of this malloc on all
153 versions of Unix, sometimes by tweaking some of the defines
154 below. It has been tested most extensively on Solaris and
155 Linux. It is also reported to work on WIN32 platforms.
156 People also report using it in stand-alone embedded systems.
158 The implementation is in straight, hand-tuned ANSI C. It is not
159 at all modular. (Sorry!) It uses a lot of macros. To be at all
160 usable, this code should be compiled using an optimizing compiler
161 (for example gcc -O3) that can simplify expressions and control
162 paths. (FAQ: some macros import variables as arguments rather than
163 declare locals because people reported that some debuggers
164 otherwise get confused.)
166 OPTION DEFAULT VALUE
168 Compilation Environment options:
170 __STD_C derived from C compiler defines
171 WIN32 NOT defined
172 HAVE_MEMCPY defined
173 USE_MEMCPY 1 if HAVE_MEMCPY is defined
174 HAVE_MMAP defined as 1
175 MMAP_CLEARS 1
176 HAVE_MREMAP 0 unless linux defined
177 USE_ARENAS the same as HAVE_MMAP
178 malloc_getpagesize derived from system #includes, or 4096 if not
179 HAVE_USR_INCLUDE_MALLOC_H NOT defined
180 LACKS_UNISTD_H NOT defined unless WIN32
181 LACKS_SYS_PARAM_H NOT defined unless WIN32
182 LACKS_SYS_MMAN_H NOT defined unless WIN32
184 Changing default word sizes:
186 INTERNAL_SIZE_T size_t
187 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
188 __alignof__ (long double))
190 Configuration and functionality options:
192 USE_DL_PREFIX NOT defined
193 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
194 USE_MALLOC_LOCK NOT defined
195 MALLOC_DEBUG NOT defined
196 REALLOC_ZERO_BYTES_FREES 1
197 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
198 TRIM_FASTBINS 0
200 Options for customizing MORECORE:
202 MORECORE sbrk
203 MORECORE_FAILURE -1
204 MORECORE_CONTIGUOUS 1
205 MORECORE_CANNOT_TRIM NOT defined
206 MORECORE_CLEARS 1
207 MMAP_AS_MORECORE_SIZE (1024 * 1024)
209 Tuning options that are also dynamically changeable via mallopt:
211 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
212 DEFAULT_TRIM_THRESHOLD 128 * 1024
213 DEFAULT_TOP_PAD 0
214 DEFAULT_MMAP_THRESHOLD 128 * 1024
215 DEFAULT_MMAP_MAX 65536
217 There are several other #defined constants and macros that you
218 probably don't want to touch unless you are extending or adapting malloc. */
221 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
222 compiler, or a C compiler sufficiently close to ANSI to get away
223 with it.
226 #ifndef __STD_C
227 #if defined(__STDC__) || defined(__cplusplus)
228 #define __STD_C 1
229 #else
230 #define __STD_C 0
231 #endif
232 #endif /*__STD_C*/
236 Void_t* is the pointer type that malloc should say it returns
239 #ifndef Void_t
240 #if (__STD_C || defined(WIN32))
241 #define Void_t void
242 #else
243 #define Void_t char
244 #endif
245 #endif /*Void_t*/
247 #if __STD_C
248 #include <stddef.h> /* for size_t */
249 #include <stdlib.h> /* for getenv(), abort() */
250 #else
251 #include <sys/types.h>
252 #endif
254 #include <malloc-machine.h>
256 #ifdef _LIBC
257 #ifdef ATOMIC_FASTBINS
258 #include <atomic.h>
259 #endif
260 #include <stdio-common/_itoa.h>
261 #include <bits/wordsize.h>
262 #include <sys/sysinfo.h>
263 #endif
265 #ifdef __cplusplus
266 extern "C" {
267 #endif
269 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
271 /* #define LACKS_UNISTD_H */
273 #ifndef LACKS_UNISTD_H
274 #include <unistd.h>
275 #endif
277 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
279 /* #define LACKS_SYS_PARAM_H */
282 #include <stdio.h> /* needed for malloc_stats */
283 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
285 /* For uintptr_t. */
286 #include <stdint.h>
288 /* For va_arg, va_start, va_end. */
289 #include <stdarg.h>
291 /* For writev and struct iovec. */
292 #include <sys/uio.h>
293 /* For syslog. */
294 #include <sys/syslog.h>
296 /* For various dynamic linking things. */
297 #include <dlfcn.h>
301 Debugging:
303 Because freed chunks may be overwritten with bookkeeping fields, this
304 malloc will often die when freed memory is overwritten by user
305 programs. This can be very effective (albeit in an annoying way)
306 in helping track down dangling pointers.
308 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
309 enabled that will catch more memory errors. You probably won't be
310 able to make much sense of the actual assertion errors, but they
311 should help you locate incorrectly overwritten memory. The checking
312 is fairly extensive, and will slow down execution
313 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
314 will attempt to check every non-mmapped allocated and free chunk in
315 the course of computing the summmaries. (By nature, mmapped regions
316 cannot be checked very much automatically.)
318 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
319 this code. The assertions in the check routines spell out in more
320 detail the assumptions and invariants underlying the algorithms.
322 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
323 checking that all accesses to malloced memory stay within their
324 bounds. However, there are several add-ons and adaptations of this
325 or other mallocs available that do this.
328 #include <assert.h>
332 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
333 of chunk sizes.
335 The default version is the same as size_t.
337 While not strictly necessary, it is best to define this as an
338 unsigned type, even if size_t is a signed type. This may avoid some
339 artificial size limitations on some systems.
341 On a 64-bit machine, you may be able to reduce malloc overhead by
342 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
343 expense of not being able to handle more than 2^32 of malloced
344 space. If this limitation is acceptable, you are encouraged to set
345 this unless you are on a platform requiring 16byte alignments. In
346 this case the alignment requirements turn out to negate any
347 potential advantages of decreasing size_t word size.
349 Implementors: Beware of the possible combinations of:
350 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
351 and might be the same width as int or as long
352 - size_t might have different width and signedness as INTERNAL_SIZE_T
353 - int and long might be 32 or 64 bits, and might be the same width
354 To deal with this, most comparisons and difference computations
355 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
356 aware of the fact that casting an unsigned int to a wider long does
357 not sign-extend. (This also makes checking for negative numbers
358 awkward.) Some of these casts result in harmless compiler warnings
359 on some systems.
362 #ifndef INTERNAL_SIZE_T
363 #define INTERNAL_SIZE_T size_t
364 #endif
366 /* The corresponding word size */
367 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
371 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
372 It must be a power of two at least 2 * SIZE_SZ, even on machines
373 for which smaller alignments would suffice. It may be defined as
374 larger than this though. Note however that code and data structures
375 are optimized for the case of 8-byte alignment.
379 #ifndef MALLOC_ALIGNMENT
380 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
381 powerpc32. For the time being, changing this is causing more
382 compatibility problems due to malloc_get_state/malloc_set_state than
383 will returning blocks not adequately aligned for long double objects
384 under -mlong-double-128.
386 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
387 ? __alignof__ (long double) : 2 * SIZE_SZ)
389 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
390 #endif
392 /* The corresponding bit mask value */
393 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
398 REALLOC_ZERO_BYTES_FREES should be set if a call to
399 realloc with zero bytes should be the same as a call to free.
400 This is required by the C standard. Otherwise, since this malloc
401 returns a unique pointer for malloc(0), so does realloc(p, 0).
404 #ifndef REALLOC_ZERO_BYTES_FREES
405 #define REALLOC_ZERO_BYTES_FREES 1
406 #endif
409 TRIM_FASTBINS controls whether free() of a very small chunk can
410 immediately lead to trimming. Setting to true (1) can reduce memory
411 footprint, but will almost always slow down programs that use a lot
412 of small chunks.
414 Define this only if you are willing to give up some speed to more
415 aggressively reduce system-level memory footprint when releasing
416 memory in programs that use many small chunks. You can get
417 essentially the same effect by setting MXFAST to 0, but this can
418 lead to even greater slowdowns in programs using many small chunks.
419 TRIM_FASTBINS is an in-between compile-time option, that disables
420 only those chunks bordering topmost memory from being placed in
421 fastbins.
424 #ifndef TRIM_FASTBINS
425 #define TRIM_FASTBINS 0
426 #endif
430 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
431 This is necessary when you only want to use this malloc in one part
432 of a program, using your regular system malloc elsewhere.
435 /* #define USE_DL_PREFIX */
439 Two-phase name translation.
440 All of the actual routines are given mangled names.
441 When wrappers are used, they become the public callable versions.
442 When DL_PREFIX is used, the callable names are prefixed.
445 #ifdef USE_DL_PREFIX
446 #define public_cALLOc dlcalloc
447 #define public_fREe dlfree
448 #define public_cFREe dlcfree
449 #define public_mALLOc dlmalloc
450 #define public_mEMALIGn dlmemalign
451 #define public_rEALLOc dlrealloc
452 #define public_vALLOc dlvalloc
453 #define public_pVALLOc dlpvalloc
454 #define public_mALLINFo dlmallinfo
455 #define public_mALLOPt dlmallopt
456 #define public_mTRIm dlmalloc_trim
457 #define public_mSTATs dlmalloc_stats
458 #define public_mUSABLe dlmalloc_usable_size
459 #define public_iCALLOc dlindependent_calloc
460 #define public_iCOMALLOc dlindependent_comalloc
461 #define public_gET_STATe dlget_state
462 #define public_sET_STATe dlset_state
463 #else /* USE_DL_PREFIX */
464 #ifdef _LIBC
466 /* Special defines for the GNU C library. */
467 #define public_cALLOc __libc_calloc
468 #define public_fREe __libc_free
469 #define public_cFREe __libc_cfree
470 #define public_mALLOc __libc_malloc
471 #define public_mEMALIGn __libc_memalign
472 #define public_rEALLOc __libc_realloc
473 #define public_vALLOc __libc_valloc
474 #define public_pVALLOc __libc_pvalloc
475 #define public_mALLINFo __libc_mallinfo
476 #define public_mALLOPt __libc_mallopt
477 #define public_mTRIm __malloc_trim
478 #define public_mSTATs __malloc_stats
479 #define public_mUSABLe __malloc_usable_size
480 #define public_iCALLOc __libc_independent_calloc
481 #define public_iCOMALLOc __libc_independent_comalloc
482 #define public_gET_STATe __malloc_get_state
483 #define public_sET_STATe __malloc_set_state
484 #define malloc_getpagesize __getpagesize()
485 #define open __open
486 #define mmap __mmap
487 #define munmap __munmap
488 #define mremap __mremap
489 #define mprotect __mprotect
490 #define MORECORE (*__morecore)
491 #define MORECORE_FAILURE 0
493 Void_t * __default_morecore (ptrdiff_t);
494 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
496 #else /* !_LIBC */
497 #define public_cALLOc calloc
498 #define public_fREe free
499 #define public_cFREe cfree
500 #define public_mALLOc malloc
501 #define public_mEMALIGn memalign
502 #define public_rEALLOc realloc
503 #define public_vALLOc valloc
504 #define public_pVALLOc pvalloc
505 #define public_mALLINFo mallinfo
506 #define public_mALLOPt mallopt
507 #define public_mTRIm malloc_trim
508 #define public_mSTATs malloc_stats
509 #define public_mUSABLe malloc_usable_size
510 #define public_iCALLOc independent_calloc
511 #define public_iCOMALLOc independent_comalloc
512 #define public_gET_STATe malloc_get_state
513 #define public_sET_STATe malloc_set_state
514 #endif /* _LIBC */
515 #endif /* USE_DL_PREFIX */
517 #ifndef _LIBC
518 #define __builtin_expect(expr, val) (expr)
520 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
521 #endif
524 HAVE_MEMCPY should be defined if you are not otherwise using
525 ANSI STD C, but still have memcpy and memset in your C library
526 and want to use them in calloc and realloc. Otherwise simple
527 macro versions are defined below.
529 USE_MEMCPY should be defined as 1 if you actually want to
530 have memset and memcpy called. People report that the macro
531 versions are faster than libc versions on some systems.
533 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
534 (of <= 36 bytes) are manually unrolled in realloc and calloc.
537 #define HAVE_MEMCPY
539 #ifndef USE_MEMCPY
540 #ifdef HAVE_MEMCPY
541 #define USE_MEMCPY 1
542 #else
543 #define USE_MEMCPY 0
544 #endif
545 #endif
548 #if (__STD_C || defined(HAVE_MEMCPY))
550 #ifdef _LIBC
551 # include <string.h>
552 #else
553 #ifdef WIN32
554 /* On Win32 memset and memcpy are already declared in windows.h */
555 #else
556 #if __STD_C
557 void* memset(void*, int, size_t);
558 void* memcpy(void*, const void*, size_t);
559 #else
560 Void_t* memset();
561 Void_t* memcpy();
562 #endif
563 #endif
564 #endif
565 #endif
568 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
569 malloc fails to be able to return memory, either because memory is
570 exhausted or because of illegal arguments.
572 By default, sets errno if running on STD_C platform, else does nothing.
575 #ifndef MALLOC_FAILURE_ACTION
576 #if __STD_C
577 #define MALLOC_FAILURE_ACTION \
578 errno = ENOMEM;
580 #else
581 #define MALLOC_FAILURE_ACTION
582 #endif
583 #endif
586 MORECORE-related declarations. By default, rely on sbrk
590 #ifdef LACKS_UNISTD_H
591 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
592 #if __STD_C
593 extern Void_t* sbrk(ptrdiff_t);
594 #else
595 extern Void_t* sbrk();
596 #endif
597 #endif
598 #endif
601 MORECORE is the name of the routine to call to obtain more memory
602 from the system. See below for general guidance on writing
603 alternative MORECORE functions, as well as a version for WIN32 and a
604 sample version for pre-OSX macos.
607 #ifndef MORECORE
608 #define MORECORE sbrk
609 #endif
612 MORECORE_FAILURE is the value returned upon failure of MORECORE
613 as well as mmap. Since it cannot be an otherwise valid memory address,
614 and must reflect values of standard sys calls, you probably ought not
615 try to redefine it.
618 #ifndef MORECORE_FAILURE
619 #define MORECORE_FAILURE (-1)
620 #endif
623 If MORECORE_CONTIGUOUS is true, take advantage of fact that
624 consecutive calls to MORECORE with positive arguments always return
625 contiguous increasing addresses. This is true of unix sbrk. Even
626 if not defined, when regions happen to be contiguous, malloc will
627 permit allocations spanning regions obtained from different
628 calls. But defining this when applicable enables some stronger
629 consistency checks and space efficiencies.
632 #ifndef MORECORE_CONTIGUOUS
633 #define MORECORE_CONTIGUOUS 1
634 #endif
637 Define MORECORE_CANNOT_TRIM if your version of MORECORE
638 cannot release space back to the system when given negative
639 arguments. This is generally necessary only if you are using
640 a hand-crafted MORECORE function that cannot handle negative arguments.
643 /* #define MORECORE_CANNOT_TRIM */
645 /* MORECORE_CLEARS (default 1)
646 The degree to which the routine mapped to MORECORE zeroes out
647 memory: never (0), only for newly allocated space (1) or always
648 (2). The distinction between (1) and (2) is necessary because on
649 some systems, if the application first decrements and then
650 increments the break value, the contents of the reallocated space
651 are unspecified.
654 #ifndef MORECORE_CLEARS
655 #define MORECORE_CLEARS 1
656 #endif
660 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
661 allocate very large blocks. These will be returned to the
662 operating system immediately after a free(). Also, if mmap
663 is available, it is used as a backup strategy in cases where
664 MORECORE fails to provide space from system.
666 This malloc is best tuned to work with mmap for large requests.
667 If you do not have mmap, operations involving very large chunks (1MB
668 or so) may be slower than you'd like.
671 #ifndef HAVE_MMAP
672 #define HAVE_MMAP 1
675 Standard unix mmap using /dev/zero clears memory so calloc doesn't
676 need to.
679 #ifndef MMAP_CLEARS
680 #define MMAP_CLEARS 1
681 #endif
683 #else /* no mmap */
684 #ifndef MMAP_CLEARS
685 #define MMAP_CLEARS 0
686 #endif
687 #endif
691 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
692 sbrk fails, and mmap is used as a backup (which is done only if
693 HAVE_MMAP). The value must be a multiple of page size. This
694 backup strategy generally applies only when systems have "holes" in
695 address space, so sbrk cannot perform contiguous expansion, but
696 there is still space available on system. On systems for which
697 this is known to be useful (i.e. most linux kernels), this occurs
698 only when programs allocate huge amounts of memory. Between this,
699 and the fact that mmap regions tend to be limited, the size should
700 be large, to avoid too many mmap calls and thus avoid running out
701 of kernel resources.
704 #ifndef MMAP_AS_MORECORE_SIZE
705 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
706 #endif
709 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
710 large blocks. This is currently only possible on Linux with
711 kernel versions newer than 1.3.77.
714 #ifndef HAVE_MREMAP
715 #ifdef linux
716 #define HAVE_MREMAP 1
717 #else
718 #define HAVE_MREMAP 0
719 #endif
721 #endif /* HAVE_MMAP */
723 /* Define USE_ARENAS to enable support for multiple `arenas'. These
724 are allocated using mmap(), are necessary for threads and
725 occasionally useful to overcome address space limitations affecting
726 sbrk(). */
728 #ifndef USE_ARENAS
729 #define USE_ARENAS HAVE_MMAP
730 #endif
734 The system page size. To the extent possible, this malloc manages
735 memory from the system in page-size units. Note that this value is
736 cached during initialization into a field of malloc_state. So even
737 if malloc_getpagesize is a function, it is only called once.
739 The following mechanics for getpagesize were adapted from bsd/gnu
740 getpagesize.h. If none of the system-probes here apply, a value of
741 4096 is used, which should be OK: If they don't apply, then using
742 the actual value probably doesn't impact performance.
746 #ifndef malloc_getpagesize
748 #ifndef LACKS_UNISTD_H
749 # include <unistd.h>
750 #endif
752 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
753 # ifndef _SC_PAGE_SIZE
754 # define _SC_PAGE_SIZE _SC_PAGESIZE
755 # endif
756 # endif
758 # ifdef _SC_PAGE_SIZE
759 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
760 # else
761 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
762 extern size_t getpagesize();
763 # define malloc_getpagesize getpagesize()
764 # else
765 # ifdef WIN32 /* use supplied emulation of getpagesize */
766 # define malloc_getpagesize getpagesize()
767 # else
768 # ifndef LACKS_SYS_PARAM_H
769 # include <sys/param.h>
770 # endif
771 # ifdef EXEC_PAGESIZE
772 # define malloc_getpagesize EXEC_PAGESIZE
773 # else
774 # ifdef NBPG
775 # ifndef CLSIZE
776 # define malloc_getpagesize NBPG
777 # else
778 # define malloc_getpagesize (NBPG * CLSIZE)
779 # endif
780 # else
781 # ifdef NBPC
782 # define malloc_getpagesize NBPC
783 # else
784 # ifdef PAGESIZE
785 # define malloc_getpagesize PAGESIZE
786 # else /* just guess */
787 # define malloc_getpagesize (4096)
788 # endif
789 # endif
790 # endif
791 # endif
792 # endif
793 # endif
794 # endif
795 #endif
798 This version of malloc supports the standard SVID/XPG mallinfo
799 routine that returns a struct containing usage properties and
800 statistics. It should work on any SVID/XPG compliant system that has
801 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
802 install such a thing yourself, cut out the preliminary declarations
803 as described above and below and save them in a malloc.h file. But
804 there's no compelling reason to bother to do this.)
806 The main declaration needed is the mallinfo struct that is returned
807 (by-copy) by mallinfo(). The SVID/XPG malloinfo struct contains a
808 bunch of fields that are not even meaningful in this version of
809 malloc. These fields are are instead filled by mallinfo() with
810 other numbers that might be of interest.
812 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
813 /usr/include/malloc.h file that includes a declaration of struct
814 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
815 version is declared below. These must be precisely the same for
816 mallinfo() to work. The original SVID version of this struct,
817 defined on most systems with mallinfo, declares all fields as
818 ints. But some others define as unsigned long. If your system
819 defines the fields using a type of different width than listed here,
820 you must #include your system version and #define
821 HAVE_USR_INCLUDE_MALLOC_H.
824 /* #define HAVE_USR_INCLUDE_MALLOC_H */
826 #ifdef HAVE_USR_INCLUDE_MALLOC_H
827 #include "/usr/include/malloc.h"
828 #endif
831 /* ---------- description of public routines ------------ */
834 malloc(size_t n)
835 Returns a pointer to a newly allocated chunk of at least n bytes, or null
836 if no space is available. Additionally, on failure, errno is
837 set to ENOMEM on ANSI C systems.
839 If n is zero, malloc returns a minumum-sized chunk. (The minimum
840 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
841 systems.) On most systems, size_t is an unsigned type, so calls
842 with negative arguments are interpreted as requests for huge amounts
843 of space, which will often fail. The maximum supported value of n
844 differs across systems, but is in all cases less than the maximum
845 representable value of a size_t.
847 #if __STD_C
848 Void_t* public_mALLOc(size_t);
849 #else
850 Void_t* public_mALLOc();
851 #endif
852 #ifdef libc_hidden_proto
853 libc_hidden_proto (public_mALLOc)
854 #endif
857 free(Void_t* p)
858 Releases the chunk of memory pointed to by p, that had been previously
859 allocated using malloc or a related routine such as realloc.
860 It has no effect if p is null. It can have arbitrary (i.e., bad!)
861 effects if p has already been freed.
863 Unless disabled (using mallopt), freeing very large spaces will
864 when possible, automatically trigger operations that give
865 back unused memory to the system, thus reducing program footprint.
867 #if __STD_C
868 void public_fREe(Void_t*);
869 #else
870 void public_fREe();
871 #endif
872 #ifdef libc_hidden_proto
873 libc_hidden_proto (public_fREe)
874 #endif
877 calloc(size_t n_elements, size_t element_size);
878 Returns a pointer to n_elements * element_size bytes, with all locations
879 set to zero.
881 #if __STD_C
882 Void_t* public_cALLOc(size_t, size_t);
883 #else
884 Void_t* public_cALLOc();
885 #endif
888 realloc(Void_t* p, size_t n)
889 Returns a pointer to a chunk of size n that contains the same data
890 as does chunk p up to the minimum of (n, p's size) bytes, or null
891 if no space is available.
893 The returned pointer may or may not be the same as p. The algorithm
894 prefers extending p when possible, otherwise it employs the
895 equivalent of a malloc-copy-free sequence.
897 If p is null, realloc is equivalent to malloc.
899 If space is not available, realloc returns null, errno is set (if on
900 ANSI) and p is NOT freed.
902 if n is for fewer bytes than already held by p, the newly unused
903 space is lopped off and freed if possible. Unless the #define
904 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
905 zero (re)allocates a minimum-sized chunk.
907 Large chunks that were internally obtained via mmap will always
908 be reallocated using malloc-copy-free sequences unless
909 the system supports MREMAP (currently only linux).
911 The old unix realloc convention of allowing the last-free'd chunk
912 to be used as an argument to realloc is not supported.
914 #if __STD_C
915 Void_t* public_rEALLOc(Void_t*, size_t);
916 #else
917 Void_t* public_rEALLOc();
918 #endif
919 #ifdef libc_hidden_proto
920 libc_hidden_proto (public_rEALLOc)
921 #endif
924 memalign(size_t alignment, size_t n);
925 Returns a pointer to a newly allocated chunk of n bytes, aligned
926 in accord with the alignment argument.
928 The alignment argument should be a power of two. If the argument is
929 not a power of two, the nearest greater power is used.
930 8-byte alignment is guaranteed by normal malloc calls, so don't
931 bother calling memalign with an argument of 8 or less.
933 Overreliance on memalign is a sure way to fragment space.
935 #if __STD_C
936 Void_t* public_mEMALIGn(size_t, size_t);
937 #else
938 Void_t* public_mEMALIGn();
939 #endif
940 #ifdef libc_hidden_proto
941 libc_hidden_proto (public_mEMALIGn)
942 #endif
945 valloc(size_t n);
946 Equivalent to memalign(pagesize, n), where pagesize is the page
947 size of the system. If the pagesize is unknown, 4096 is used.
949 #if __STD_C
950 Void_t* public_vALLOc(size_t);
951 #else
952 Void_t* public_vALLOc();
953 #endif
958 mallopt(int parameter_number, int parameter_value)
959 Sets tunable parameters The format is to provide a
960 (parameter-number, parameter-value) pair. mallopt then sets the
961 corresponding parameter to the argument value if it can (i.e., so
962 long as the value is meaningful), and returns 1 if successful else
963 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
964 normally defined in malloc.h. Only one of these (M_MXFAST) is used
965 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
966 so setting them has no effect. But this malloc also supports four
967 other options in mallopt. See below for details. Briefly, supported
968 parameters are as follows (listed defaults are for "typical"
969 configurations).
971 Symbol param # default allowed param values
972 M_MXFAST 1 64 0-80 (0 disables fastbins)
973 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
974 M_TOP_PAD -2 0 any
975 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
976 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
978 #if __STD_C
979 int public_mALLOPt(int, int);
980 #else
981 int public_mALLOPt();
982 #endif
986 mallinfo()
987 Returns (by copy) a struct containing various summary statistics:
989 arena: current total non-mmapped bytes allocated from system
990 ordblks: the number of free chunks
991 smblks: the number of fastbin blocks (i.e., small chunks that
992 have been freed but not use resused or consolidated)
993 hblks: current number of mmapped regions
994 hblkhd: total bytes held in mmapped regions
995 usmblks: the maximum total allocated space. This will be greater
996 than current total if trimming has occurred.
997 fsmblks: total bytes held in fastbin blocks
998 uordblks: current total allocated space (normal or mmapped)
999 fordblks: total free space
1000 keepcost: the maximum number of bytes that could ideally be released
1001 back to system via malloc_trim. ("ideally" means that
1002 it ignores page restrictions etc.)
1004 Because these fields are ints, but internal bookkeeping may
1005 be kept as longs, the reported values may wrap around zero and
1006 thus be inaccurate.
1008 #if __STD_C
1009 struct mallinfo public_mALLINFo(void);
1010 #else
1011 struct mallinfo public_mALLINFo();
1012 #endif
1014 #ifndef _LIBC
1016 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1018 independent_calloc is similar to calloc, but instead of returning a
1019 single cleared space, it returns an array of pointers to n_elements
1020 independent elements that can hold contents of size elem_size, each
1021 of which starts out cleared, and can be independently freed,
1022 realloc'ed etc. The elements are guaranteed to be adjacently
1023 allocated (this is not guaranteed to occur with multiple callocs or
1024 mallocs), which may also improve cache locality in some
1025 applications.
1027 The "chunks" argument is optional (i.e., may be null, which is
1028 probably the most typical usage). If it is null, the returned array
1029 is itself dynamically allocated and should also be freed when it is
1030 no longer needed. Otherwise, the chunks array must be of at least
1031 n_elements in length. It is filled in with the pointers to the
1032 chunks.
1034 In either case, independent_calloc returns this pointer array, or
1035 null if the allocation failed. If n_elements is zero and "chunks"
1036 is null, it returns a chunk representing an array with zero elements
1037 (which should be freed if not wanted).
1039 Each element must be individually freed when it is no longer
1040 needed. If you'd like to instead be able to free all at once, you
1041 should instead use regular calloc and assign pointers into this
1042 space to represent elements. (In this case though, you cannot
1043 independently free elements.)
1045 independent_calloc simplifies and speeds up implementations of many
1046 kinds of pools. It may also be useful when constructing large data
1047 structures that initially have a fixed number of fixed-sized nodes,
1048 but the number is not known at compile time, and some of the nodes
1049 may later need to be freed. For example:
1051 struct Node { int item; struct Node* next; };
1053 struct Node* build_list() {
1054 struct Node** pool;
1055 int n = read_number_of_nodes_needed();
1056 if (n <= 0) return 0;
1057 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
1058 if (pool == 0) die();
1059 // organize into a linked list...
1060 struct Node* first = pool[0];
1061 for (i = 0; i < n-1; ++i)
1062 pool[i]->next = pool[i+1];
1063 free(pool); // Can now free the array (or not, if it is needed later)
1064 return first;
1067 #if __STD_C
1068 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1069 #else
1070 Void_t** public_iCALLOc();
1071 #endif
1074 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1076 independent_comalloc allocates, all at once, a set of n_elements
1077 chunks with sizes indicated in the "sizes" array. It returns
1078 an array of pointers to these elements, each of which can be
1079 independently freed, realloc'ed etc. The elements are guaranteed to
1080 be adjacently allocated (this is not guaranteed to occur with
1081 multiple callocs or mallocs), which may also improve cache locality
1082 in some applications.
1084 The "chunks" argument is optional (i.e., may be null). If it is null
1085 the returned array is itself dynamically allocated and should also
1086 be freed when it is no longer needed. Otherwise, the chunks array
1087 must be of at least n_elements in length. It is filled in with the
1088 pointers to the chunks.
1090 In either case, independent_comalloc returns this pointer array, or
1091 null if the allocation failed. If n_elements is zero and chunks is
1092 null, it returns a chunk representing an array with zero elements
1093 (which should be freed if not wanted).
1095 Each element must be individually freed when it is no longer
1096 needed. If you'd like to instead be able to free all at once, you
1097 should instead use a single regular malloc, and assign pointers at
1098 particular offsets in the aggregate space. (In this case though, you
1099 cannot independently free elements.)
1101 independent_comallac differs from independent_calloc in that each
1102 element may have a different size, and also that it does not
1103 automatically clear elements.
1105 independent_comalloc can be used to speed up allocation in cases
1106 where several structs or objects must always be allocated at the
1107 same time. For example:
1109 struct Head { ... }
1110 struct Foot { ... }
1112 void send_message(char* msg) {
1113 int msglen = strlen(msg);
1114 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1115 void* chunks[3];
1116 if (independent_comalloc(3, sizes, chunks) == 0)
1117 die();
1118 struct Head* head = (struct Head*)(chunks[0]);
1119 char* body = (char*)(chunks[1]);
1120 struct Foot* foot = (struct Foot*)(chunks[2]);
1121 // ...
1124 In general though, independent_comalloc is worth using only for
1125 larger values of n_elements. For small values, you probably won't
1126 detect enough difference from series of malloc calls to bother.
1128 Overuse of independent_comalloc can increase overall memory usage,
1129 since it cannot reuse existing noncontiguous small chunks that
1130 might be available for some of the elements.
1132 #if __STD_C
1133 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1134 #else
1135 Void_t** public_iCOMALLOc();
1136 #endif
1138 #endif /* _LIBC */
1142 pvalloc(size_t n);
1143 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1144 round up n to nearest pagesize.
1146 #if __STD_C
1147 Void_t* public_pVALLOc(size_t);
1148 #else
1149 Void_t* public_pVALLOc();
1150 #endif
1153 cfree(Void_t* p);
1154 Equivalent to free(p).
1156 cfree is needed/defined on some systems that pair it with calloc,
1157 for odd historical reasons (such as: cfree is used in example
1158 code in the first edition of K&R).
1160 #if __STD_C
1161 void public_cFREe(Void_t*);
1162 #else
1163 void public_cFREe();
1164 #endif
1167 malloc_trim(size_t pad);
1169 If possible, gives memory back to the system (via negative
1170 arguments to sbrk) if there is unused memory at the `high' end of
1171 the malloc pool. You can call this after freeing large blocks of
1172 memory to potentially reduce the system-level memory requirements
1173 of a program. However, it cannot guarantee to reduce memory. Under
1174 some allocation patterns, some large free blocks of memory will be
1175 locked between two used chunks, so they cannot be given back to
1176 the system.
1178 The `pad' argument to malloc_trim represents the amount of free
1179 trailing space to leave untrimmed. If this argument is zero,
1180 only the minimum amount of memory to maintain internal data
1181 structures will be left (one page or less). Non-zero arguments
1182 can be supplied to maintain enough trailing space to service
1183 future expected allocations without having to re-obtain memory
1184 from the system.
1186 Malloc_trim returns 1 if it actually released any memory, else 0.
1187 On systems that do not support "negative sbrks", it will always
1188 return 0.
1190 #if __STD_C
1191 int public_mTRIm(size_t);
1192 #else
1193 int public_mTRIm();
1194 #endif
1197 malloc_usable_size(Void_t* p);
1199 Returns the number of bytes you can actually use in
1200 an allocated chunk, which may be more than you requested (although
1201 often not) due to alignment and minimum size constraints.
1202 You can use this many bytes without worrying about
1203 overwriting other allocated objects. This is not a particularly great
1204 programming practice. malloc_usable_size can be more useful in
1205 debugging and assertions, for example:
1207 p = malloc(n);
1208 assert(malloc_usable_size(p) >= 256);
1211 #if __STD_C
1212 size_t public_mUSABLe(Void_t*);
1213 #else
1214 size_t public_mUSABLe();
1215 #endif
1218 malloc_stats();
1219 Prints on stderr the amount of space obtained from the system (both
1220 via sbrk and mmap), the maximum amount (which may be more than
1221 current if malloc_trim and/or munmap got called), and the current
1222 number of bytes allocated via malloc (or realloc, etc) but not yet
1223 freed. Note that this is the number of bytes allocated, not the
1224 number requested. It will be larger than the number requested
1225 because of alignment and bookkeeping overhead. Because it includes
1226 alignment wastage as being in use, this figure may be greater than
1227 zero even when no user-level chunks are allocated.
1229 The reported current and maximum system memory can be inaccurate if
1230 a program makes other calls to system memory allocation functions
1231 (normally sbrk) outside of malloc.
1233 malloc_stats prints only the most commonly interesting statistics.
1234 More information can be obtained by calling mallinfo.
1237 #if __STD_C
1238 void public_mSTATs(void);
1239 #else
1240 void public_mSTATs();
1241 #endif
1244 malloc_get_state(void);
1246 Returns the state of all malloc variables in an opaque data
1247 structure.
1249 #if __STD_C
1250 Void_t* public_gET_STATe(void);
1251 #else
1252 Void_t* public_gET_STATe();
1253 #endif
1256 malloc_set_state(Void_t* state);
1258 Restore the state of all malloc variables from data obtained with
1259 malloc_get_state().
1261 #if __STD_C
1262 int public_sET_STATe(Void_t*);
1263 #else
1264 int public_sET_STATe();
1265 #endif
1267 #ifdef _LIBC
1269 posix_memalign(void **memptr, size_t alignment, size_t size);
1271 POSIX wrapper like memalign(), checking for validity of size.
1273 int __posix_memalign(void **, size_t, size_t);
1274 #endif
1276 /* mallopt tuning options */
1279 M_MXFAST is the maximum request size used for "fastbins", special bins
1280 that hold returned chunks without consolidating their spaces. This
1281 enables future requests for chunks of the same size to be handled
1282 very quickly, but can increase fragmentation, and thus increase the
1283 overall memory footprint of a program.
1285 This malloc manages fastbins very conservatively yet still
1286 efficiently, so fragmentation is rarely a problem for values less
1287 than or equal to the default. The maximum supported value of MXFAST
1288 is 80. You wouldn't want it any higher than this anyway. Fastbins
1289 are designed especially for use with many small structs, objects or
1290 strings -- the default handles structs/objects/arrays with sizes up
1291 to 8 4byte fields, or small strings representing words, tokens,
1292 etc. Using fastbins for larger objects normally worsens
1293 fragmentation without improving speed.
1295 M_MXFAST is set in REQUEST size units. It is internally used in
1296 chunksize units, which adds padding and alignment. You can reduce
1297 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1298 algorithm to be a closer approximation of fifo-best-fit in all cases,
1299 not just for larger requests, but will generally cause it to be
1300 slower.
1304 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1305 #ifndef M_MXFAST
1306 #define M_MXFAST 1
1307 #endif
1309 #ifndef DEFAULT_MXFAST
1310 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
1311 #endif
1315 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1316 to keep before releasing via malloc_trim in free().
1318 Automatic trimming is mainly useful in long-lived programs.
1319 Because trimming via sbrk can be slow on some systems, and can
1320 sometimes be wasteful (in cases where programs immediately
1321 afterward allocate more large chunks) the value should be high
1322 enough so that your overall system performance would improve by
1323 releasing this much memory.
1325 The trim threshold and the mmap control parameters (see below)
1326 can be traded off with one another. Trimming and mmapping are
1327 two different ways of releasing unused memory back to the
1328 system. Between these two, it is often possible to keep
1329 system-level demands of a long-lived program down to a bare
1330 minimum. For example, in one test suite of sessions measuring
1331 the XF86 X server on Linux, using a trim threshold of 128K and a
1332 mmap threshold of 192K led to near-minimal long term resource
1333 consumption.
1335 If you are using this malloc in a long-lived program, it should
1336 pay to experiment with these values. As a rough guide, you
1337 might set to a value close to the average size of a process
1338 (program) running on your system. Releasing this much memory
1339 would allow such a process to run in memory. Generally, it's
1340 worth it to tune for trimming rather tham memory mapping when a
1341 program undergoes phases where several large chunks are
1342 allocated and released in ways that can reuse each other's
1343 storage, perhaps mixed with phases where there are no such
1344 chunks at all. And in well-behaved long-lived programs,
1345 controlling release of large blocks via trimming versus mapping
1346 is usually faster.
1348 However, in most programs, these parameters serve mainly as
1349 protection against the system-level effects of carrying around
1350 massive amounts of unneeded memory. Since frequent calls to
1351 sbrk, mmap, and munmap otherwise degrade performance, the default
1352 parameters are set to relatively high values that serve only as
1353 safeguards.
1355 The trim value It must be greater than page size to have any useful
1356 effect. To disable trimming completely, you can set to
1357 (unsigned long)(-1)
1359 Trim settings interact with fastbin (MXFAST) settings: Unless
1360 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1361 freeing a chunk with size less than or equal to MXFAST. Trimming is
1362 instead delayed until subsequent freeing of larger chunks. However,
1363 you can still force an attempted trim by calling malloc_trim.
1365 Also, trimming is not generally possible in cases where
1366 the main arena is obtained via mmap.
1368 Note that the trick some people use of mallocing a huge space and
1369 then freeing it at program startup, in an attempt to reserve system
1370 memory, doesn't have the intended effect under automatic trimming,
1371 since that memory will immediately be returned to the system.
1374 #define M_TRIM_THRESHOLD -1
1376 #ifndef DEFAULT_TRIM_THRESHOLD
1377 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1378 #endif
1381 M_TOP_PAD is the amount of extra `padding' space to allocate or
1382 retain whenever sbrk is called. It is used in two ways internally:
1384 * When sbrk is called to extend the top of the arena to satisfy
1385 a new malloc request, this much padding is added to the sbrk
1386 request.
1388 * When malloc_trim is called automatically from free(),
1389 it is used as the `pad' argument.
1391 In both cases, the actual amount of padding is rounded
1392 so that the end of the arena is always a system page boundary.
1394 The main reason for using padding is to avoid calling sbrk so
1395 often. Having even a small pad greatly reduces the likelihood
1396 that nearly every malloc request during program start-up (or
1397 after trimming) will invoke sbrk, which needlessly wastes
1398 time.
1400 Automatic rounding-up to page-size units is normally sufficient
1401 to avoid measurable overhead, so the default is 0. However, in
1402 systems where sbrk is relatively slow, it can pay to increase
1403 this value, at the expense of carrying around more memory than
1404 the program needs.
1407 #define M_TOP_PAD -2
1409 #ifndef DEFAULT_TOP_PAD
1410 #define DEFAULT_TOP_PAD (0)
1411 #endif
1414 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
1415 adjusted MMAP_THRESHOLD.
1418 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
1419 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
1420 #endif
1422 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
1423 /* For 32-bit platforms we cannot increase the maximum mmap
1424 threshold much because it is also the minimum value for the
1425 maximum heap size and its alignment. Going above 512k (i.e., 1M
1426 for new heaps) wastes too much address space. */
1427 # if __WORDSIZE == 32
1428 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
1429 # else
1430 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
1431 # endif
1432 #endif
1435 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1436 to service a request. Requests of at least this size that cannot
1437 be allocated using already-existing space will be serviced via mmap.
1438 (If enough normal freed space already exists it is used instead.)
1440 Using mmap segregates relatively large chunks of memory so that
1441 they can be individually obtained and released from the host
1442 system. A request serviced through mmap is never reused by any
1443 other request (at least not directly; the system may just so
1444 happen to remap successive requests to the same locations).
1446 Segregating space in this way has the benefits that:
1448 1. Mmapped space can ALWAYS be individually released back
1449 to the system, which helps keep the system level memory
1450 demands of a long-lived program low.
1451 2. Mapped memory can never become `locked' between
1452 other chunks, as can happen with normally allocated chunks, which
1453 means that even trimming via malloc_trim would not release them.
1454 3. On some systems with "holes" in address spaces, mmap can obtain
1455 memory that sbrk cannot.
1457 However, it has the disadvantages that:
1459 1. The space cannot be reclaimed, consolidated, and then
1460 used to service later requests, as happens with normal chunks.
1461 2. It can lead to more wastage because of mmap page alignment
1462 requirements
1463 3. It causes malloc performance to be more dependent on host
1464 system memory management support routines which may vary in
1465 implementation quality and may impose arbitrary
1466 limitations. Generally, servicing a request via normal
1467 malloc steps is faster than going through a system's mmap.
1469 The advantages of mmap nearly always outweigh disadvantages for
1470 "large" chunks, but the value of "large" varies across systems. The
1471 default is an empirically derived value that works well in most
1472 systems.
1475 Update in 2006:
1476 The above was written in 2001. Since then the world has changed a lot.
1477 Memory got bigger. Applications got bigger. The virtual address space
1478 layout in 32 bit linux changed.
1480 In the new situation, brk() and mmap space is shared and there are no
1481 artificial limits on brk size imposed by the kernel. What is more,
1482 applications have started using transient allocations larger than the
1483 128Kb as was imagined in 2001.
1485 The price for mmap is also high now; each time glibc mmaps from the
1486 kernel, the kernel is forced to zero out the memory it gives to the
1487 application. Zeroing memory is expensive and eats a lot of cache and
1488 memory bandwidth. This has nothing to do with the efficiency of the
1489 virtual memory system, by doing mmap the kernel just has no choice but
1490 to zero.
1492 In 2001, the kernel had a maximum size for brk() which was about 800
1493 megabytes on 32 bit x86, at that point brk() would hit the first
1494 mmaped shared libaries and couldn't expand anymore. With current 2.6
1495 kernels, the VA space layout is different and brk() and mmap
1496 both can span the entire heap at will.
1498 Rather than using a static threshold for the brk/mmap tradeoff,
1499 we are now using a simple dynamic one. The goal is still to avoid
1500 fragmentation. The old goals we kept are
1501 1) try to get the long lived large allocations to use mmap()
1502 2) really large allocations should always use mmap()
1503 and we're adding now:
1504 3) transient allocations should use brk() to avoid forcing the kernel
1505 having to zero memory over and over again
1507 The implementation works with a sliding threshold, which is by default
1508 limited to go between 128Kb and 32Mb (64Mb for 64 bitmachines) and starts
1509 out at 128Kb as per the 2001 default.
1511 This allows us to satisfy requirement 1) under the assumption that long
1512 lived allocations are made early in the process' lifespan, before it has
1513 started doing dynamic allocations of the same size (which will
1514 increase the threshold).
1516 The upperbound on the threshold satisfies requirement 2)
1518 The threshold goes up in value when the application frees memory that was
1519 allocated with the mmap allocator. The idea is that once the application
1520 starts freeing memory of a certain size, it's highly probable that this is
1521 a size the application uses for transient allocations. This estimator
1522 is there to satisfy the new third requirement.
1526 #define M_MMAP_THRESHOLD -3
1528 #ifndef DEFAULT_MMAP_THRESHOLD
1529 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1530 #endif
1533 M_MMAP_MAX is the maximum number of requests to simultaneously
1534 service using mmap. This parameter exists because
1535 some systems have a limited number of internal tables for
1536 use by mmap, and using more than a few of them may degrade
1537 performance.
1539 The default is set to a value that serves only as a safeguard.
1540 Setting to 0 disables use of mmap for servicing large requests. If
1541 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1542 to non-zero values in mallopt will fail.
1545 #define M_MMAP_MAX -4
1547 #ifndef DEFAULT_MMAP_MAX
1548 #if HAVE_MMAP
1549 #define DEFAULT_MMAP_MAX (65536)
1550 #else
1551 #define DEFAULT_MMAP_MAX (0)
1552 #endif
1553 #endif
1555 #ifdef __cplusplus
1556 } /* end of extern "C" */
1557 #endif
1559 #include <malloc.h>
1561 #ifndef BOUNDED_N
1562 #define BOUNDED_N(ptr, sz) (ptr)
1563 #endif
1564 #ifndef RETURN_ADDRESS
1565 #define RETURN_ADDRESS(X_) (NULL)
1566 #endif
1568 /* On some platforms we can compile internal, not exported functions better.
1569 Let the environment provide a macro and define it to be empty if it
1570 is not available. */
1571 #ifndef internal_function
1572 # define internal_function
1573 #endif
1575 /* Forward declarations. */
1576 struct malloc_chunk;
1577 typedef struct malloc_chunk* mchunkptr;
1579 /* Internal routines. */
1581 #if __STD_C
1583 static Void_t* _int_malloc(mstate, size_t);
1584 #ifdef ATOMIC_FASTBINS
1585 static void _int_free(mstate, mchunkptr, int);
1586 #else
1587 static void _int_free(mstate, mchunkptr);
1588 #endif
1589 static Void_t* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1590 INTERNAL_SIZE_T);
1591 static Void_t* _int_memalign(mstate, size_t, size_t);
1592 static Void_t* _int_valloc(mstate, size_t);
1593 static Void_t* _int_pvalloc(mstate, size_t);
1594 /*static Void_t* cALLOc(size_t, size_t);*/
1595 #ifndef _LIBC
1596 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1597 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1598 #endif
1599 static int mTRIm(mstate, size_t);
1600 static size_t mUSABLe(Void_t*);
1601 static void mSTATs(void);
1602 static int mALLOPt(int, int);
1603 static struct mallinfo mALLINFo(mstate);
1604 static void malloc_printerr(int action, const char *str, void *ptr);
1606 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1607 static int internal_function top_check(void);
1608 static void internal_function munmap_chunk(mchunkptr p);
1609 #if HAVE_MREMAP
1610 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1611 #endif
1613 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1614 static void free_check(Void_t* mem, const Void_t *caller);
1615 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1616 const Void_t *caller);
1617 static Void_t* memalign_check(size_t alignment, size_t bytes,
1618 const Void_t *caller);
1619 #ifndef NO_THREADS
1620 # ifdef _LIBC
1621 # if USE___THREAD || !defined SHARED
1622 /* These routines are never needed in this configuration. */
1623 # define NO_STARTER
1624 # endif
1625 # endif
1626 # ifdef NO_STARTER
1627 # undef NO_STARTER
1628 # else
1629 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1630 static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1631 static void free_starter(Void_t* mem, const Void_t *caller);
1632 # endif
1633 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1634 static void free_atfork(Void_t* mem, const Void_t *caller);
1635 #endif
1637 #else
1639 static Void_t* _int_malloc();
1640 static void _int_free();
1641 static Void_t* _int_realloc();
1642 static Void_t* _int_memalign();
1643 static Void_t* _int_valloc();
1644 static Void_t* _int_pvalloc();
1645 /*static Void_t* cALLOc();*/
1646 static Void_t** _int_icalloc();
1647 static Void_t** _int_icomalloc();
1648 static int mTRIm();
1649 static size_t mUSABLe();
1650 static void mSTATs();
1651 static int mALLOPt();
1652 static struct mallinfo mALLINFo();
1654 #endif
1659 /* ------------- Optional versions of memcopy ---------------- */
1662 #if USE_MEMCPY
1665 Note: memcpy is ONLY invoked with non-overlapping regions,
1666 so the (usually slower) memmove is not needed.
1669 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1670 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1672 #else /* !USE_MEMCPY */
1674 /* Use Duff's device for good zeroing/copying performance. */
1676 #define MALLOC_ZERO(charp, nbytes) \
1677 do { \
1678 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1679 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1680 long mcn; \
1681 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1682 switch (mctmp) { \
1683 case 0: for(;;) { *mzp++ = 0; \
1684 case 7: *mzp++ = 0; \
1685 case 6: *mzp++ = 0; \
1686 case 5: *mzp++ = 0; \
1687 case 4: *mzp++ = 0; \
1688 case 3: *mzp++ = 0; \
1689 case 2: *mzp++ = 0; \
1690 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1692 } while(0)
1694 #define MALLOC_COPY(dest,src,nbytes) \
1695 do { \
1696 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1697 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1698 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1699 long mcn; \
1700 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1701 switch (mctmp) { \
1702 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1703 case 7: *mcdst++ = *mcsrc++; \
1704 case 6: *mcdst++ = *mcsrc++; \
1705 case 5: *mcdst++ = *mcsrc++; \
1706 case 4: *mcdst++ = *mcsrc++; \
1707 case 3: *mcdst++ = *mcsrc++; \
1708 case 2: *mcdst++ = *mcsrc++; \
1709 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1711 } while(0)
1713 #endif
1715 /* ------------------ MMAP support ------------------ */
1718 #if HAVE_MMAP
1720 #include <fcntl.h>
1721 #ifndef LACKS_SYS_MMAN_H
1722 #include <sys/mman.h>
1723 #endif
1725 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1726 # define MAP_ANONYMOUS MAP_ANON
1727 #endif
1728 #if !defined(MAP_FAILED)
1729 # define MAP_FAILED ((char*)-1)
1730 #endif
1732 #ifndef MAP_NORESERVE
1733 # ifdef MAP_AUTORESRV
1734 # define MAP_NORESERVE MAP_AUTORESRV
1735 # else
1736 # define MAP_NORESERVE 0
1737 # endif
1738 #endif
1741 Nearly all versions of mmap support MAP_ANONYMOUS,
1742 so the following is unlikely to be needed, but is
1743 supplied just in case.
1746 #ifndef MAP_ANONYMOUS
1748 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1750 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1751 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1752 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1753 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1755 #else
1757 #define MMAP(addr, size, prot, flags) \
1758 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1760 #endif
1763 #endif /* HAVE_MMAP */
1767 ----------------------- Chunk representations -----------------------
1772 This struct declaration is misleading (but accurate and necessary).
1773 It declares a "view" into memory allowing access to necessary
1774 fields at known offsets from a given base. See explanation below.
1777 struct malloc_chunk {
1779 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1780 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1782 struct malloc_chunk* fd; /* double links -- used only if free. */
1783 struct malloc_chunk* bk;
1785 /* Only used for large blocks: pointer to next larger size. */
1786 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1787 struct malloc_chunk* bk_nextsize;
1792 malloc_chunk details:
1794 (The following includes lightly edited explanations by Colin Plumb.)
1796 Chunks of memory are maintained using a `boundary tag' method as
1797 described in e.g., Knuth or Standish. (See the paper by Paul
1798 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1799 survey of such techniques.) Sizes of free chunks are stored both
1800 in the front of each chunk and at the end. This makes
1801 consolidating fragmented chunks into bigger chunks very fast. The
1802 size fields also hold bits representing whether chunks are free or
1803 in use.
1805 An allocated chunk looks like this:
1808 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1809 | Size of previous chunk, if allocated | |
1810 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1811 | Size of chunk, in bytes |M|P|
1812 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1813 | User data starts here... .
1815 . (malloc_usable_size() bytes) .
1817 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1818 | Size of chunk |
1819 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1822 Where "chunk" is the front of the chunk for the purpose of most of
1823 the malloc code, but "mem" is the pointer that is returned to the
1824 user. "Nextchunk" is the beginning of the next contiguous chunk.
1826 Chunks always begin on even word boundries, so the mem portion
1827 (which is returned to the user) is also on an even word boundary, and
1828 thus at least double-word aligned.
1830 Free chunks are stored in circular doubly-linked lists, and look like this:
1832 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1833 | Size of previous chunk |
1834 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1835 `head:' | Size of chunk, in bytes |P|
1836 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1837 | Forward pointer to next chunk in list |
1838 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1839 | Back pointer to previous chunk in list |
1840 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1841 | Unused space (may be 0 bytes long) .
1844 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1845 `foot:' | Size of chunk, in bytes |
1846 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1848 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1849 chunk size (which is always a multiple of two words), is an in-use
1850 bit for the *previous* chunk. If that bit is *clear*, then the
1851 word before the current chunk size contains the previous chunk
1852 size, and can be used to find the front of the previous chunk.
1853 The very first chunk allocated always has this bit set,
1854 preventing access to non-existent (or non-owned) memory. If
1855 prev_inuse is set for any given chunk, then you CANNOT determine
1856 the size of the previous chunk, and might even get a memory
1857 addressing fault when trying to do so.
1859 Note that the `foot' of the current chunk is actually represented
1860 as the prev_size of the NEXT chunk. This makes it easier to
1861 deal with alignments etc but can be very confusing when trying
1862 to extend or adapt this code.
1864 The two exceptions to all this are
1866 1. The special chunk `top' doesn't bother using the
1867 trailing size field since there is no next contiguous chunk
1868 that would have to index off it. After initialization, `top'
1869 is forced to always exist. If it would become less than
1870 MINSIZE bytes long, it is replenished.
1872 2. Chunks allocated via mmap, which have the second-lowest-order
1873 bit M (IS_MMAPPED) set in their size fields. Because they are
1874 allocated one-by-one, each must contain its own trailing size field.
1879 ---------- Size and alignment checks and conversions ----------
1882 /* conversion from malloc headers to user pointers, and back */
1884 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1885 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1887 /* The smallest possible chunk */
1888 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1890 /* The smallest size we can malloc is an aligned minimal chunk */
1892 #define MINSIZE \
1893 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1895 /* Check if m has acceptable alignment */
1897 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1899 #define misaligned_chunk(p) \
1900 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1901 & MALLOC_ALIGN_MASK)
1905 Check if a request is so large that it would wrap around zero when
1906 padded and aligned. To simplify some other code, the bound is made
1907 low enough so that adding MINSIZE will also not wrap around zero.
1910 #define REQUEST_OUT_OF_RANGE(req) \
1911 ((unsigned long)(req) >= \
1912 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1914 /* pad request bytes into a usable size -- internal version */
1916 #define request2size(req) \
1917 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1918 MINSIZE : \
1919 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1921 /* Same, except also perform argument check */
1923 #define checked_request2size(req, sz) \
1924 if (REQUEST_OUT_OF_RANGE(req)) { \
1925 MALLOC_FAILURE_ACTION; \
1926 return 0; \
1928 (sz) = request2size(req);
1931 --------------- Physical chunk operations ---------------
1935 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1936 #define PREV_INUSE 0x1
1938 /* extract inuse bit of previous chunk */
1939 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1942 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1943 #define IS_MMAPPED 0x2
1945 /* check for mmap()'ed chunk */
1946 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1949 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1950 from a non-main arena. This is only set immediately before handing
1951 the chunk to the user, if necessary. */
1952 #define NON_MAIN_ARENA 0x4
1954 /* check for chunk from non-main arena */
1955 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1959 Bits to mask off when extracting size
1961 Note: IS_MMAPPED is intentionally not masked off from size field in
1962 macros for which mmapped chunks should never be seen. This should
1963 cause helpful core dumps to occur if it is tried by accident by
1964 people extending or adapting this malloc.
1966 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1968 /* Get size, ignoring use bits */
1969 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1972 /* Ptr to next physical malloc_chunk. */
1973 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1975 /* Ptr to previous physical malloc_chunk */
1976 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1978 /* Treat space at ptr + offset as a chunk */
1979 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1981 /* extract p's inuse bit */
1982 #define inuse(p)\
1983 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1985 /* set/clear chunk as being inuse without otherwise disturbing */
1986 #define set_inuse(p)\
1987 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1989 #define clear_inuse(p)\
1990 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1993 /* check/set/clear inuse bits in known places */
1994 #define inuse_bit_at_offset(p, s)\
1995 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1997 #define set_inuse_bit_at_offset(p, s)\
1998 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
2000 #define clear_inuse_bit_at_offset(p, s)\
2001 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
2004 /* Set size at head, without disturbing its use bit */
2005 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
2007 /* Set size/use field */
2008 #define set_head(p, s) ((p)->size = (s))
2010 /* Set size at footer (only when chunk is not in use) */
2011 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
2015 -------------------- Internal data structures --------------------
2017 All internal state is held in an instance of malloc_state defined
2018 below. There are no other static variables, except in two optional
2019 cases:
2020 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
2021 * If HAVE_MMAP is true, but mmap doesn't support
2022 MAP_ANONYMOUS, a dummy file descriptor for mmap.
2024 Beware of lots of tricks that minimize the total bookkeeping space
2025 requirements. The result is a little over 1K bytes (for 4byte
2026 pointers and size_t.)
2030 Bins
2032 An array of bin headers for free chunks. Each bin is doubly
2033 linked. The bins are approximately proportionally (log) spaced.
2034 There are a lot of these bins (128). This may look excessive, but
2035 works very well in practice. Most bins hold sizes that are
2036 unusual as malloc request sizes, but are more usual for fragments
2037 and consolidated sets of chunks, which is what these bins hold, so
2038 they can be found quickly. All procedures maintain the invariant
2039 that no consolidated chunk physically borders another one, so each
2040 chunk in a list is known to be preceeded and followed by either
2041 inuse chunks or the ends of memory.
2043 Chunks in bins are kept in size order, with ties going to the
2044 approximately least recently used chunk. Ordering isn't needed
2045 for the small bins, which all contain the same-sized chunks, but
2046 facilitates best-fit allocation for larger chunks. These lists
2047 are just sequential. Keeping them in order almost never requires
2048 enough traversal to warrant using fancier ordered data
2049 structures.
2051 Chunks of the same size are linked with the most
2052 recently freed at the front, and allocations are taken from the
2053 back. This results in LRU (FIFO) allocation order, which tends
2054 to give each chunk an equal opportunity to be consolidated with
2055 adjacent freed chunks, resulting in larger free chunks and less
2056 fragmentation.
2058 To simplify use in double-linked lists, each bin header acts
2059 as a malloc_chunk. This avoids special-casing for headers.
2060 But to conserve space and improve locality, we allocate
2061 only the fd/bk pointers of bins, and then use repositioning tricks
2062 to treat these as the fields of a malloc_chunk*.
2065 typedef struct malloc_chunk* mbinptr;
2067 /* addressing -- note that bin_at(0) does not exist */
2068 #define bin_at(m, i) \
2069 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
2070 - offsetof (struct malloc_chunk, fd))
2072 /* analog of ++bin */
2073 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
2075 /* Reminders about list directionality within bins */
2076 #define first(b) ((b)->fd)
2077 #define last(b) ((b)->bk)
2079 /* Take a chunk off a bin list */
2080 #define unlink(P, BK, FD) { \
2081 FD = P->fd; \
2082 BK = P->bk; \
2083 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
2084 malloc_printerr (check_action, "corrupted double-linked list", P); \
2085 else { \
2086 FD->bk = BK; \
2087 BK->fd = FD; \
2088 if (!in_smallbin_range (P->size) \
2089 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
2090 assert (P->fd_nextsize->bk_nextsize == P); \
2091 assert (P->bk_nextsize->fd_nextsize == P); \
2092 if (FD->fd_nextsize == NULL) { \
2093 if (P->fd_nextsize == P) \
2094 FD->fd_nextsize = FD->bk_nextsize = FD; \
2095 else { \
2096 FD->fd_nextsize = P->fd_nextsize; \
2097 FD->bk_nextsize = P->bk_nextsize; \
2098 P->fd_nextsize->bk_nextsize = FD; \
2099 P->bk_nextsize->fd_nextsize = FD; \
2101 } else { \
2102 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
2103 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
2110 Indexing
2112 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2113 8 bytes apart. Larger bins are approximately logarithmically spaced:
2115 64 bins of size 8
2116 32 bins of size 64
2117 16 bins of size 512
2118 8 bins of size 4096
2119 4 bins of size 32768
2120 2 bins of size 262144
2121 1 bin of size what's left
2123 There is actually a little bit of slop in the numbers in bin_index
2124 for the sake of speed. This makes no difference elsewhere.
2126 The bins top out around 1MB because we expect to service large
2127 requests via mmap.
2130 #define NBINS 128
2131 #define NSMALLBINS 64
2132 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
2133 #define MIN_LARGE_SIZE (NSMALLBINS * SMALLBIN_WIDTH)
2135 #define in_smallbin_range(sz) \
2136 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2138 #define smallbin_index(sz) \
2139 (SMALLBIN_WIDTH == 16 ? (((unsigned)(sz)) >> 4) : (((unsigned)(sz)) >> 3))
2141 #define largebin_index_32(sz) \
2142 (((((unsigned long)(sz)) >> 6) <= 38)? 56 + (((unsigned long)(sz)) >> 6): \
2143 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2144 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2145 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2146 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2147 126)
2149 // XXX It remains to be seen whether it is good to keep the widths of
2150 // XXX the buckets the same or whether it should be scaled by a factor
2151 // XXX of two as well.
2152 #define largebin_index_64(sz) \
2153 (((((unsigned long)(sz)) >> 6) <= 48)? 48 + (((unsigned long)(sz)) >> 6): \
2154 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2155 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2156 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2157 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2158 126)
2160 #define largebin_index(sz) \
2161 (SIZE_SZ == 8 ? largebin_index_64 (sz) : largebin_index_32 (sz))
2163 #define bin_index(sz) \
2164 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
2168 Unsorted chunks
2170 All remainders from chunk splits, as well as all returned chunks,
2171 are first placed in the "unsorted" bin. They are then placed
2172 in regular bins after malloc gives them ONE chance to be used before
2173 binning. So, basically, the unsorted_chunks list acts as a queue,
2174 with chunks being placed on it in free (and malloc_consolidate),
2175 and taken off (to be either used or placed in bins) in malloc.
2177 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2178 does not have to be taken into account in size comparisons.
2181 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2182 #define unsorted_chunks(M) (bin_at(M, 1))
2187 The top-most available chunk (i.e., the one bordering the end of
2188 available memory) is treated specially. It is never included in
2189 any bin, is used only if no other chunk is available, and is
2190 released back to the system if it is very large (see
2191 M_TRIM_THRESHOLD). Because top initially
2192 points to its own bin with initial zero size, thus forcing
2193 extension on the first malloc request, we avoid having any special
2194 code in malloc to check whether it even exists yet. But we still
2195 need to do so when getting memory from system, so we make
2196 initial_top treat the bin as a legal but unusable chunk during the
2197 interval between initialization and the first call to
2198 sYSMALLOc. (This is somewhat delicate, since it relies on
2199 the 2 preceding words to be zero during this interval as well.)
2202 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2203 #define initial_top(M) (unsorted_chunks(M))
2206 Binmap
2208 To help compensate for the large number of bins, a one-level index
2209 structure is used for bin-by-bin searching. `binmap' is a
2210 bitvector recording whether bins are definitely empty so they can
2211 be skipped over during during traversals. The bits are NOT always
2212 cleared as soon as bins are empty, but instead only
2213 when they are noticed to be empty during traversal in malloc.
2216 /* Conservatively use 32 bits per map word, even if on 64bit system */
2217 #define BINMAPSHIFT 5
2218 #define BITSPERMAP (1U << BINMAPSHIFT)
2219 #define BINMAPSIZE (NBINS / BITSPERMAP)
2221 #define idx2block(i) ((i) >> BINMAPSHIFT)
2222 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2224 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2225 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2226 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
2229 Fastbins
2231 An array of lists holding recently freed small chunks. Fastbins
2232 are not doubly linked. It is faster to single-link them, and
2233 since chunks are never removed from the middles of these lists,
2234 double linking is not necessary. Also, unlike regular bins, they
2235 are not even processed in FIFO order (they use faster LIFO) since
2236 ordering doesn't much matter in the transient contexts in which
2237 fastbins are normally used.
2239 Chunks in fastbins keep their inuse bit set, so they cannot
2240 be consolidated with other free chunks. malloc_consolidate
2241 releases all chunks in fastbins and consolidates them with
2242 other free chunks.
2245 typedef struct malloc_chunk* mfastbinptr;
2246 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
2248 /* offset 2 to use otherwise unindexable first 2 bins */
2249 #define fastbin_index(sz) \
2250 ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
2253 /* The maximum fastbin request size we support */
2254 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
2256 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
2259 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2260 that triggers automatic consolidation of possibly-surrounding
2261 fastbin chunks. This is a heuristic, so the exact value should not
2262 matter too much. It is defined at half the default trim threshold as a
2263 compromise heuristic to only attempt consolidation if it is likely
2264 to lead to trimming. However, it is not dynamically tunable, since
2265 consolidation reduces fragmentation surrounding large chunks even
2266 if trimming is not used.
2269 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2272 Since the lowest 2 bits in max_fast don't matter in size comparisons,
2273 they are used as flags.
2277 FASTCHUNKS_BIT held in max_fast indicates that there are probably
2278 some fastbin chunks. It is set true on entering a chunk into any
2279 fastbin, and cleared only in malloc_consolidate.
2281 The truth value is inverted so that have_fastchunks will be true
2282 upon startup (since statics are zero-filled), simplifying
2283 initialization checks.
2286 #define FASTCHUNKS_BIT (1U)
2288 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
2289 #ifdef ATOMIC_FASTBINS
2290 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
2291 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
2292 #else
2293 #define clear_fastchunks(M) ((M)->flags |= FASTCHUNKS_BIT)
2294 #define set_fastchunks(M) ((M)->flags &= ~FASTCHUNKS_BIT)
2295 #endif
2298 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2299 regions. Otherwise, contiguity is exploited in merging together,
2300 when possible, results from consecutive MORECORE calls.
2302 The initial value comes from MORECORE_CONTIGUOUS, but is
2303 changed dynamically if mmap is ever used as an sbrk substitute.
2306 #define NONCONTIGUOUS_BIT (2U)
2308 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
2309 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
2310 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
2311 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
2314 Set value of max_fast.
2315 Use impossibly small value if 0.
2316 Precondition: there are no existing fastbin chunks.
2317 Setting the value clears fastchunk bit but preserves noncontiguous bit.
2320 #define set_max_fast(s) \
2321 global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
2322 #define get_max_fast() global_max_fast
2326 ----------- Internal state representation and initialization -----------
2329 struct malloc_state {
2330 /* Serialize access. */
2331 mutex_t mutex;
2333 /* Flags (formerly in max_fast). */
2334 int flags;
2336 #if THREAD_STATS
2337 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2338 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2339 #endif
2341 /* Fastbins */
2342 mfastbinptr fastbinsY[NFASTBINS];
2344 /* Base of the topmost chunk -- not otherwise kept in a bin */
2345 mchunkptr top;
2347 /* The remainder from the most recent split of a small request */
2348 mchunkptr last_remainder;
2350 /* Normal bins packed as described above */
2351 mchunkptr bins[NBINS * 2 - 2];
2353 /* Bitmap of bins */
2354 unsigned int binmap[BINMAPSIZE];
2356 /* Linked list */
2357 struct malloc_state *next;
2359 #ifdef PER_THREAD
2360 /* Linked list for free arenas. */
2361 struct malloc_state *next_free;
2362 #endif
2364 /* Memory allocated from the system in this arena. */
2365 INTERNAL_SIZE_T system_mem;
2366 INTERNAL_SIZE_T max_system_mem;
2369 struct malloc_par {
2370 /* Tunable parameters */
2371 unsigned long trim_threshold;
2372 INTERNAL_SIZE_T top_pad;
2373 INTERNAL_SIZE_T mmap_threshold;
2374 #ifdef PER_THREAD
2375 INTERNAL_SIZE_T arena_test;
2376 INTERNAL_SIZE_T arena_max;
2377 #endif
2379 /* Memory map support */
2380 int n_mmaps;
2381 int n_mmaps_max;
2382 int max_n_mmaps;
2383 /* the mmap_threshold is dynamic, until the user sets
2384 it manually, at which point we need to disable any
2385 dynamic behavior. */
2386 int no_dyn_threshold;
2388 /* Cache malloc_getpagesize */
2389 unsigned int pagesize;
2391 /* Statistics */
2392 INTERNAL_SIZE_T mmapped_mem;
2393 /*INTERNAL_SIZE_T sbrked_mem;*/
2394 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2395 INTERNAL_SIZE_T max_mmapped_mem;
2396 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2398 /* First address handed out by MORECORE/sbrk. */
2399 char* sbrk_base;
2402 /* There are several instances of this struct ("arenas") in this
2403 malloc. If you are adapting this malloc in a way that does NOT use
2404 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2405 before using. This malloc relies on the property that malloc_state
2406 is initialized to all zeroes (as is true of C statics). */
2408 static struct malloc_state main_arena;
2410 /* There is only one instance of the malloc parameters. */
2412 static struct malloc_par mp_;
2415 #ifdef PER_THREAD
2416 /* Non public mallopt parameters. */
2417 #define M_ARENA_TEST -7
2418 #define M_ARENA_MAX -8
2419 #endif
2422 /* Maximum size of memory handled in fastbins. */
2423 static INTERNAL_SIZE_T global_max_fast;
2426 Initialize a malloc_state struct.
2428 This is called only from within malloc_consolidate, which needs
2429 be called in the same contexts anyway. It is never called directly
2430 outside of malloc_consolidate because some optimizing compilers try
2431 to inline it at all call points, which turns out not to be an
2432 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2435 #if __STD_C
2436 static void malloc_init_state(mstate av)
2437 #else
2438 static void malloc_init_state(av) mstate av;
2439 #endif
2441 int i;
2442 mbinptr bin;
2444 /* Establish circular links for normal bins */
2445 for (i = 1; i < NBINS; ++i) {
2446 bin = bin_at(av,i);
2447 bin->fd = bin->bk = bin;
2450 #if MORECORE_CONTIGUOUS
2451 if (av != &main_arena)
2452 #endif
2453 set_noncontiguous(av);
2454 if (av == &main_arena)
2455 set_max_fast(DEFAULT_MXFAST);
2456 av->flags |= FASTCHUNKS_BIT;
2458 av->top = initial_top(av);
2462 Other internal utilities operating on mstates
2465 #if __STD_C
2466 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2467 static int sYSTRIm(size_t, mstate);
2468 static void malloc_consolidate(mstate);
2469 #ifndef _LIBC
2470 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2471 #endif
2472 #else
2473 static Void_t* sYSMALLOc();
2474 static int sYSTRIm();
2475 static void malloc_consolidate();
2476 static Void_t** iALLOc();
2477 #endif
2480 /* -------------- Early definitions for debugging hooks ---------------- */
2482 /* Define and initialize the hook variables. These weak definitions must
2483 appear before any use of the variables in a function (arena.c uses one). */
2484 #ifndef weak_variable
2485 #ifndef _LIBC
2486 #define weak_variable /**/
2487 #else
2488 /* In GNU libc we want the hook variables to be weak definitions to
2489 avoid a problem with Emacs. */
2490 #define weak_variable weak_function
2491 #endif
2492 #endif
2494 /* Forward declarations. */
2495 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2496 const __malloc_ptr_t caller));
2497 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2498 const __malloc_ptr_t caller));
2499 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2500 const __malloc_ptr_t caller));
2502 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2503 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2504 const __malloc_ptr_t) = NULL;
2505 __malloc_ptr_t weak_variable (*__malloc_hook)
2506 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
2507 __malloc_ptr_t weak_variable (*__realloc_hook)
2508 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
2509 = realloc_hook_ini;
2510 __malloc_ptr_t weak_variable (*__memalign_hook)
2511 (size_t __alignment, size_t __size, const __malloc_ptr_t)
2512 = memalign_hook_ini;
2513 void weak_variable (*__after_morecore_hook) (void) = NULL;
2516 /* ---------------- Error behavior ------------------------------------ */
2518 #ifndef DEFAULT_CHECK_ACTION
2519 #define DEFAULT_CHECK_ACTION 3
2520 #endif
2522 static int check_action = DEFAULT_CHECK_ACTION;
2525 /* ------------------ Testing support ----------------------------------*/
2527 static int perturb_byte;
2529 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
2530 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
2533 /* ------------------- Support for multiple arenas -------------------- */
2534 #include "arena.c"
2537 Debugging support
2539 These routines make a number of assertions about the states
2540 of data structures that should be true at all times. If any
2541 are not true, it's very likely that a user program has somehow
2542 trashed memory. (It's also possible that there is a coding error
2543 in malloc. In which case, please report it!)
2546 #if ! MALLOC_DEBUG
2548 #define check_chunk(A,P)
2549 #define check_free_chunk(A,P)
2550 #define check_inuse_chunk(A,P)
2551 #define check_remalloced_chunk(A,P,N)
2552 #define check_malloced_chunk(A,P,N)
2553 #define check_malloc_state(A)
2555 #else
2557 #define check_chunk(A,P) do_check_chunk(A,P)
2558 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2559 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2560 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2561 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2562 #define check_malloc_state(A) do_check_malloc_state(A)
2565 Properties of all chunks
2568 #if __STD_C
2569 static void do_check_chunk(mstate av, mchunkptr p)
2570 #else
2571 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2572 #endif
2574 unsigned long sz = chunksize(p);
2575 /* min and max possible addresses assuming contiguous allocation */
2576 char* max_address = (char*)(av->top) + chunksize(av->top);
2577 char* min_address = max_address - av->system_mem;
2579 if (!chunk_is_mmapped(p)) {
2581 /* Has legal address ... */
2582 if (p != av->top) {
2583 if (contiguous(av)) {
2584 assert(((char*)p) >= min_address);
2585 assert(((char*)p + sz) <= ((char*)(av->top)));
2588 else {
2589 /* top size is always at least MINSIZE */
2590 assert((unsigned long)(sz) >= MINSIZE);
2591 /* top predecessor always marked inuse */
2592 assert(prev_inuse(p));
2596 else {
2597 #if HAVE_MMAP
2598 /* address is outside main heap */
2599 if (contiguous(av) && av->top != initial_top(av)) {
2600 assert(((char*)p) < min_address || ((char*)p) >= max_address);
2602 /* chunk is page-aligned */
2603 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2604 /* mem is aligned */
2605 assert(aligned_OK(chunk2mem(p)));
2606 #else
2607 /* force an appropriate assert violation if debug set */
2608 assert(!chunk_is_mmapped(p));
2609 #endif
2614 Properties of free chunks
2617 #if __STD_C
2618 static void do_check_free_chunk(mstate av, mchunkptr p)
2619 #else
2620 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2621 #endif
2623 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2624 mchunkptr next = chunk_at_offset(p, sz);
2626 do_check_chunk(av, p);
2628 /* Chunk must claim to be free ... */
2629 assert(!inuse(p));
2630 assert (!chunk_is_mmapped(p));
2632 /* Unless a special marker, must have OK fields */
2633 if ((unsigned long)(sz) >= MINSIZE)
2635 assert((sz & MALLOC_ALIGN_MASK) == 0);
2636 assert(aligned_OK(chunk2mem(p)));
2637 /* ... matching footer field */
2638 assert(next->prev_size == sz);
2639 /* ... and is fully consolidated */
2640 assert(prev_inuse(p));
2641 assert (next == av->top || inuse(next));
2643 /* ... and has minimally sane links */
2644 assert(p->fd->bk == p);
2645 assert(p->bk->fd == p);
2647 else /* markers are always of size SIZE_SZ */
2648 assert(sz == SIZE_SZ);
2652 Properties of inuse chunks
2655 #if __STD_C
2656 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2657 #else
2658 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2659 #endif
2661 mchunkptr next;
2663 do_check_chunk(av, p);
2665 if (chunk_is_mmapped(p))
2666 return; /* mmapped chunks have no next/prev */
2668 /* Check whether it claims to be in use ... */
2669 assert(inuse(p));
2671 next = next_chunk(p);
2673 /* ... and is surrounded by OK chunks.
2674 Since more things can be checked with free chunks than inuse ones,
2675 if an inuse chunk borders them and debug is on, it's worth doing them.
2677 if (!prev_inuse(p)) {
2678 /* Note that we cannot even look at prev unless it is not inuse */
2679 mchunkptr prv = prev_chunk(p);
2680 assert(next_chunk(prv) == p);
2681 do_check_free_chunk(av, prv);
2684 if (next == av->top) {
2685 assert(prev_inuse(next));
2686 assert(chunksize(next) >= MINSIZE);
2688 else if (!inuse(next))
2689 do_check_free_chunk(av, next);
2693 Properties of chunks recycled from fastbins
2696 #if __STD_C
2697 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2698 #else
2699 static void do_check_remalloced_chunk(av, p, s)
2700 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2701 #endif
2703 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2705 if (!chunk_is_mmapped(p)) {
2706 assert(av == arena_for_chunk(p));
2707 if (chunk_non_main_arena(p))
2708 assert(av != &main_arena);
2709 else
2710 assert(av == &main_arena);
2713 do_check_inuse_chunk(av, p);
2715 /* Legal size ... */
2716 assert((sz & MALLOC_ALIGN_MASK) == 0);
2717 assert((unsigned long)(sz) >= MINSIZE);
2718 /* ... and alignment */
2719 assert(aligned_OK(chunk2mem(p)));
2720 /* chunk is less than MINSIZE more than request */
2721 assert((long)(sz) - (long)(s) >= 0);
2722 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2726 Properties of nonrecycled chunks at the point they are malloced
2729 #if __STD_C
2730 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2731 #else
2732 static void do_check_malloced_chunk(av, p, s)
2733 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2734 #endif
2736 /* same as recycled case ... */
2737 do_check_remalloced_chunk(av, p, s);
2740 ... plus, must obey implementation invariant that prev_inuse is
2741 always true of any allocated chunk; i.e., that each allocated
2742 chunk borders either a previously allocated and still in-use
2743 chunk, or the base of its memory arena. This is ensured
2744 by making all allocations from the the `lowest' part of any found
2745 chunk. This does not necessarily hold however for chunks
2746 recycled via fastbins.
2749 assert(prev_inuse(p));
2754 Properties of malloc_state.
2756 This may be useful for debugging malloc, as well as detecting user
2757 programmer errors that somehow write into malloc_state.
2759 If you are extending or experimenting with this malloc, you can
2760 probably figure out how to hack this routine to print out or
2761 display chunk addresses, sizes, bins, and other instrumentation.
2764 static void do_check_malloc_state(mstate av)
2766 int i;
2767 mchunkptr p;
2768 mchunkptr q;
2769 mbinptr b;
2770 unsigned int idx;
2771 INTERNAL_SIZE_T size;
2772 unsigned long total = 0;
2773 int max_fast_bin;
2775 /* internal size_t must be no wider than pointer type */
2776 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2778 /* alignment is a power of 2 */
2779 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2781 /* cannot run remaining checks until fully initialized */
2782 if (av->top == 0 || av->top == initial_top(av))
2783 return;
2785 /* pagesize is a power of 2 */
2786 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2788 /* A contiguous main_arena is consistent with sbrk_base. */
2789 if (av == &main_arena && contiguous(av))
2790 assert((char*)mp_.sbrk_base + av->system_mem ==
2791 (char*)av->top + chunksize(av->top));
2793 /* properties of fastbins */
2795 /* max_fast is in allowed range */
2796 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
2798 max_fast_bin = fastbin_index(get_max_fast ());
2800 for (i = 0; i < NFASTBINS; ++i) {
2801 p = av->fastbins[i];
2803 /* The following test can only be performed for the main arena.
2804 While mallopt calls malloc_consolidate to get rid of all fast
2805 bins (especially those larger than the new maximum) this does
2806 only happen for the main arena. Trying to do this for any
2807 other arena would mean those arenas have to be locked and
2808 malloc_consolidate be called for them. This is excessive. And
2809 even if this is acceptable to somebody it still cannot solve
2810 the problem completely since if the arena is locked a
2811 concurrent malloc call might create a new arena which then
2812 could use the newly invalid fast bins. */
2814 /* all bins past max_fast are empty */
2815 if (av == &main_arena && i > max_fast_bin)
2816 assert(p == 0);
2818 while (p != 0) {
2819 /* each chunk claims to be inuse */
2820 do_check_inuse_chunk(av, p);
2821 total += chunksize(p);
2822 /* chunk belongs in this bin */
2823 assert(fastbin_index(chunksize(p)) == i);
2824 p = p->fd;
2828 if (total != 0)
2829 assert(have_fastchunks(av));
2830 else if (!have_fastchunks(av))
2831 assert(total == 0);
2833 /* check normal bins */
2834 for (i = 1; i < NBINS; ++i) {
2835 b = bin_at(av,i);
2837 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2838 if (i >= 2) {
2839 unsigned int binbit = get_binmap(av,i);
2840 int empty = last(b) == b;
2841 if (!binbit)
2842 assert(empty);
2843 else if (!empty)
2844 assert(binbit);
2847 for (p = last(b); p != b; p = p->bk) {
2848 /* each chunk claims to be free */
2849 do_check_free_chunk(av, p);
2850 size = chunksize(p);
2851 total += size;
2852 if (i >= 2) {
2853 /* chunk belongs in bin */
2854 idx = bin_index(size);
2855 assert(idx == i);
2856 /* lists are sorted */
2857 assert(p->bk == b ||
2858 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2860 if (!in_smallbin_range(size))
2862 if (p->fd_nextsize != NULL)
2864 if (p->fd_nextsize == p)
2865 assert (p->bk_nextsize == p);
2866 else
2868 if (p->fd_nextsize == first (b))
2869 assert (chunksize (p) < chunksize (p->fd_nextsize));
2870 else
2871 assert (chunksize (p) > chunksize (p->fd_nextsize));
2873 if (p == first (b))
2874 assert (chunksize (p) > chunksize (p->bk_nextsize));
2875 else
2876 assert (chunksize (p) < chunksize (p->bk_nextsize));
2879 else
2880 assert (p->bk_nextsize == NULL);
2882 } else if (!in_smallbin_range(size))
2883 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2884 /* chunk is followed by a legal chain of inuse chunks */
2885 for (q = next_chunk(p);
2886 (q != av->top && inuse(q) &&
2887 (unsigned long)(chunksize(q)) >= MINSIZE);
2888 q = next_chunk(q))
2889 do_check_inuse_chunk(av, q);
2893 /* top chunk is OK */
2894 check_chunk(av, av->top);
2896 /* sanity checks for statistics */
2898 #ifdef NO_THREADS
2899 assert(total <= (unsigned long)(mp_.max_total_mem));
2900 assert(mp_.n_mmaps >= 0);
2901 #endif
2902 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2904 assert((unsigned long)(av->system_mem) <=
2905 (unsigned long)(av->max_system_mem));
2907 assert((unsigned long)(mp_.mmapped_mem) <=
2908 (unsigned long)(mp_.max_mmapped_mem));
2910 #ifdef NO_THREADS
2911 assert((unsigned long)(mp_.max_total_mem) >=
2912 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2913 #endif
2915 #endif
2918 /* ----------------- Support for debugging hooks -------------------- */
2919 #include "hooks.c"
2922 /* ----------- Routines dealing with system allocation -------------- */
2925 sysmalloc handles malloc cases requiring more memory from the system.
2926 On entry, it is assumed that av->top does not have enough
2927 space to service request for nb bytes, thus requiring that av->top
2928 be extended or replaced.
2931 #if __STD_C
2932 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2933 #else
2934 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2935 #endif
2937 mchunkptr old_top; /* incoming value of av->top */
2938 INTERNAL_SIZE_T old_size; /* its size */
2939 char* old_end; /* its end address */
2941 long size; /* arg to first MORECORE or mmap call */
2942 char* brk; /* return value from MORECORE */
2944 long correction; /* arg to 2nd MORECORE call */
2945 char* snd_brk; /* 2nd return val */
2947 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2948 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2949 char* aligned_brk; /* aligned offset into brk */
2951 mchunkptr p; /* the allocated/returned chunk */
2952 mchunkptr remainder; /* remainder from allocation */
2953 unsigned long remainder_size; /* its size */
2955 unsigned long sum; /* for updating stats */
2957 size_t pagemask = mp_.pagesize - 1;
2958 bool tried_mmap = false;
2961 #if HAVE_MMAP
2964 If have mmap, and the request size meets the mmap threshold, and
2965 the system supports mmap, and there are few enough currently
2966 allocated mmapped regions, try to directly map this request
2967 rather than expanding top.
2970 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2971 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2973 char* mm; /* return value from mmap call*/
2975 try_mmap:
2977 Round up size to nearest page. For mmapped chunks, the overhead
2978 is one SIZE_SZ unit larger than for normal chunks, because there
2979 is no following chunk whose prev_size field could be used.
2981 #if 1
2982 /* See the front_misalign handling below, for glibc there is no
2983 need for further alignments. */
2984 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2985 #else
2986 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
2987 #endif
2988 tried_mmap = true;
2990 /* Don't try if size wraps around 0 */
2991 if ((unsigned long)(size) > (unsigned long)(nb)) {
2993 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2995 if (mm != MAP_FAILED) {
2998 The offset to the start of the mmapped region is stored
2999 in the prev_size field of the chunk. This allows us to adjust
3000 returned start address to meet alignment requirements here
3001 and in memalign(), and still be able to compute proper
3002 address argument for later munmap in free() and realloc().
3005 #if 1
3006 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
3007 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
3008 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
3009 assert (((INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK) == 0);
3010 #else
3011 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
3012 if (front_misalign > 0) {
3013 correction = MALLOC_ALIGNMENT - front_misalign;
3014 p = (mchunkptr)(mm + correction);
3015 p->prev_size = correction;
3016 set_head(p, (size - correction) |IS_MMAPPED);
3018 else
3019 #endif
3021 p = (mchunkptr)mm;
3022 set_head(p, size|IS_MMAPPED);
3025 /* update statistics */
3027 if (++mp_.n_mmaps > mp_.max_n_mmaps)
3028 mp_.max_n_mmaps = mp_.n_mmaps;
3030 sum = mp_.mmapped_mem += size;
3031 if (sum > (unsigned long)(mp_.max_mmapped_mem))
3032 mp_.max_mmapped_mem = sum;
3033 #ifdef NO_THREADS
3034 sum += av->system_mem;
3035 if (sum > (unsigned long)(mp_.max_total_mem))
3036 mp_.max_total_mem = sum;
3037 #endif
3039 check_chunk(av, p);
3041 return chunk2mem(p);
3045 #endif
3047 /* Record incoming configuration of top */
3049 old_top = av->top;
3050 old_size = chunksize(old_top);
3051 old_end = (char*)(chunk_at_offset(old_top, old_size));
3053 brk = snd_brk = (char*)(MORECORE_FAILURE);
3056 If not the first time through, we require old_size to be
3057 at least MINSIZE and to have prev_inuse set.
3060 assert((old_top == initial_top(av) && old_size == 0) ||
3061 ((unsigned long) (old_size) >= MINSIZE &&
3062 prev_inuse(old_top) &&
3063 ((unsigned long)old_end & pagemask) == 0));
3065 /* Precondition: not enough current space to satisfy nb request */
3066 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
3068 #ifndef ATOMIC_FASTBINS
3069 /* Precondition: all fastbins are consolidated */
3070 assert(!have_fastchunks(av));
3071 #endif
3074 if (av != &main_arena) {
3076 heap_info *old_heap, *heap;
3077 size_t old_heap_size;
3079 /* First try to extend the current heap. */
3080 old_heap = heap_for_ptr(old_top);
3081 old_heap_size = old_heap->size;
3082 if ((long) (MINSIZE + nb - old_size) > 0
3083 && grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
3084 av->system_mem += old_heap->size - old_heap_size;
3085 arena_mem += old_heap->size - old_heap_size;
3086 #if 0
3087 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
3088 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
3089 #endif
3090 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
3091 | PREV_INUSE);
3093 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
3094 /* Use a newly allocated heap. */
3095 heap->ar_ptr = av;
3096 heap->prev = old_heap;
3097 av->system_mem += heap->size;
3098 arena_mem += heap->size;
3099 #if 0
3100 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
3101 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
3102 #endif
3103 /* Set up the new top. */
3104 top(av) = chunk_at_offset(heap, sizeof(*heap));
3105 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
3107 /* Setup fencepost and free the old top chunk. */
3108 /* The fencepost takes at least MINSIZE bytes, because it might
3109 become the top chunk again later. Note that a footer is set
3110 up, too, although the chunk is marked in use. */
3111 old_size -= MINSIZE;
3112 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
3113 if (old_size >= MINSIZE) {
3114 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
3115 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
3116 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
3117 #ifdef ATOMIC_FASTBINS
3118 _int_free(av, old_top, 1);
3119 #else
3120 _int_free(av, old_top);
3121 #endif
3122 } else {
3123 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
3124 set_foot(old_top, (old_size + 2*SIZE_SZ));
3127 else if (!tried_mmap)
3128 /* We can at least try to use to mmap memory. */
3129 goto try_mmap;
3131 } else { /* av == main_arena */
3134 /* Request enough space for nb + pad + overhead */
3136 size = nb + mp_.top_pad + MINSIZE;
3139 If contiguous, we can subtract out existing space that we hope to
3140 combine with new space. We add it back later only if
3141 we don't actually get contiguous space.
3144 if (contiguous(av))
3145 size -= old_size;
3148 Round to a multiple of page size.
3149 If MORECORE is not contiguous, this ensures that we only call it
3150 with whole-page arguments. And if MORECORE is contiguous and
3151 this is not first time through, this preserves page-alignment of
3152 previous calls. Otherwise, we correct to page-align below.
3155 size = (size + pagemask) & ~pagemask;
3158 Don't try to call MORECORE if argument is so big as to appear
3159 negative. Note that since mmap takes size_t arg, it may succeed
3160 below even if we cannot call MORECORE.
3163 if (size > 0)
3164 brk = (char*)(MORECORE(size));
3166 if (brk != (char*)(MORECORE_FAILURE)) {
3167 /* Call the `morecore' hook if necessary. */
3168 if (__builtin_expect (__after_morecore_hook != NULL, 0))
3169 (*__after_morecore_hook) ();
3170 } else {
3172 If have mmap, try using it as a backup when MORECORE fails or
3173 cannot be used. This is worth doing on systems that have "holes" in
3174 address space, so sbrk cannot extend to give contiguous space, but
3175 space is available elsewhere. Note that we ignore mmap max count
3176 and threshold limits, since the space will not be used as a
3177 segregated mmap region.
3180 #if HAVE_MMAP
3181 /* Cannot merge with old top, so add its size back in */
3182 if (contiguous(av))
3183 size = (size + old_size + pagemask) & ~pagemask;
3185 /* If we are relying on mmap as backup, then use larger units */
3186 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
3187 size = MMAP_AS_MORECORE_SIZE;
3189 /* Don't try if size wraps around 0 */
3190 if ((unsigned long)(size) > (unsigned long)(nb)) {
3192 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3194 if (mbrk != MAP_FAILED) {
3196 /* We do not need, and cannot use, another sbrk call to find end */
3197 brk = mbrk;
3198 snd_brk = brk + size;
3201 Record that we no longer have a contiguous sbrk region.
3202 After the first time mmap is used as backup, we do not
3203 ever rely on contiguous space since this could incorrectly
3204 bridge regions.
3206 set_noncontiguous(av);
3209 #endif
3212 if (brk != (char*)(MORECORE_FAILURE)) {
3213 if (mp_.sbrk_base == 0)
3214 mp_.sbrk_base = brk;
3215 av->system_mem += size;
3218 If MORECORE extends previous space, we can likewise extend top size.
3221 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3222 set_head(old_top, (size + old_size) | PREV_INUSE);
3224 else if (contiguous(av) && old_size && brk < old_end) {
3225 /* Oops! Someone else killed our space.. Can't touch anything. */
3226 malloc_printerr (3, "break adjusted to free malloc space", brk);
3230 Otherwise, make adjustments:
3232 * If the first time through or noncontiguous, we need to call sbrk
3233 just to find out where the end of memory lies.
3235 * We need to ensure that all returned chunks from malloc will meet
3236 MALLOC_ALIGNMENT
3238 * If there was an intervening foreign sbrk, we need to adjust sbrk
3239 request size to account for fact that we will not be able to
3240 combine new space with existing space in old_top.
3242 * Almost all systems internally allocate whole pages at a time, in
3243 which case we might as well use the whole last page of request.
3244 So we allocate enough more memory to hit a page boundary now,
3245 which in turn causes future contiguous calls to page-align.
3248 else {
3249 front_misalign = 0;
3250 end_misalign = 0;
3251 correction = 0;
3252 aligned_brk = brk;
3254 /* handle contiguous cases */
3255 if (contiguous(av)) {
3257 /* Count foreign sbrk as system_mem. */
3258 if (old_size)
3259 av->system_mem += brk - old_end;
3261 /* Guarantee alignment of first new chunk made from this space */
3263 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3264 if (front_misalign > 0) {
3267 Skip over some bytes to arrive at an aligned position.
3268 We don't need to specially mark these wasted front bytes.
3269 They will never be accessed anyway because
3270 prev_inuse of av->top (and any chunk created from its start)
3271 is always true after initialization.
3274 correction = MALLOC_ALIGNMENT - front_misalign;
3275 aligned_brk += correction;
3279 If this isn't adjacent to existing space, then we will not
3280 be able to merge with old_top space, so must add to 2nd request.
3283 correction += old_size;
3285 /* Extend the end address to hit a page boundary */
3286 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3287 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3289 assert(correction >= 0);
3290 snd_brk = (char*)(MORECORE(correction));
3293 If can't allocate correction, try to at least find out current
3294 brk. It might be enough to proceed without failing.
3296 Note that if second sbrk did NOT fail, we assume that space
3297 is contiguous with first sbrk. This is a safe assumption unless
3298 program is multithreaded but doesn't use locks and a foreign sbrk
3299 occurred between our first and second calls.
3302 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3303 correction = 0;
3304 snd_brk = (char*)(MORECORE(0));
3305 } else
3306 /* Call the `morecore' hook if necessary. */
3307 if (__builtin_expect (__after_morecore_hook != NULL, 0))
3308 (*__after_morecore_hook) ();
3311 /* handle non-contiguous cases */
3312 else {
3313 /* MORECORE/mmap must correctly align */
3314 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3316 /* Find out current end of memory */
3317 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3318 snd_brk = (char*)(MORECORE(0));
3322 /* Adjust top based on results of second sbrk */
3323 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3324 av->top = (mchunkptr)aligned_brk;
3325 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3326 av->system_mem += correction;
3329 If not the first time through, we either have a
3330 gap due to foreign sbrk or a non-contiguous region. Insert a
3331 double fencepost at old_top to prevent consolidation with space
3332 we don't own. These fenceposts are artificial chunks that are
3333 marked as inuse and are in any case too small to use. We need
3334 two to make sizes and alignments work out.
3337 if (old_size != 0) {
3339 Shrink old_top to insert fenceposts, keeping size a
3340 multiple of MALLOC_ALIGNMENT. We know there is at least
3341 enough space in old_top to do this.
3343 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3344 set_head(old_top, old_size | PREV_INUSE);
3347 Note that the following assignments completely overwrite
3348 old_top when old_size was previously MINSIZE. This is
3349 intentional. We need the fencepost, even if old_top otherwise gets
3350 lost.
3352 chunk_at_offset(old_top, old_size )->size =
3353 (2*SIZE_SZ)|PREV_INUSE;
3355 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3356 (2*SIZE_SZ)|PREV_INUSE;
3358 /* If possible, release the rest. */
3359 if (old_size >= MINSIZE) {
3360 #ifdef ATOMIC_FASTBINS
3361 _int_free(av, old_top, 1);
3362 #else
3363 _int_free(av, old_top);
3364 #endif
3371 /* Update statistics */
3372 #ifdef NO_THREADS
3373 sum = av->system_mem + mp_.mmapped_mem;
3374 if (sum > (unsigned long)(mp_.max_total_mem))
3375 mp_.max_total_mem = sum;
3376 #endif
3380 } /* if (av != &main_arena) */
3382 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3383 av->max_system_mem = av->system_mem;
3384 check_malloc_state(av);
3386 /* finally, do the allocation */
3387 p = av->top;
3388 size = chunksize(p);
3390 /* check that one of the above allocation paths succeeded */
3391 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3392 remainder_size = size - nb;
3393 remainder = chunk_at_offset(p, nb);
3394 av->top = remainder;
3395 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3396 set_head(remainder, remainder_size | PREV_INUSE);
3397 check_malloced_chunk(av, p, nb);
3398 return chunk2mem(p);
3401 /* catch all failure paths */
3402 MALLOC_FAILURE_ACTION;
3403 return 0;
3408 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3409 to the system (via negative arguments to sbrk) if there is unused
3410 memory at the `high' end of the malloc pool. It is called
3411 automatically by free() when top space exceeds the trim
3412 threshold. It is also called by the public malloc_trim routine. It
3413 returns 1 if it actually released any memory, else 0.
3416 #if __STD_C
3417 static int sYSTRIm(size_t pad, mstate av)
3418 #else
3419 static int sYSTRIm(pad, av) size_t pad; mstate av;
3420 #endif
3422 long top_size; /* Amount of top-most memory */
3423 long extra; /* Amount to release */
3424 long released; /* Amount actually released */
3425 char* current_brk; /* address returned by pre-check sbrk call */
3426 char* new_brk; /* address returned by post-check sbrk call */
3427 size_t pagesz;
3429 pagesz = mp_.pagesize;
3430 top_size = chunksize(av->top);
3432 /* Release in pagesize units, keeping at least one page */
3433 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
3435 if (extra > 0) {
3438 Only proceed if end of memory is where we last set it.
3439 This avoids problems if there were foreign sbrk calls.
3441 current_brk = (char*)(MORECORE(0));
3442 if (current_brk == (char*)(av->top) + top_size) {
3445 Attempt to release memory. We ignore MORECORE return value,
3446 and instead call again to find out where new end of memory is.
3447 This avoids problems if first call releases less than we asked,
3448 of if failure somehow altered brk value. (We could still
3449 encounter problems if it altered brk in some very bad way,
3450 but the only thing we can do is adjust anyway, which will cause
3451 some downstream failure.)
3454 MORECORE(-extra);
3455 /* Call the `morecore' hook if necessary. */
3456 if (__builtin_expect (__after_morecore_hook != NULL, 0))
3457 (*__after_morecore_hook) ();
3458 new_brk = (char*)(MORECORE(0));
3460 if (new_brk != (char*)MORECORE_FAILURE) {
3461 released = (long)(current_brk - new_brk);
3463 if (released != 0) {
3464 /* Success. Adjust top. */
3465 av->system_mem -= released;
3466 set_head(av->top, (top_size - released) | PREV_INUSE);
3467 check_malloc_state(av);
3468 return 1;
3473 return 0;
3476 #ifdef HAVE_MMAP
3478 static void
3479 internal_function
3480 #if __STD_C
3481 munmap_chunk(mchunkptr p)
3482 #else
3483 munmap_chunk(p) mchunkptr p;
3484 #endif
3486 INTERNAL_SIZE_T size = chunksize(p);
3488 assert (chunk_is_mmapped(p));
3489 #if 0
3490 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3491 assert((mp_.n_mmaps > 0));
3492 #endif
3494 uintptr_t block = (uintptr_t) p - p->prev_size;
3495 size_t total_size = p->prev_size + size;
3496 /* Unfortunately we have to do the compilers job by hand here. Normally
3497 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3498 page size. But gcc does not recognize the optimization possibility
3499 (in the moment at least) so we combine the two values into one before
3500 the bit test. */
3501 if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
3503 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
3504 chunk2mem (p));
3505 return;
3508 mp_.n_mmaps--;
3509 mp_.mmapped_mem -= total_size;
3511 int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
3513 /* munmap returns non-zero on failure */
3514 assert(ret == 0);
3517 #if HAVE_MREMAP
3519 static mchunkptr
3520 internal_function
3521 #if __STD_C
3522 mremap_chunk(mchunkptr p, size_t new_size)
3523 #else
3524 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3525 #endif
3527 size_t page_mask = mp_.pagesize - 1;
3528 INTERNAL_SIZE_T offset = p->prev_size;
3529 INTERNAL_SIZE_T size = chunksize(p);
3530 char *cp;
3532 assert (chunk_is_mmapped(p));
3533 #if 0
3534 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3535 assert((mp_.n_mmaps > 0));
3536 #endif
3537 assert(((size + offset) & (mp_.pagesize-1)) == 0);
3539 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
3540 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3542 /* No need to remap if the number of pages does not change. */
3543 if (size + offset == new_size)
3544 return p;
3546 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3547 MREMAP_MAYMOVE);
3549 if (cp == MAP_FAILED) return 0;
3551 p = (mchunkptr)(cp + offset);
3553 assert(aligned_OK(chunk2mem(p)));
3555 assert((p->prev_size == offset));
3556 set_head(p, (new_size - offset)|IS_MMAPPED);
3558 mp_.mmapped_mem -= size + offset;
3559 mp_.mmapped_mem += new_size;
3560 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3561 mp_.max_mmapped_mem = mp_.mmapped_mem;
3562 #ifdef NO_THREADS
3563 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3564 mp_.max_total_mem)
3565 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3566 #endif
3567 return p;
3570 #endif /* HAVE_MREMAP */
3572 #endif /* HAVE_MMAP */
3574 /*------------------------ Public wrappers. --------------------------------*/
3576 Void_t*
3577 public_mALLOc(size_t bytes)
3579 mstate ar_ptr;
3580 Void_t *victim;
3582 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
3583 if (__builtin_expect (hook != NULL, 0))
3584 return (*hook)(bytes, RETURN_ADDRESS (0));
3586 arena_lookup(ar_ptr);
3587 #if 0
3588 // XXX We need double-word CAS and fastbins must be extended to also
3589 // XXX hold a generation counter for each entry.
3590 if (ar_ptr) {
3591 INTERNAL_SIZE_T nb; /* normalized request size */
3592 checked_request2size(bytes, nb);
3593 if (nb <= get_max_fast ()) {
3594 long int idx = fastbin_index(nb);
3595 mfastbinptr* fb = &fastbin (ar_ptr, idx);
3596 mchunkptr pp = *fb;
3597 mchunkptr v;
3600 v = pp;
3601 if (v == NULL)
3602 break;
3604 while ((pp = catomic_compare_and_exchange_val_acq (fb, v->fd, v)) != v);
3605 if (v != 0) {
3606 if (__builtin_expect (fastbin_index (chunksize (v)) != idx, 0))
3607 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
3608 chunk2mem (v));
3609 check_remalloced_chunk(ar_ptr, v, nb);
3610 void *p = chunk2mem(v);
3611 if (__builtin_expect (perturb_byte, 0))
3612 alloc_perturb (p, bytes);
3613 return p;
3617 #endif
3619 arena_lock(ar_ptr, bytes);
3620 if(!ar_ptr)
3621 return 0;
3622 victim = _int_malloc(ar_ptr, bytes);
3623 if(!victim) {
3624 /* Maybe the failure is due to running out of mmapped areas. */
3625 if(ar_ptr != &main_arena) {
3626 (void)mutex_unlock(&ar_ptr->mutex);
3627 ar_ptr = &main_arena;
3628 (void)mutex_lock(&ar_ptr->mutex);
3629 victim = _int_malloc(ar_ptr, bytes);
3630 (void)mutex_unlock(&ar_ptr->mutex);
3631 } else {
3632 #if USE_ARENAS
3633 /* ... or sbrk() has failed and there is still a chance to mmap() */
3634 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3635 (void)mutex_unlock(&main_arena.mutex);
3636 if(ar_ptr) {
3637 victim = _int_malloc(ar_ptr, bytes);
3638 (void)mutex_unlock(&ar_ptr->mutex);
3640 #endif
3642 } else
3643 (void)mutex_unlock(&ar_ptr->mutex);
3644 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3645 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3646 return victim;
3648 #ifdef libc_hidden_def
3649 libc_hidden_def(public_mALLOc)
3650 #endif
3652 void
3653 public_fREe(Void_t* mem)
3655 mstate ar_ptr;
3656 mchunkptr p; /* chunk corresponding to mem */
3658 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
3659 if (__builtin_expect (hook != NULL, 0)) {
3660 (*hook)(mem, RETURN_ADDRESS (0));
3661 return;
3664 if (mem == 0) /* free(0) has no effect */
3665 return;
3667 p = mem2chunk(mem);
3669 #if HAVE_MMAP
3670 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3672 /* see if the dynamic brk/mmap threshold needs adjusting */
3673 if (!mp_.no_dyn_threshold
3674 && p->size > mp_.mmap_threshold
3675 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
3677 mp_.mmap_threshold = chunksize (p);
3678 mp_.trim_threshold = 2 * mp_.mmap_threshold;
3680 munmap_chunk(p);
3681 return;
3683 #endif
3685 ar_ptr = arena_for_chunk(p);
3686 #ifdef ATOMIC_FASTBINS
3687 _int_free(ar_ptr, p, 0);
3688 #else
3689 # if THREAD_STATS
3690 if(!mutex_trylock(&ar_ptr->mutex))
3691 ++(ar_ptr->stat_lock_direct);
3692 else {
3693 (void)mutex_lock(&ar_ptr->mutex);
3694 ++(ar_ptr->stat_lock_wait);
3696 # else
3697 (void)mutex_lock(&ar_ptr->mutex);
3698 # endif
3699 _int_free(ar_ptr, p);
3700 (void)mutex_unlock(&ar_ptr->mutex);
3701 #endif
3703 #ifdef libc_hidden_def
3704 libc_hidden_def (public_fREe)
3705 #endif
3707 Void_t*
3708 public_rEALLOc(Void_t* oldmem, size_t bytes)
3710 mstate ar_ptr;
3711 INTERNAL_SIZE_T nb; /* padded request size */
3713 Void_t* newp; /* chunk to return */
3715 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
3716 __realloc_hook;
3717 if (__builtin_expect (hook != NULL, 0))
3718 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3720 #if REALLOC_ZERO_BYTES_FREES
3721 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3722 #endif
3724 /* realloc of null is supposed to be same as malloc */
3725 if (oldmem == 0) return public_mALLOc(bytes);
3727 /* chunk corresponding to oldmem */
3728 const mchunkptr oldp = mem2chunk(oldmem);
3729 /* its size */
3730 const INTERNAL_SIZE_T oldsize = chunksize(oldp);
3732 /* Little security check which won't hurt performance: the
3733 allocator never wrapps around at the end of the address space.
3734 Therefore we can exclude some size values which might appear
3735 here by accident or by "design" from some intruder. */
3736 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3737 || __builtin_expect (misaligned_chunk (oldp), 0))
3739 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3740 return NULL;
3743 checked_request2size(bytes, nb);
3745 #if HAVE_MMAP
3746 if (chunk_is_mmapped(oldp))
3748 Void_t* newmem;
3750 #if HAVE_MREMAP
3751 newp = mremap_chunk(oldp, nb);
3752 if(newp) return chunk2mem(newp);
3753 #endif
3754 /* Note the extra SIZE_SZ overhead. */
3755 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3756 /* Must alloc, copy, free. */
3757 newmem = public_mALLOc(bytes);
3758 if (newmem == 0) return 0; /* propagate failure */
3759 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3760 munmap_chunk(oldp);
3761 return newmem;
3763 #endif
3765 ar_ptr = arena_for_chunk(oldp);
3766 #if THREAD_STATS
3767 if(!mutex_trylock(&ar_ptr->mutex))
3768 ++(ar_ptr->stat_lock_direct);
3769 else {
3770 (void)mutex_lock(&ar_ptr->mutex);
3771 ++(ar_ptr->stat_lock_wait);
3773 #else
3774 (void)mutex_lock(&ar_ptr->mutex);
3775 #endif
3777 #if !defined NO_THREADS && !defined PER_THREAD
3778 /* As in malloc(), remember this arena for the next allocation. */
3779 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3780 #endif
3782 newp = _int_realloc(ar_ptr, oldp, oldsize, nb);
3784 (void)mutex_unlock(&ar_ptr->mutex);
3785 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3786 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3788 if (newp == NULL)
3790 /* Try harder to allocate memory in other arenas. */
3791 newp = public_mALLOc(bytes);
3792 if (newp != NULL)
3794 MALLOC_COPY (newp, oldmem, oldsize - SIZE_SZ);
3795 #ifdef ATOMIC_FASTBINS
3796 _int_free(ar_ptr, oldp, 0);
3797 #else
3798 # if THREAD_STATS
3799 if(!mutex_trylock(&ar_ptr->mutex))
3800 ++(ar_ptr->stat_lock_direct);
3801 else {
3802 (void)mutex_lock(&ar_ptr->mutex);
3803 ++(ar_ptr->stat_lock_wait);
3805 # else
3806 (void)mutex_lock(&ar_ptr->mutex);
3807 # endif
3808 _int_free(ar_ptr, oldp);
3809 (void)mutex_unlock(&ar_ptr->mutex);
3810 #endif
3814 return newp;
3816 #ifdef libc_hidden_def
3817 libc_hidden_def (public_rEALLOc)
3818 #endif
3820 Void_t*
3821 public_mEMALIGn(size_t alignment, size_t bytes)
3823 mstate ar_ptr;
3824 Void_t *p;
3826 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3827 __const __malloc_ptr_t)) =
3828 __memalign_hook;
3829 if (__builtin_expect (hook != NULL, 0))
3830 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3832 /* If need less alignment than we give anyway, just relay to malloc */
3833 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3835 /* Otherwise, ensure that it is at least a minimum chunk size */
3836 if (alignment < MINSIZE) alignment = MINSIZE;
3838 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3839 if(!ar_ptr)
3840 return 0;
3841 p = _int_memalign(ar_ptr, alignment, bytes);
3842 if(!p) {
3843 /* Maybe the failure is due to running out of mmapped areas. */
3844 if(ar_ptr != &main_arena) {
3845 (void)mutex_unlock(&ar_ptr->mutex);
3846 ar_ptr = &main_arena;
3847 (void)mutex_lock(&ar_ptr->mutex);
3848 p = _int_memalign(ar_ptr, alignment, bytes);
3849 (void)mutex_unlock(&ar_ptr->mutex);
3850 } else {
3851 #if USE_ARENAS
3852 /* ... or sbrk() has failed and there is still a chance to mmap() */
3853 mstate prev = ar_ptr->next ? ar_ptr : 0;
3854 (void)mutex_unlock(&ar_ptr->mutex);
3855 ar_ptr = arena_get2(prev, bytes);
3856 if(ar_ptr) {
3857 p = _int_memalign(ar_ptr, alignment, bytes);
3858 (void)mutex_unlock(&ar_ptr->mutex);
3860 #endif
3862 } else
3863 (void)mutex_unlock(&ar_ptr->mutex);
3864 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3865 ar_ptr == arena_for_chunk(mem2chunk(p)));
3866 return p;
3868 #ifdef libc_hidden_def
3869 libc_hidden_def (public_mEMALIGn)
3870 #endif
3872 Void_t*
3873 public_vALLOc(size_t bytes)
3875 mstate ar_ptr;
3876 Void_t *p;
3878 if(__malloc_initialized < 0)
3879 ptmalloc_init ();
3881 size_t pagesz = mp_.pagesize;
3883 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3884 __const __malloc_ptr_t)) =
3885 __memalign_hook;
3886 if (__builtin_expect (hook != NULL, 0))
3887 return (*hook)(pagesz, bytes, RETURN_ADDRESS (0));
3889 arena_get(ar_ptr, bytes + pagesz + MINSIZE);
3890 if(!ar_ptr)
3891 return 0;
3892 p = _int_valloc(ar_ptr, bytes);
3893 (void)mutex_unlock(&ar_ptr->mutex);
3894 if(!p) {
3895 /* Maybe the failure is due to running out of mmapped areas. */
3896 if(ar_ptr != &main_arena) {
3897 (void)mutex_lock(&main_arena.mutex);
3898 p = _int_memalign(&main_arena, pagesz, bytes);
3899 (void)mutex_unlock(&main_arena.mutex);
3900 } else {
3901 #if USE_ARENAS
3902 /* ... or sbrk() has failed and there is still a chance to mmap() */
3903 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3904 if(ar_ptr) {
3905 p = _int_memalign(ar_ptr, pagesz, bytes);
3906 (void)mutex_unlock(&ar_ptr->mutex);
3908 #endif
3911 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3912 ar_ptr == arena_for_chunk(mem2chunk(p)));
3914 return p;
3917 Void_t*
3918 public_pVALLOc(size_t bytes)
3920 mstate ar_ptr;
3921 Void_t *p;
3923 if(__malloc_initialized < 0)
3924 ptmalloc_init ();
3926 size_t pagesz = mp_.pagesize;
3927 size_t page_mask = mp_.pagesize - 1;
3928 size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
3930 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3931 __const __malloc_ptr_t)) =
3932 __memalign_hook;
3933 if (__builtin_expect (hook != NULL, 0))
3934 return (*hook)(pagesz, rounded_bytes, RETURN_ADDRESS (0));
3936 arena_get(ar_ptr, bytes + 2*pagesz + MINSIZE);
3937 p = _int_pvalloc(ar_ptr, bytes);
3938 (void)mutex_unlock(&ar_ptr->mutex);
3939 if(!p) {
3940 /* Maybe the failure is due to running out of mmapped areas. */
3941 if(ar_ptr != &main_arena) {
3942 (void)mutex_lock(&main_arena.mutex);
3943 p = _int_memalign(&main_arena, pagesz, rounded_bytes);
3944 (void)mutex_unlock(&main_arena.mutex);
3945 } else {
3946 #if USE_ARENAS
3947 /* ... or sbrk() has failed and there is still a chance to mmap() */
3948 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0,
3949 bytes + 2*pagesz + MINSIZE);
3950 if(ar_ptr) {
3951 p = _int_memalign(ar_ptr, pagesz, rounded_bytes);
3952 (void)mutex_unlock(&ar_ptr->mutex);
3954 #endif
3957 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3958 ar_ptr == arena_for_chunk(mem2chunk(p)));
3960 return p;
3963 Void_t*
3964 public_cALLOc(size_t n, size_t elem_size)
3966 mstate av;
3967 mchunkptr oldtop, p;
3968 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3969 Void_t* mem;
3970 unsigned long clearsize;
3971 unsigned long nclears;
3972 INTERNAL_SIZE_T* d;
3973 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3974 __malloc_hook;
3976 /* size_t is unsigned so the behavior on overflow is defined. */
3977 bytes = n * elem_size;
3978 #define HALF_INTERNAL_SIZE_T \
3979 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3980 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3981 if (elem_size != 0 && bytes / elem_size != n) {
3982 MALLOC_FAILURE_ACTION;
3983 return 0;
3987 if (__builtin_expect (hook != NULL, 0)) {
3988 sz = bytes;
3989 mem = (*hook)(sz, RETURN_ADDRESS (0));
3990 if(mem == 0)
3991 return 0;
3992 #ifdef HAVE_MEMCPY
3993 return memset(mem, 0, sz);
3994 #else
3995 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3996 return mem;
3997 #endif
4000 sz = bytes;
4002 arena_get(av, sz);
4003 if(!av)
4004 return 0;
4006 /* Check if we hand out the top chunk, in which case there may be no
4007 need to clear. */
4008 #if MORECORE_CLEARS
4009 oldtop = top(av);
4010 oldtopsize = chunksize(top(av));
4011 #if MORECORE_CLEARS < 2
4012 /* Only newly allocated memory is guaranteed to be cleared. */
4013 if (av == &main_arena &&
4014 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
4015 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
4016 #endif
4017 if (av != &main_arena)
4019 heap_info *heap = heap_for_ptr (oldtop);
4020 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
4021 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
4023 #endif
4024 mem = _int_malloc(av, sz);
4026 /* Only clearing follows, so we can unlock early. */
4027 (void)mutex_unlock(&av->mutex);
4029 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
4030 av == arena_for_chunk(mem2chunk(mem)));
4032 if (mem == 0) {
4033 /* Maybe the failure is due to running out of mmapped areas. */
4034 if(av != &main_arena) {
4035 (void)mutex_lock(&main_arena.mutex);
4036 mem = _int_malloc(&main_arena, sz);
4037 (void)mutex_unlock(&main_arena.mutex);
4038 } else {
4039 #if USE_ARENAS
4040 /* ... or sbrk() has failed and there is still a chance to mmap() */
4041 (void)mutex_lock(&main_arena.mutex);
4042 av = arena_get2(av->next ? av : 0, sz);
4043 (void)mutex_unlock(&main_arena.mutex);
4044 if(av) {
4045 mem = _int_malloc(av, sz);
4046 (void)mutex_unlock(&av->mutex);
4048 #endif
4050 if (mem == 0) return 0;
4052 p = mem2chunk(mem);
4054 /* Two optional cases in which clearing not necessary */
4055 #if HAVE_MMAP
4056 if (chunk_is_mmapped (p))
4058 if (__builtin_expect (perturb_byte, 0))
4059 MALLOC_ZERO (mem, sz);
4060 return mem;
4062 #endif
4064 csz = chunksize(p);
4066 #if MORECORE_CLEARS
4067 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
4068 /* clear only the bytes from non-freshly-sbrked memory */
4069 csz = oldtopsize;
4071 #endif
4073 /* Unroll clear of <= 36 bytes (72 if 8byte sizes). We know that
4074 contents have an odd number of INTERNAL_SIZE_T-sized words;
4075 minimally 3. */
4076 d = (INTERNAL_SIZE_T*)mem;
4077 clearsize = csz - SIZE_SZ;
4078 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4079 assert(nclears >= 3);
4081 if (nclears > 9)
4082 MALLOC_ZERO(d, clearsize);
4084 else {
4085 *(d+0) = 0;
4086 *(d+1) = 0;
4087 *(d+2) = 0;
4088 if (nclears > 4) {
4089 *(d+3) = 0;
4090 *(d+4) = 0;
4091 if (nclears > 6) {
4092 *(d+5) = 0;
4093 *(d+6) = 0;
4094 if (nclears > 8) {
4095 *(d+7) = 0;
4096 *(d+8) = 0;
4102 return mem;
4105 #ifndef _LIBC
4107 Void_t**
4108 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
4110 mstate ar_ptr;
4111 Void_t** m;
4113 arena_get(ar_ptr, n*elem_size);
4114 if(!ar_ptr)
4115 return 0;
4117 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
4118 (void)mutex_unlock(&ar_ptr->mutex);
4119 return m;
4122 Void_t**
4123 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
4125 mstate ar_ptr;
4126 Void_t** m;
4128 arena_get(ar_ptr, 0);
4129 if(!ar_ptr)
4130 return 0;
4132 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
4133 (void)mutex_unlock(&ar_ptr->mutex);
4134 return m;
4137 void
4138 public_cFREe(Void_t* m)
4140 public_fREe(m);
4143 #endif /* _LIBC */
4146 public_mTRIm(size_t s)
4148 int result = 0;
4150 if(__malloc_initialized < 0)
4151 ptmalloc_init ();
4153 mstate ar_ptr = &main_arena;
4156 (void) mutex_lock (&ar_ptr->mutex);
4157 result |= mTRIm (ar_ptr, s);
4158 (void) mutex_unlock (&ar_ptr->mutex);
4160 ar_ptr = ar_ptr->next;
4162 while (ar_ptr != &main_arena);
4164 return result;
4167 size_t
4168 public_mUSABLe(Void_t* m)
4170 size_t result;
4172 result = mUSABLe(m);
4173 return result;
4176 void
4177 public_mSTATs()
4179 mSTATs();
4182 struct mallinfo public_mALLINFo()
4184 struct mallinfo m;
4186 if(__malloc_initialized < 0)
4187 ptmalloc_init ();
4188 (void)mutex_lock(&main_arena.mutex);
4189 m = mALLINFo(&main_arena);
4190 (void)mutex_unlock(&main_arena.mutex);
4191 return m;
4195 public_mALLOPt(int p, int v)
4197 int result;
4198 result = mALLOPt(p, v);
4199 return result;
4203 ------------------------------ malloc ------------------------------
4206 static Void_t*
4207 _int_malloc(mstate av, size_t bytes)
4209 INTERNAL_SIZE_T nb; /* normalized request size */
4210 unsigned int idx; /* associated bin index */
4211 mbinptr bin; /* associated bin */
4213 mchunkptr victim; /* inspected/selected chunk */
4214 INTERNAL_SIZE_T size; /* its size */
4215 int victim_index; /* its bin index */
4217 mchunkptr remainder; /* remainder from a split */
4218 unsigned long remainder_size; /* its size */
4220 unsigned int block; /* bit map traverser */
4221 unsigned int bit; /* bit map traverser */
4222 unsigned int map; /* current word of binmap */
4224 mchunkptr fwd; /* misc temp for linking */
4225 mchunkptr bck; /* misc temp for linking */
4228 Convert request size to internal form by adding SIZE_SZ bytes
4229 overhead plus possibly more to obtain necessary alignment and/or
4230 to obtain a size of at least MINSIZE, the smallest allocatable
4231 size. Also, checked_request2size traps (returning 0) request sizes
4232 that are so large that they wrap around zero when padded and
4233 aligned.
4236 checked_request2size(bytes, nb);
4239 If the size qualifies as a fastbin, first check corresponding bin.
4240 This code is safe to execute even if av is not yet initialized, so we
4241 can try it without checking, which saves some time on this fast path.
4244 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
4245 idx = fastbin_index(nb);
4246 mfastbinptr* fb = &fastbin (av, idx);
4247 #ifdef ATOMIC_FASTBINS
4248 mchunkptr pp = *fb;
4251 victim = pp;
4252 if (victim == NULL)
4253 break;
4255 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
4256 != victim);
4257 #else
4258 victim = *fb;
4259 #endif
4260 if (victim != 0) {
4261 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
4262 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
4263 chunk2mem (victim));
4264 #ifndef ATOMIC_FASTBINS
4265 *fb = victim->fd;
4266 #endif
4267 check_remalloced_chunk(av, victim, nb);
4268 void *p = chunk2mem(victim);
4269 if (__builtin_expect (perturb_byte, 0))
4270 alloc_perturb (p, bytes);
4271 return p;
4276 If a small request, check regular bin. Since these "smallbins"
4277 hold one size each, no searching within bins is necessary.
4278 (For a large request, we need to wait until unsorted chunks are
4279 processed to find best fit. But for small ones, fits are exact
4280 anyway, so we can check now, which is faster.)
4283 if (in_smallbin_range(nb)) {
4284 idx = smallbin_index(nb);
4285 bin = bin_at(av,idx);
4287 if ( (victim = last(bin)) != bin) {
4288 if (victim == 0) /* initialization check */
4289 malloc_consolidate(av);
4290 else {
4291 bck = victim->bk;
4292 set_inuse_bit_at_offset(victim, nb);
4293 bin->bk = bck;
4294 bck->fd = bin;
4296 if (av != &main_arena)
4297 victim->size |= NON_MAIN_ARENA;
4298 check_malloced_chunk(av, victim, nb);
4299 void *p = chunk2mem(victim);
4300 if (__builtin_expect (perturb_byte, 0))
4301 alloc_perturb (p, bytes);
4302 return p;
4308 If this is a large request, consolidate fastbins before continuing.
4309 While it might look excessive to kill all fastbins before
4310 even seeing if there is space available, this avoids
4311 fragmentation problems normally associated with fastbins.
4312 Also, in practice, programs tend to have runs of either small or
4313 large requests, but less often mixtures, so consolidation is not
4314 invoked all that often in most programs. And the programs that
4315 it is called frequently in otherwise tend to fragment.
4318 else {
4319 idx = largebin_index(nb);
4320 if (have_fastchunks(av))
4321 malloc_consolidate(av);
4325 Process recently freed or remaindered chunks, taking one only if
4326 it is exact fit, or, if this a small request, the chunk is remainder from
4327 the most recent non-exact fit. Place other traversed chunks in
4328 bins. Note that this step is the only place in any routine where
4329 chunks are placed in bins.
4331 The outer loop here is needed because we might not realize until
4332 near the end of malloc that we should have consolidated, so must
4333 do so and retry. This happens at most once, and only when we would
4334 otherwise need to expand memory to service a "small" request.
4337 for(;;) {
4339 int iters = 0;
4340 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
4341 bck = victim->bk;
4342 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
4343 || __builtin_expect (victim->size > av->system_mem, 0))
4344 malloc_printerr (check_action, "malloc(): memory corruption",
4345 chunk2mem (victim));
4346 size = chunksize(victim);
4349 If a small request, try to use last remainder if it is the
4350 only chunk in unsorted bin. This helps promote locality for
4351 runs of consecutive small requests. This is the only
4352 exception to best-fit, and applies only when there is
4353 no exact fit for a small chunk.
4356 if (in_smallbin_range(nb) &&
4357 bck == unsorted_chunks(av) &&
4358 victim == av->last_remainder &&
4359 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4361 /* split and reattach remainder */
4362 remainder_size = size - nb;
4363 remainder = chunk_at_offset(victim, nb);
4364 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4365 av->last_remainder = remainder;
4366 remainder->bk = remainder->fd = unsorted_chunks(av);
4367 if (!in_smallbin_range(remainder_size))
4369 remainder->fd_nextsize = NULL;
4370 remainder->bk_nextsize = NULL;
4373 set_head(victim, nb | PREV_INUSE |
4374 (av != &main_arena ? NON_MAIN_ARENA : 0));
4375 set_head(remainder, remainder_size | PREV_INUSE);
4376 set_foot(remainder, remainder_size);
4378 check_malloced_chunk(av, victim, nb);
4379 void *p = chunk2mem(victim);
4380 if (__builtin_expect (perturb_byte, 0))
4381 alloc_perturb (p, bytes);
4382 return p;
4385 /* remove from unsorted list */
4386 unsorted_chunks(av)->bk = bck;
4387 bck->fd = unsorted_chunks(av);
4389 /* Take now instead of binning if exact fit */
4391 if (size == nb) {
4392 set_inuse_bit_at_offset(victim, size);
4393 if (av != &main_arena)
4394 victim->size |= NON_MAIN_ARENA;
4395 check_malloced_chunk(av, victim, nb);
4396 void *p = chunk2mem(victim);
4397 if (__builtin_expect (perturb_byte, 0))
4398 alloc_perturb (p, bytes);
4399 return p;
4402 /* place chunk in bin */
4404 if (in_smallbin_range(size)) {
4405 victim_index = smallbin_index(size);
4406 bck = bin_at(av, victim_index);
4407 fwd = bck->fd;
4409 else {
4410 victim_index = largebin_index(size);
4411 bck = bin_at(av, victim_index);
4412 fwd = bck->fd;
4414 /* maintain large bins in sorted order */
4415 if (fwd != bck) {
4416 /* Or with inuse bit to speed comparisons */
4417 size |= PREV_INUSE;
4418 /* if smaller than smallest, bypass loop below */
4419 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
4420 if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
4421 fwd = bck;
4422 bck = bck->bk;
4424 victim->fd_nextsize = fwd->fd;
4425 victim->bk_nextsize = fwd->fd->bk_nextsize;
4426 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
4428 else {
4429 assert((fwd->size & NON_MAIN_ARENA) == 0);
4430 while ((unsigned long) size < fwd->size)
4432 fwd = fwd->fd_nextsize;
4433 assert((fwd->size & NON_MAIN_ARENA) == 0);
4436 if ((unsigned long) size == (unsigned long) fwd->size)
4437 /* Always insert in the second position. */
4438 fwd = fwd->fd;
4439 else
4441 victim->fd_nextsize = fwd;
4442 victim->bk_nextsize = fwd->bk_nextsize;
4443 fwd->bk_nextsize = victim;
4444 victim->bk_nextsize->fd_nextsize = victim;
4446 bck = fwd->bk;
4448 } else
4449 victim->fd_nextsize = victim->bk_nextsize = victim;
4452 mark_bin(av, victim_index);
4453 victim->bk = bck;
4454 victim->fd = fwd;
4455 fwd->bk = victim;
4456 bck->fd = victim;
4458 #define MAX_ITERS 10000
4459 if (++iters >= MAX_ITERS)
4460 break;
4464 If a large request, scan through the chunks of current bin in
4465 sorted order to find smallest that fits. Use the skip list for this.
4468 if (!in_smallbin_range(nb)) {
4469 bin = bin_at(av, idx);
4471 /* skip scan if empty or largest chunk is too small */
4472 if ((victim = first(bin)) != bin &&
4473 (unsigned long)(victim->size) >= (unsigned long)(nb)) {
4475 victim = victim->bk_nextsize;
4476 while (((unsigned long)(size = chunksize(victim)) <
4477 (unsigned long)(nb)))
4478 victim = victim->bk_nextsize;
4480 /* Avoid removing the first entry for a size so that the skip
4481 list does not have to be rerouted. */
4482 if (victim != last(bin) && victim->size == victim->fd->size)
4483 victim = victim->fd;
4485 remainder_size = size - nb;
4486 unlink(victim, bck, fwd);
4488 /* Exhaust */
4489 if (remainder_size < MINSIZE) {
4490 set_inuse_bit_at_offset(victim, size);
4491 if (av != &main_arena)
4492 victim->size |= NON_MAIN_ARENA;
4494 /* Split */
4495 else {
4496 remainder = chunk_at_offset(victim, nb);
4497 /* We cannot assume the unsorted list is empty and therefore
4498 have to perform a complete insert here. */
4499 bck = unsorted_chunks(av);
4500 fwd = bck->fd;
4501 remainder->bk = bck;
4502 remainder->fd = fwd;
4503 bck->fd = remainder;
4504 fwd->bk = remainder;
4505 if (!in_smallbin_range(remainder_size))
4507 remainder->fd_nextsize = NULL;
4508 remainder->bk_nextsize = NULL;
4510 set_head(victim, nb | PREV_INUSE |
4511 (av != &main_arena ? NON_MAIN_ARENA : 0));
4512 set_head(remainder, remainder_size | PREV_INUSE);
4513 set_foot(remainder, remainder_size);
4515 check_malloced_chunk(av, victim, nb);
4516 void *p = chunk2mem(victim);
4517 if (__builtin_expect (perturb_byte, 0))
4518 alloc_perturb (p, bytes);
4519 return p;
4524 Search for a chunk by scanning bins, starting with next largest
4525 bin. This search is strictly by best-fit; i.e., the smallest
4526 (with ties going to approximately the least recently used) chunk
4527 that fits is selected.
4529 The bitmap avoids needing to check that most blocks are nonempty.
4530 The particular case of skipping all bins during warm-up phases
4531 when no chunks have been returned yet is faster than it might look.
4534 ++idx;
4535 bin = bin_at(av,idx);
4536 block = idx2block(idx);
4537 map = av->binmap[block];
4538 bit = idx2bit(idx);
4540 for (;;) {
4542 /* Skip rest of block if there are no more set bits in this block. */
4543 if (bit > map || bit == 0) {
4544 do {
4545 if (++block >= BINMAPSIZE) /* out of bins */
4546 goto use_top;
4547 } while ( (map = av->binmap[block]) == 0);
4549 bin = bin_at(av, (block << BINMAPSHIFT));
4550 bit = 1;
4553 /* Advance to bin with set bit. There must be one. */
4554 while ((bit & map) == 0) {
4555 bin = next_bin(bin);
4556 bit <<= 1;
4557 assert(bit != 0);
4560 /* Inspect the bin. It is likely to be non-empty */
4561 victim = last(bin);
4563 /* If a false alarm (empty bin), clear the bit. */
4564 if (victim == bin) {
4565 av->binmap[block] = map &= ~bit; /* Write through */
4566 bin = next_bin(bin);
4567 bit <<= 1;
4570 else {
4571 size = chunksize(victim);
4573 /* We know the first chunk in this bin is big enough to use. */
4574 assert((unsigned long)(size) >= (unsigned long)(nb));
4576 remainder_size = size - nb;
4578 /* unlink */
4579 unlink(victim, bck, fwd);
4581 /* Exhaust */
4582 if (remainder_size < MINSIZE) {
4583 set_inuse_bit_at_offset(victim, size);
4584 if (av != &main_arena)
4585 victim->size |= NON_MAIN_ARENA;
4588 /* Split */
4589 else {
4590 remainder = chunk_at_offset(victim, nb);
4592 /* We cannot assume the unsorted list is empty and therefore
4593 have to perform a complete insert here. */
4594 bck = unsorted_chunks(av);
4595 fwd = bck->fd;
4596 remainder->bk = bck;
4597 remainder->fd = fwd;
4598 bck->fd = remainder;
4599 fwd->bk = remainder;
4601 /* advertise as last remainder */
4602 if (in_smallbin_range(nb))
4603 av->last_remainder = remainder;
4604 if (!in_smallbin_range(remainder_size))
4606 remainder->fd_nextsize = NULL;
4607 remainder->bk_nextsize = NULL;
4609 set_head(victim, nb | PREV_INUSE |
4610 (av != &main_arena ? NON_MAIN_ARENA : 0));
4611 set_head(remainder, remainder_size | PREV_INUSE);
4612 set_foot(remainder, remainder_size);
4614 check_malloced_chunk(av, victim, nb);
4615 void *p = chunk2mem(victim);
4616 if (__builtin_expect (perturb_byte, 0))
4617 alloc_perturb (p, bytes);
4618 return p;
4622 use_top:
4624 If large enough, split off the chunk bordering the end of memory
4625 (held in av->top). Note that this is in accord with the best-fit
4626 search rule. In effect, av->top is treated as larger (and thus
4627 less well fitting) than any other available chunk since it can
4628 be extended to be as large as necessary (up to system
4629 limitations).
4631 We require that av->top always exists (i.e., has size >=
4632 MINSIZE) after initialization, so if it would otherwise be
4633 exhausted by current request, it is replenished. (The main
4634 reason for ensuring it exists is that we may need MINSIZE space
4635 to put in fenceposts in sysmalloc.)
4638 victim = av->top;
4639 size = chunksize(victim);
4641 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4642 remainder_size = size - nb;
4643 remainder = chunk_at_offset(victim, nb);
4644 av->top = remainder;
4645 set_head(victim, nb | PREV_INUSE |
4646 (av != &main_arena ? NON_MAIN_ARENA : 0));
4647 set_head(remainder, remainder_size | PREV_INUSE);
4649 check_malloced_chunk(av, victim, nb);
4650 void *p = chunk2mem(victim);
4651 if (__builtin_expect (perturb_byte, 0))
4652 alloc_perturb (p, bytes);
4653 return p;
4656 #ifdef ATOMIC_FASTBINS
4657 /* When we are using atomic ops to free fast chunks we can get
4658 here for all block sizes. */
4659 else if (have_fastchunks(av)) {
4660 malloc_consolidate(av);
4661 /* restore original bin index */
4662 if (in_smallbin_range(nb))
4663 idx = smallbin_index(nb);
4664 else
4665 idx = largebin_index(nb);
4667 #else
4669 If there is space available in fastbins, consolidate and retry,
4670 to possibly avoid expanding memory. This can occur only if nb is
4671 in smallbin range so we didn't consolidate upon entry.
4674 else if (have_fastchunks(av)) {
4675 assert(in_smallbin_range(nb));
4676 malloc_consolidate(av);
4677 idx = smallbin_index(nb); /* restore original bin index */
4679 #endif
4682 Otherwise, relay to handle system-dependent cases
4684 else {
4685 void *p = sYSMALLOc(nb, av);
4686 if (p != NULL && __builtin_expect (perturb_byte, 0))
4687 alloc_perturb (p, bytes);
4688 return p;
4694 ------------------------------ free ------------------------------
4697 static void
4698 #ifdef ATOMIC_FASTBINS
4699 _int_free(mstate av, mchunkptr p, int have_lock)
4700 #else
4701 _int_free(mstate av, mchunkptr p)
4702 #endif
4704 INTERNAL_SIZE_T size; /* its size */
4705 mfastbinptr* fb; /* associated fastbin */
4706 mchunkptr nextchunk; /* next contiguous chunk */
4707 INTERNAL_SIZE_T nextsize; /* its size */
4708 int nextinuse; /* true if nextchunk is used */
4709 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4710 mchunkptr bck; /* misc temp for linking */
4711 mchunkptr fwd; /* misc temp for linking */
4713 const char *errstr = NULL;
4714 #ifdef ATOMIC_FASTBINS
4715 int locked = 0;
4716 #endif
4718 size = chunksize(p);
4720 /* Little security check which won't hurt performance: the
4721 allocator never wrapps around at the end of the address space.
4722 Therefore we can exclude some size values which might appear
4723 here by accident or by "design" from some intruder. */
4724 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4725 || __builtin_expect (misaligned_chunk (p), 0))
4727 errstr = "free(): invalid pointer";
4728 errout:
4729 #ifdef ATOMIC_FASTBINS
4730 if (! have_lock && locked)
4731 (void)mutex_unlock(&av->mutex);
4732 #endif
4733 malloc_printerr (check_action, errstr, chunk2mem(p));
4734 return;
4736 /* We know that each chunk is at least MINSIZE bytes in size. */
4737 if (__builtin_expect (size < MINSIZE, 0))
4739 errstr = "free(): invalid size";
4740 goto errout;
4743 check_inuse_chunk(av, p);
4746 If eligible, place chunk on a fastbin so it can be found
4747 and used quickly in malloc.
4750 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4752 #if TRIM_FASTBINS
4754 If TRIM_FASTBINS set, don't place chunks
4755 bordering top into fastbins
4757 && (chunk_at_offset(p, size) != av->top)
4758 #endif
4761 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
4762 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4763 >= av->system_mem, 0))
4765 errstr = "free(): invalid next size (fast)";
4766 goto errout;
4769 if (__builtin_expect (perturb_byte, 0))
4770 free_perturb (chunk2mem(p), size - SIZE_SZ);
4772 set_fastchunks(av);
4773 fb = &fastbin (av, fastbin_index(size));
4775 #ifdef ATOMIC_FASTBINS
4776 mchunkptr fd;
4777 mchunkptr old = *fb;
4780 /* Another simple check: make sure the top of the bin is not the
4781 record we are going to add (i.e., double free). */
4782 if (__builtin_expect (old == p, 0))
4784 errstr = "double free or corruption (fasttop)";
4785 goto errout;
4787 p->fd = fd = old;
4789 while ((old = catomic_compare_and_exchange_val_acq (fb, p, fd)) != fd);
4790 #else
4791 /* Another simple check: make sure the top of the bin is not the
4792 record we are going to add (i.e., double free). */
4793 if (__builtin_expect (*fb == p, 0))
4795 errstr = "double free or corruption (fasttop)";
4796 goto errout;
4799 p->fd = *fb;
4800 *fb = p;
4801 #endif
4805 Consolidate other non-mmapped chunks as they arrive.
4808 else if (!chunk_is_mmapped(p)) {
4809 #ifdef ATOMIC_FASTBINS
4810 if (! have_lock) {
4811 # if THREAD_STATS
4812 if(!mutex_trylock(&av->mutex))
4813 ++(av->stat_lock_direct);
4814 else {
4815 (void)mutex_lock(&av->mutex);
4816 ++(av->stat_lock_wait);
4818 # else
4819 (void)mutex_lock(&av->mutex);
4820 # endif
4821 locked = 1;
4823 #endif
4825 nextchunk = chunk_at_offset(p, size);
4827 /* Lightweight tests: check whether the block is already the
4828 top block. */
4829 if (__builtin_expect (p == av->top, 0))
4831 errstr = "double free or corruption (top)";
4832 goto errout;
4834 /* Or whether the next chunk is beyond the boundaries of the arena. */
4835 if (__builtin_expect (contiguous (av)
4836 && (char *) nextchunk
4837 >= ((char *) av->top + chunksize(av->top)), 0))
4839 errstr = "double free or corruption (out)";
4840 goto errout;
4842 /* Or whether the block is actually not marked used. */
4843 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4845 errstr = "double free or corruption (!prev)";
4846 goto errout;
4849 nextsize = chunksize(nextchunk);
4850 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4851 || __builtin_expect (nextsize >= av->system_mem, 0))
4853 errstr = "free(): invalid next size (normal)";
4854 goto errout;
4857 if (__builtin_expect (perturb_byte, 0))
4858 free_perturb (chunk2mem(p), size - SIZE_SZ);
4860 /* consolidate backward */
4861 if (!prev_inuse(p)) {
4862 prevsize = p->prev_size;
4863 size += prevsize;
4864 p = chunk_at_offset(p, -((long) prevsize));
4865 unlink(p, bck, fwd);
4868 if (nextchunk != av->top) {
4869 /* get and clear inuse bit */
4870 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4872 /* consolidate forward */
4873 if (!nextinuse) {
4874 unlink(nextchunk, bck, fwd);
4875 size += nextsize;
4876 } else
4877 clear_inuse_bit_at_offset(nextchunk, 0);
4880 Place the chunk in unsorted chunk list. Chunks are
4881 not placed into regular bins until after they have
4882 been given one chance to be used in malloc.
4885 bck = unsorted_chunks(av);
4886 fwd = bck->fd;
4887 p->fd = fwd;
4888 p->bk = bck;
4889 if (!in_smallbin_range(size))
4891 p->fd_nextsize = NULL;
4892 p->bk_nextsize = NULL;
4894 bck->fd = p;
4895 fwd->bk = p;
4897 set_head(p, size | PREV_INUSE);
4898 set_foot(p, size);
4900 check_free_chunk(av, p);
4904 If the chunk borders the current high end of memory,
4905 consolidate into top
4908 else {
4909 size += nextsize;
4910 set_head(p, size | PREV_INUSE);
4911 av->top = p;
4912 check_chunk(av, p);
4916 If freeing a large space, consolidate possibly-surrounding
4917 chunks. Then, if the total unused topmost memory exceeds trim
4918 threshold, ask malloc_trim to reduce top.
4920 Unless max_fast is 0, we don't know if there are fastbins
4921 bordering top, so we cannot tell for sure whether threshold
4922 has been reached unless fastbins are consolidated. But we
4923 don't want to consolidate on each free. As a compromise,
4924 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4925 is reached.
4928 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4929 if (have_fastchunks(av))
4930 malloc_consolidate(av);
4932 if (av == &main_arena) {
4933 #ifndef MORECORE_CANNOT_TRIM
4934 if ((unsigned long)(chunksize(av->top)) >=
4935 (unsigned long)(mp_.trim_threshold))
4936 sYSTRIm(mp_.top_pad, av);
4937 #endif
4938 } else {
4939 /* Always try heap_trim(), even if the top chunk is not
4940 large, because the corresponding heap might go away. */
4941 heap_info *heap = heap_for_ptr(top(av));
4943 assert(heap->ar_ptr == av);
4944 heap_trim(heap, mp_.top_pad);
4948 #ifdef ATOMIC_FASTBINS
4949 if (! have_lock) {
4950 assert (locked);
4951 (void)mutex_unlock(&av->mutex);
4953 #endif
4956 If the chunk was allocated via mmap, release via munmap(). Note
4957 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4958 user must have overwritten memory. There's nothing we can do to
4959 catch this error unless MALLOC_DEBUG is set, in which case
4960 check_inuse_chunk (above) will have triggered error.
4963 else {
4964 #if HAVE_MMAP
4965 munmap_chunk (p);
4966 #endif
4971 ------------------------- malloc_consolidate -------------------------
4973 malloc_consolidate is a specialized version of free() that tears
4974 down chunks held in fastbins. Free itself cannot be used for this
4975 purpose since, among other things, it might place chunks back onto
4976 fastbins. So, instead, we need to use a minor variant of the same
4977 code.
4979 Also, because this routine needs to be called the first time through
4980 malloc anyway, it turns out to be the perfect place to trigger
4981 initialization code.
4984 #if __STD_C
4985 static void malloc_consolidate(mstate av)
4986 #else
4987 static void malloc_consolidate(av) mstate av;
4988 #endif
4990 mfastbinptr* fb; /* current fastbin being consolidated */
4991 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4992 mchunkptr p; /* current chunk being consolidated */
4993 mchunkptr nextp; /* next chunk to consolidate */
4994 mchunkptr unsorted_bin; /* bin header */
4995 mchunkptr first_unsorted; /* chunk to link to */
4997 /* These have same use as in free() */
4998 mchunkptr nextchunk;
4999 INTERNAL_SIZE_T size;
5000 INTERNAL_SIZE_T nextsize;
5001 INTERNAL_SIZE_T prevsize;
5002 int nextinuse;
5003 mchunkptr bck;
5004 mchunkptr fwd;
5007 If max_fast is 0, we know that av hasn't
5008 yet been initialized, in which case do so below
5011 if (get_max_fast () != 0) {
5012 clear_fastchunks(av);
5014 unsorted_bin = unsorted_chunks(av);
5017 Remove each chunk from fast bin and consolidate it, placing it
5018 then in unsorted bin. Among other reasons for doing this,
5019 placing in unsorted bin avoids needing to calculate actual bins
5020 until malloc is sure that chunks aren't immediately going to be
5021 reused anyway.
5024 #if 0
5025 /* It is wrong to limit the fast bins to search using get_max_fast
5026 because, except for the main arena, all the others might have
5027 blocks in the high fast bins. It's not worth it anyway, just
5028 search all bins all the time. */
5029 maxfb = &fastbin (av, fastbin_index(get_max_fast ()));
5030 #else
5031 maxfb = &fastbin (av, NFASTBINS - 1);
5032 #endif
5033 fb = &fastbin (av, 0);
5034 do {
5035 #ifdef ATOMIC_FASTBINS
5036 p = atomic_exchange_acq (fb, 0);
5037 #else
5038 p = *fb;
5039 #endif
5040 if (p != 0) {
5041 #ifndef ATOMIC_FASTBINS
5042 *fb = 0;
5043 #endif
5044 do {
5045 check_inuse_chunk(av, p);
5046 nextp = p->fd;
5048 /* Slightly streamlined version of consolidation code in free() */
5049 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
5050 nextchunk = chunk_at_offset(p, size);
5051 nextsize = chunksize(nextchunk);
5053 if (!prev_inuse(p)) {
5054 prevsize = p->prev_size;
5055 size += prevsize;
5056 p = chunk_at_offset(p, -((long) prevsize));
5057 unlink(p, bck, fwd);
5060 if (nextchunk != av->top) {
5061 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
5063 if (!nextinuse) {
5064 size += nextsize;
5065 unlink(nextchunk, bck, fwd);
5066 } else
5067 clear_inuse_bit_at_offset(nextchunk, 0);
5069 first_unsorted = unsorted_bin->fd;
5070 unsorted_bin->fd = p;
5071 first_unsorted->bk = p;
5073 if (!in_smallbin_range (size)) {
5074 p->fd_nextsize = NULL;
5075 p->bk_nextsize = NULL;
5078 set_head(p, size | PREV_INUSE);
5079 p->bk = unsorted_bin;
5080 p->fd = first_unsorted;
5081 set_foot(p, size);
5084 else {
5085 size += nextsize;
5086 set_head(p, size | PREV_INUSE);
5087 av->top = p;
5090 } while ( (p = nextp) != 0);
5093 } while (fb++ != maxfb);
5095 else {
5096 malloc_init_state(av);
5097 check_malloc_state(av);
5102 ------------------------------ realloc ------------------------------
5105 Void_t*
5106 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
5107 INTERNAL_SIZE_T nb)
5109 mchunkptr newp; /* chunk to return */
5110 INTERNAL_SIZE_T newsize; /* its size */
5111 Void_t* newmem; /* corresponding user mem */
5113 mchunkptr next; /* next contiguous chunk after oldp */
5115 mchunkptr remainder; /* extra space at end of newp */
5116 unsigned long remainder_size; /* its size */
5118 mchunkptr bck; /* misc temp for linking */
5119 mchunkptr fwd; /* misc temp for linking */
5121 unsigned long copysize; /* bytes to copy */
5122 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
5123 INTERNAL_SIZE_T* s; /* copy source */
5124 INTERNAL_SIZE_T* d; /* copy destination */
5126 const char *errstr = NULL;
5128 /* oldmem size */
5129 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
5130 || __builtin_expect (oldsize >= av->system_mem, 0))
5132 errstr = "realloc(): invalid old size";
5133 errout:
5134 malloc_printerr (check_action, errstr, chunk2mem(oldp));
5135 return NULL;
5138 check_inuse_chunk(av, oldp);
5140 /* All callers already filter out mmap'ed chunks. */
5141 #if 0
5142 if (!chunk_is_mmapped(oldp))
5143 #else
5144 assert (!chunk_is_mmapped(oldp));
5145 #endif
5148 next = chunk_at_offset(oldp, oldsize);
5149 INTERNAL_SIZE_T nextsize = chunksize(next);
5150 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
5151 || __builtin_expect (nextsize >= av->system_mem, 0))
5153 errstr = "realloc(): invalid next size";
5154 goto errout;
5157 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
5158 /* already big enough; split below */
5159 newp = oldp;
5160 newsize = oldsize;
5163 else {
5164 /* Try to expand forward into top */
5165 if (next == av->top &&
5166 (unsigned long)(newsize = oldsize + nextsize) >=
5167 (unsigned long)(nb + MINSIZE)) {
5168 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
5169 av->top = chunk_at_offset(oldp, nb);
5170 set_head(av->top, (newsize - nb) | PREV_INUSE);
5171 check_inuse_chunk(av, oldp);
5172 return chunk2mem(oldp);
5175 /* Try to expand forward into next chunk; split off remainder below */
5176 else if (next != av->top &&
5177 !inuse(next) &&
5178 (unsigned long)(newsize = oldsize + nextsize) >=
5179 (unsigned long)(nb)) {
5180 newp = oldp;
5181 unlink(next, bck, fwd);
5184 /* allocate, copy, free */
5185 else {
5186 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
5187 if (newmem == 0)
5188 return 0; /* propagate failure */
5190 newp = mem2chunk(newmem);
5191 newsize = chunksize(newp);
5194 Avoid copy if newp is next chunk after oldp.
5196 if (newp == next) {
5197 newsize += oldsize;
5198 newp = oldp;
5200 else {
5202 Unroll copy of <= 36 bytes (72 if 8byte sizes)
5203 We know that contents have an odd number of
5204 INTERNAL_SIZE_T-sized words; minimally 3.
5207 copysize = oldsize - SIZE_SZ;
5208 s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
5209 d = (INTERNAL_SIZE_T*)(newmem);
5210 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
5211 assert(ncopies >= 3);
5213 if (ncopies > 9)
5214 MALLOC_COPY(d, s, copysize);
5216 else {
5217 *(d+0) = *(s+0);
5218 *(d+1) = *(s+1);
5219 *(d+2) = *(s+2);
5220 if (ncopies > 4) {
5221 *(d+3) = *(s+3);
5222 *(d+4) = *(s+4);
5223 if (ncopies > 6) {
5224 *(d+5) = *(s+5);
5225 *(d+6) = *(s+6);
5226 if (ncopies > 8) {
5227 *(d+7) = *(s+7);
5228 *(d+8) = *(s+8);
5234 #ifdef ATOMIC_FASTBINS
5235 _int_free(av, oldp, 1);
5236 #else
5237 _int_free(av, oldp);
5238 #endif
5239 check_inuse_chunk(av, newp);
5240 return chunk2mem(newp);
5245 /* If possible, free extra space in old or extended chunk */
5247 assert((unsigned long)(newsize) >= (unsigned long)(nb));
5249 remainder_size = newsize - nb;
5251 if (remainder_size < MINSIZE) { /* not enough extra to split off */
5252 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5253 set_inuse_bit_at_offset(newp, newsize);
5255 else { /* split remainder */
5256 remainder = chunk_at_offset(newp, nb);
5257 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
5258 set_head(remainder, remainder_size | PREV_INUSE |
5259 (av != &main_arena ? NON_MAIN_ARENA : 0));
5260 /* Mark remainder as inuse so free() won't complain */
5261 set_inuse_bit_at_offset(remainder, remainder_size);
5262 #ifdef ATOMIC_FASTBINS
5263 _int_free(av, remainder, 1);
5264 #else
5265 _int_free(av, remainder);
5266 #endif
5269 check_inuse_chunk(av, newp);
5270 return chunk2mem(newp);
5273 #if 0
5275 Handle mmap cases
5278 else {
5279 #if HAVE_MMAP
5281 #if HAVE_MREMAP
5282 INTERNAL_SIZE_T offset = oldp->prev_size;
5283 size_t pagemask = mp_.pagesize - 1;
5284 char *cp;
5285 unsigned long sum;
5287 /* Note the extra SIZE_SZ overhead */
5288 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
5290 /* don't need to remap if still within same page */
5291 if (oldsize == newsize - offset)
5292 return chunk2mem(oldp);
5294 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
5296 if (cp != MAP_FAILED) {
5298 newp = (mchunkptr)(cp + offset);
5299 set_head(newp, (newsize - offset)|IS_MMAPPED);
5301 assert(aligned_OK(chunk2mem(newp)));
5302 assert((newp->prev_size == offset));
5304 /* update statistics */
5305 sum = mp_.mmapped_mem += newsize - oldsize;
5306 if (sum > (unsigned long)(mp_.max_mmapped_mem))
5307 mp_.max_mmapped_mem = sum;
5308 #ifdef NO_THREADS
5309 sum += main_arena.system_mem;
5310 if (sum > (unsigned long)(mp_.max_total_mem))
5311 mp_.max_total_mem = sum;
5312 #endif
5314 return chunk2mem(newp);
5316 #endif
5318 /* Note the extra SIZE_SZ overhead. */
5319 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
5320 newmem = chunk2mem(oldp); /* do nothing */
5321 else {
5322 /* Must alloc, copy, free. */
5323 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
5324 if (newmem != 0) {
5325 MALLOC_COPY(newmem, chunk2mem(oldp), oldsize - 2*SIZE_SZ);
5326 #ifdef ATOMIC_FASTBINS
5327 _int_free(av, oldp, 1);
5328 #else
5329 _int_free(av, oldp);
5330 #endif
5333 return newmem;
5335 #else
5336 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
5337 check_malloc_state(av);
5338 MALLOC_FAILURE_ACTION;
5339 return 0;
5340 #endif
5342 #endif
5346 ------------------------------ memalign ------------------------------
5349 static Void_t*
5350 _int_memalign(mstate av, size_t alignment, size_t bytes)
5352 INTERNAL_SIZE_T nb; /* padded request size */
5353 char* m; /* memory returned by malloc call */
5354 mchunkptr p; /* corresponding chunk */
5355 char* brk; /* alignment point within p */
5356 mchunkptr newp; /* chunk to return */
5357 INTERNAL_SIZE_T newsize; /* its size */
5358 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
5359 mchunkptr remainder; /* spare room at end to split off */
5360 unsigned long remainder_size; /* its size */
5361 INTERNAL_SIZE_T size;
5363 /* If need less alignment than we give anyway, just relay to malloc */
5365 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
5367 /* Otherwise, ensure that it is at least a minimum chunk size */
5369 if (alignment < MINSIZE) alignment = MINSIZE;
5371 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
5372 if ((alignment & (alignment - 1)) != 0) {
5373 size_t a = MALLOC_ALIGNMENT * 2;
5374 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
5375 alignment = a;
5378 checked_request2size(bytes, nb);
5381 Strategy: find a spot within that chunk that meets the alignment
5382 request, and then possibly free the leading and trailing space.
5386 /* Call malloc with worst case padding to hit alignment. */
5388 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
5390 if (m == 0) return 0; /* propagate failure */
5392 p = mem2chunk(m);
5394 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
5397 Find an aligned spot inside chunk. Since we need to give back
5398 leading space in a chunk of at least MINSIZE, if the first
5399 calculation places us at a spot with less than MINSIZE leader,
5400 we can move to the next aligned spot -- we've allocated enough
5401 total room so that this is always possible.
5404 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
5405 -((signed long) alignment));
5406 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
5407 brk += alignment;
5409 newp = (mchunkptr)brk;
5410 leadsize = brk - (char*)(p);
5411 newsize = chunksize(p) - leadsize;
5413 /* For mmapped chunks, just adjust offset */
5414 if (chunk_is_mmapped(p)) {
5415 newp->prev_size = p->prev_size + leadsize;
5416 set_head(newp, newsize|IS_MMAPPED);
5417 return chunk2mem(newp);
5420 /* Otherwise, give back leader, use the rest */
5421 set_head(newp, newsize | PREV_INUSE |
5422 (av != &main_arena ? NON_MAIN_ARENA : 0));
5423 set_inuse_bit_at_offset(newp, newsize);
5424 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
5425 #ifdef ATOMIC_FASTBINS
5426 _int_free(av, p, 1);
5427 #else
5428 _int_free(av, p);
5429 #endif
5430 p = newp;
5432 assert (newsize >= nb &&
5433 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
5436 /* Also give back spare room at the end */
5437 if (!chunk_is_mmapped(p)) {
5438 size = chunksize(p);
5439 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
5440 remainder_size = size - nb;
5441 remainder = chunk_at_offset(p, nb);
5442 set_head(remainder, remainder_size | PREV_INUSE |
5443 (av != &main_arena ? NON_MAIN_ARENA : 0));
5444 set_head_size(p, nb);
5445 #ifdef ATOMIC_FASTBINS
5446 _int_free(av, remainder, 1);
5447 #else
5448 _int_free(av, remainder);
5449 #endif
5453 check_inuse_chunk(av, p);
5454 return chunk2mem(p);
5457 #if 0
5459 ------------------------------ calloc ------------------------------
5462 #if __STD_C
5463 Void_t* cALLOc(size_t n_elements, size_t elem_size)
5464 #else
5465 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
5466 #endif
5468 mchunkptr p;
5469 unsigned long clearsize;
5470 unsigned long nclears;
5471 INTERNAL_SIZE_T* d;
5473 Void_t* mem = mALLOc(n_elements * elem_size);
5475 if (mem != 0) {
5476 p = mem2chunk(mem);
5478 #if MMAP_CLEARS
5479 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
5480 #endif
5483 Unroll clear of <= 36 bytes (72 if 8byte sizes)
5484 We know that contents have an odd number of
5485 INTERNAL_SIZE_T-sized words; minimally 3.
5488 d = (INTERNAL_SIZE_T*)mem;
5489 clearsize = chunksize(p) - SIZE_SZ;
5490 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
5491 assert(nclears >= 3);
5493 if (nclears > 9)
5494 MALLOC_ZERO(d, clearsize);
5496 else {
5497 *(d+0) = 0;
5498 *(d+1) = 0;
5499 *(d+2) = 0;
5500 if (nclears > 4) {
5501 *(d+3) = 0;
5502 *(d+4) = 0;
5503 if (nclears > 6) {
5504 *(d+5) = 0;
5505 *(d+6) = 0;
5506 if (nclears > 8) {
5507 *(d+7) = 0;
5508 *(d+8) = 0;
5515 return mem;
5517 #endif /* 0 */
5519 #ifndef _LIBC
5521 ------------------------- independent_calloc -------------------------
5524 Void_t**
5525 #if __STD_C
5526 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
5527 #else
5528 _int_icalloc(av, n_elements, elem_size, chunks)
5529 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
5530 #endif
5532 size_t sz = elem_size; /* serves as 1-element array */
5533 /* opts arg of 3 means all elements are same size, and should be cleared */
5534 return iALLOc(av, n_elements, &sz, 3, chunks);
5538 ------------------------- independent_comalloc -------------------------
5541 Void_t**
5542 #if __STD_C
5543 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
5544 #else
5545 _int_icomalloc(av, n_elements, sizes, chunks)
5546 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
5547 #endif
5549 return iALLOc(av, n_elements, sizes, 0, chunks);
5554 ------------------------------ ialloc ------------------------------
5555 ialloc provides common support for independent_X routines, handling all of
5556 the combinations that can result.
5558 The opts arg has:
5559 bit 0 set if all elements are same size (using sizes[0])
5560 bit 1 set if elements should be zeroed
5564 static Void_t**
5565 #if __STD_C
5566 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
5567 #else
5568 iALLOc(av, n_elements, sizes, opts, chunks)
5569 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
5570 #endif
5572 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
5573 INTERNAL_SIZE_T contents_size; /* total size of elements */
5574 INTERNAL_SIZE_T array_size; /* request size of pointer array */
5575 Void_t* mem; /* malloced aggregate space */
5576 mchunkptr p; /* corresponding chunk */
5577 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
5578 Void_t** marray; /* either "chunks" or malloced ptr array */
5579 mchunkptr array_chunk; /* chunk for malloced ptr array */
5580 int mmx; /* to disable mmap */
5581 INTERNAL_SIZE_T size;
5582 INTERNAL_SIZE_T size_flags;
5583 size_t i;
5585 /* Ensure initialization/consolidation */
5586 if (have_fastchunks(av)) malloc_consolidate(av);
5588 /* compute array length, if needed */
5589 if (chunks != 0) {
5590 if (n_elements == 0)
5591 return chunks; /* nothing to do */
5592 marray = chunks;
5593 array_size = 0;
5595 else {
5596 /* if empty req, must still return chunk representing empty array */
5597 if (n_elements == 0)
5598 return (Void_t**) _int_malloc(av, 0);
5599 marray = 0;
5600 array_size = request2size(n_elements * (sizeof(Void_t*)));
5603 /* compute total element size */
5604 if (opts & 0x1) { /* all-same-size */
5605 element_size = request2size(*sizes);
5606 contents_size = n_elements * element_size;
5608 else { /* add up all the sizes */
5609 element_size = 0;
5610 contents_size = 0;
5611 for (i = 0; i != n_elements; ++i)
5612 contents_size += request2size(sizes[i]);
5615 /* subtract out alignment bytes from total to minimize overallocation */
5616 size = contents_size + array_size - MALLOC_ALIGN_MASK;
5619 Allocate the aggregate chunk.
5620 But first disable mmap so malloc won't use it, since
5621 we would not be able to later free/realloc space internal
5622 to a segregated mmap region.
5624 mmx = mp_.n_mmaps_max; /* disable mmap */
5625 mp_.n_mmaps_max = 0;
5626 mem = _int_malloc(av, size);
5627 mp_.n_mmaps_max = mmx; /* reset mmap */
5628 if (mem == 0)
5629 return 0;
5631 p = mem2chunk(mem);
5632 assert(!chunk_is_mmapped(p));
5633 remainder_size = chunksize(p);
5635 if (opts & 0x2) { /* optionally clear the elements */
5636 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
5639 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
5641 /* If not provided, allocate the pointer array as final part of chunk */
5642 if (marray == 0) {
5643 array_chunk = chunk_at_offset(p, contents_size);
5644 marray = (Void_t**) (chunk2mem(array_chunk));
5645 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
5646 remainder_size = contents_size;
5649 /* split out elements */
5650 for (i = 0; ; ++i) {
5651 marray[i] = chunk2mem(p);
5652 if (i != n_elements-1) {
5653 if (element_size != 0)
5654 size = element_size;
5655 else
5656 size = request2size(sizes[i]);
5657 remainder_size -= size;
5658 set_head(p, size | size_flags);
5659 p = chunk_at_offset(p, size);
5661 else { /* the final element absorbs any overallocation slop */
5662 set_head(p, remainder_size | size_flags);
5663 break;
5667 #if MALLOC_DEBUG
5668 if (marray != chunks) {
5669 /* final element must have exactly exhausted chunk */
5670 if (element_size != 0)
5671 assert(remainder_size == element_size);
5672 else
5673 assert(remainder_size == request2size(sizes[i]));
5674 check_inuse_chunk(av, mem2chunk(marray));
5677 for (i = 0; i != n_elements; ++i)
5678 check_inuse_chunk(av, mem2chunk(marray[i]));
5679 #endif
5681 return marray;
5683 #endif /* _LIBC */
5687 ------------------------------ valloc ------------------------------
5690 static Void_t*
5691 #if __STD_C
5692 _int_valloc(mstate av, size_t bytes)
5693 #else
5694 _int_valloc(av, bytes) mstate av; size_t bytes;
5695 #endif
5697 /* Ensure initialization/consolidation */
5698 if (have_fastchunks(av)) malloc_consolidate(av);
5699 return _int_memalign(av, mp_.pagesize, bytes);
5703 ------------------------------ pvalloc ------------------------------
5707 static Void_t*
5708 #if __STD_C
5709 _int_pvalloc(mstate av, size_t bytes)
5710 #else
5711 _int_pvalloc(av, bytes) mstate av, size_t bytes;
5712 #endif
5714 size_t pagesz;
5716 /* Ensure initialization/consolidation */
5717 if (have_fastchunks(av)) malloc_consolidate(av);
5718 pagesz = mp_.pagesize;
5719 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5724 ------------------------------ malloc_trim ------------------------------
5727 #if __STD_C
5728 static int mTRIm(mstate av, size_t pad)
5729 #else
5730 static int mTRIm(av, pad) mstate av; size_t pad;
5731 #endif
5733 /* Ensure initialization/consolidation */
5734 malloc_consolidate (av);
5736 const size_t ps = mp_.pagesize;
5737 int psindex = bin_index (ps);
5738 const size_t psm1 = ps - 1;
5740 int result = 0;
5741 for (int i = 1; i < NBINS; ++i)
5742 if (i == 1 || i >= psindex)
5744 mbinptr bin = bin_at (av, i);
5746 for (mchunkptr p = last (bin); p != bin; p = p->bk)
5748 INTERNAL_SIZE_T size = chunksize (p);
5750 if (size > psm1 + sizeof (struct malloc_chunk))
5752 /* See whether the chunk contains at least one unused page. */
5753 char *paligned_mem = (char *) (((uintptr_t) p
5754 + sizeof (struct malloc_chunk)
5755 + psm1) & ~psm1);
5757 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
5758 assert ((char *) p + size > paligned_mem);
5760 /* This is the size we could potentially free. */
5761 size -= paligned_mem - (char *) p;
5763 if (size > psm1)
5765 #ifdef MALLOC_DEBUG
5766 /* When debugging we simulate destroying the memory
5767 content. */
5768 memset (paligned_mem, 0x89, size & ~psm1);
5769 #endif
5770 madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
5772 result = 1;
5778 #ifndef MORECORE_CANNOT_TRIM
5779 return result | (av == &main_arena ? sYSTRIm (pad, av) : 0);
5780 #else
5781 return result;
5782 #endif
5787 ------------------------- malloc_usable_size -------------------------
5790 #if __STD_C
5791 size_t mUSABLe(Void_t* mem)
5792 #else
5793 size_t mUSABLe(mem) Void_t* mem;
5794 #endif
5796 mchunkptr p;
5797 if (mem != 0) {
5798 p = mem2chunk(mem);
5799 if (chunk_is_mmapped(p))
5800 return chunksize(p) - 2*SIZE_SZ;
5801 else if (inuse(p))
5802 return chunksize(p) - SIZE_SZ;
5804 return 0;
5808 ------------------------------ mallinfo ------------------------------
5811 struct mallinfo mALLINFo(mstate av)
5813 struct mallinfo mi;
5814 size_t i;
5815 mbinptr b;
5816 mchunkptr p;
5817 INTERNAL_SIZE_T avail;
5818 INTERNAL_SIZE_T fastavail;
5819 int nblocks;
5820 int nfastblocks;
5822 /* Ensure initialization */
5823 if (av->top == 0) malloc_consolidate(av);
5825 check_malloc_state(av);
5827 /* Account for top */
5828 avail = chunksize(av->top);
5829 nblocks = 1; /* top always exists */
5831 /* traverse fastbins */
5832 nfastblocks = 0;
5833 fastavail = 0;
5835 for (i = 0; i < NFASTBINS; ++i) {
5836 for (p = fastbin (av, i); p != 0; p = p->fd) {
5837 ++nfastblocks;
5838 fastavail += chunksize(p);
5842 avail += fastavail;
5844 /* traverse regular bins */
5845 for (i = 1; i < NBINS; ++i) {
5846 b = bin_at(av, i);
5847 for (p = last(b); p != b; p = p->bk) {
5848 ++nblocks;
5849 avail += chunksize(p);
5853 mi.smblks = nfastblocks;
5854 mi.ordblks = nblocks;
5855 mi.fordblks = avail;
5856 mi.uordblks = av->system_mem - avail;
5857 mi.arena = av->system_mem;
5858 mi.hblks = mp_.n_mmaps;
5859 mi.hblkhd = mp_.mmapped_mem;
5860 mi.fsmblks = fastavail;
5861 mi.keepcost = chunksize(av->top);
5862 mi.usmblks = mp_.max_total_mem;
5863 return mi;
5867 ------------------------------ malloc_stats ------------------------------
5870 void mSTATs()
5872 int i;
5873 mstate ar_ptr;
5874 struct mallinfo mi;
5875 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5876 #if THREAD_STATS
5877 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5878 #endif
5880 if(__malloc_initialized < 0)
5881 ptmalloc_init ();
5882 #ifdef _LIBC
5883 _IO_flockfile (stderr);
5884 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5885 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5886 #endif
5887 for (i=0, ar_ptr = &main_arena;; i++) {
5888 (void)mutex_lock(&ar_ptr->mutex);
5889 mi = mALLINFo(ar_ptr);
5890 fprintf(stderr, "Arena %d:\n", i);
5891 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5892 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
5893 #if MALLOC_DEBUG > 1
5894 if (i > 0)
5895 dump_heap(heap_for_ptr(top(ar_ptr)));
5896 #endif
5897 system_b += mi.arena;
5898 in_use_b += mi.uordblks;
5899 #if THREAD_STATS
5900 stat_lock_direct += ar_ptr->stat_lock_direct;
5901 stat_lock_loop += ar_ptr->stat_lock_loop;
5902 stat_lock_wait += ar_ptr->stat_lock_wait;
5903 #endif
5904 (void)mutex_unlock(&ar_ptr->mutex);
5905 ar_ptr = ar_ptr->next;
5906 if(ar_ptr == &main_arena) break;
5908 #if HAVE_MMAP
5909 fprintf(stderr, "Total (incl. mmap):\n");
5910 #else
5911 fprintf(stderr, "Total:\n");
5912 #endif
5913 fprintf(stderr, "system bytes = %10u\n", system_b);
5914 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5915 #ifdef NO_THREADS
5916 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
5917 #endif
5918 #if HAVE_MMAP
5919 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5920 fprintf(stderr, "max mmap bytes = %10lu\n",
5921 (unsigned long)mp_.max_mmapped_mem);
5922 #endif
5923 #if THREAD_STATS
5924 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
5925 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5926 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
5927 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5928 fprintf(stderr, "locked total = %10ld\n",
5929 stat_lock_direct + stat_lock_loop + stat_lock_wait);
5930 #endif
5931 #ifdef _LIBC
5932 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5933 _IO_funlockfile (stderr);
5934 #endif
5939 ------------------------------ mallopt ------------------------------
5942 #if __STD_C
5943 int mALLOPt(int param_number, int value)
5944 #else
5945 int mALLOPt(param_number, value) int param_number; int value;
5946 #endif
5948 mstate av = &main_arena;
5949 int res = 1;
5951 if(__malloc_initialized < 0)
5952 ptmalloc_init ();
5953 (void)mutex_lock(&av->mutex);
5954 /* Ensure initialization/consolidation */
5955 malloc_consolidate(av);
5957 switch(param_number) {
5958 case M_MXFAST:
5959 if (value >= 0 && value <= MAX_FAST_SIZE) {
5960 set_max_fast(value);
5962 else
5963 res = 0;
5964 break;
5966 case M_TRIM_THRESHOLD:
5967 mp_.trim_threshold = value;
5968 mp_.no_dyn_threshold = 1;
5969 break;
5971 case M_TOP_PAD:
5972 mp_.top_pad = value;
5973 mp_.no_dyn_threshold = 1;
5974 break;
5976 case M_MMAP_THRESHOLD:
5977 #if USE_ARENAS
5978 /* Forbid setting the threshold too high. */
5979 if((unsigned long)value > HEAP_MAX_SIZE/2)
5980 res = 0;
5981 else
5982 #endif
5983 mp_.mmap_threshold = value;
5984 mp_.no_dyn_threshold = 1;
5985 break;
5987 case M_MMAP_MAX:
5988 #if !HAVE_MMAP
5989 if (value != 0)
5990 res = 0;
5991 else
5992 #endif
5993 mp_.n_mmaps_max = value;
5994 mp_.no_dyn_threshold = 1;
5995 break;
5997 case M_CHECK_ACTION:
5998 check_action = value;
5999 break;
6001 case M_PERTURB:
6002 perturb_byte = value;
6003 break;
6005 #ifdef PER_THREAD
6006 case M_ARENA_TEST:
6007 if (value > 0)
6008 mp_.arena_test = value;
6009 break;
6011 case M_ARENA_MAX:
6012 if (value > 0)
6013 mp_.arena_max = value;
6014 break;
6015 #endif
6017 (void)mutex_unlock(&av->mutex);
6018 return res;
6023 -------------------- Alternative MORECORE functions --------------------
6028 General Requirements for MORECORE.
6030 The MORECORE function must have the following properties:
6032 If MORECORE_CONTIGUOUS is false:
6034 * MORECORE must allocate in multiples of pagesize. It will
6035 only be called with arguments that are multiples of pagesize.
6037 * MORECORE(0) must return an address that is at least
6038 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
6040 else (i.e. If MORECORE_CONTIGUOUS is true):
6042 * Consecutive calls to MORECORE with positive arguments
6043 return increasing addresses, indicating that space has been
6044 contiguously extended.
6046 * MORECORE need not allocate in multiples of pagesize.
6047 Calls to MORECORE need not have args of multiples of pagesize.
6049 * MORECORE need not page-align.
6051 In either case:
6053 * MORECORE may allocate more memory than requested. (Or even less,
6054 but this will generally result in a malloc failure.)
6056 * MORECORE must not allocate memory when given argument zero, but
6057 instead return one past the end address of memory from previous
6058 nonzero call. This malloc does NOT call MORECORE(0)
6059 until at least one call with positive arguments is made, so
6060 the initial value returned is not important.
6062 * Even though consecutive calls to MORECORE need not return contiguous
6063 addresses, it must be OK for malloc'ed chunks to span multiple
6064 regions in those cases where they do happen to be contiguous.
6066 * MORECORE need not handle negative arguments -- it may instead
6067 just return MORECORE_FAILURE when given negative arguments.
6068 Negative arguments are always multiples of pagesize. MORECORE
6069 must not misinterpret negative args as large positive unsigned
6070 args. You can suppress all such calls from even occurring by defining
6071 MORECORE_CANNOT_TRIM,
6073 There is some variation across systems about the type of the
6074 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
6075 actually be size_t, because sbrk supports negative args, so it is
6076 normally the signed type of the same width as size_t (sometimes
6077 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
6078 matter though. Internally, we use "long" as arguments, which should
6079 work across all reasonable possibilities.
6081 Additionally, if MORECORE ever returns failure for a positive
6082 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
6083 system allocator. This is a useful backup strategy for systems with
6084 holes in address spaces -- in this case sbrk cannot contiguously
6085 expand the heap, but mmap may be able to map noncontiguous space.
6087 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
6088 a function that always returns MORECORE_FAILURE.
6090 If you are using this malloc with something other than sbrk (or its
6091 emulation) to supply memory regions, you probably want to set
6092 MORECORE_CONTIGUOUS as false. As an example, here is a custom
6093 allocator kindly contributed for pre-OSX macOS. It uses virtually
6094 but not necessarily physically contiguous non-paged memory (locked
6095 in, present and won't get swapped out). You can use it by
6096 uncommenting this section, adding some #includes, and setting up the
6097 appropriate defines above:
6099 #define MORECORE osMoreCore
6100 #define MORECORE_CONTIGUOUS 0
6102 There is also a shutdown routine that should somehow be called for
6103 cleanup upon program exit.
6105 #define MAX_POOL_ENTRIES 100
6106 #define MINIMUM_MORECORE_SIZE (64 * 1024)
6107 static int next_os_pool;
6108 void *our_os_pools[MAX_POOL_ENTRIES];
6110 void *osMoreCore(int size)
6112 void *ptr = 0;
6113 static void *sbrk_top = 0;
6115 if (size > 0)
6117 if (size < MINIMUM_MORECORE_SIZE)
6118 size = MINIMUM_MORECORE_SIZE;
6119 if (CurrentExecutionLevel() == kTaskLevel)
6120 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
6121 if (ptr == 0)
6123 return (void *) MORECORE_FAILURE;
6125 // save ptrs so they can be freed during cleanup
6126 our_os_pools[next_os_pool] = ptr;
6127 next_os_pool++;
6128 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
6129 sbrk_top = (char *) ptr + size;
6130 return ptr;
6132 else if (size < 0)
6134 // we don't currently support shrink behavior
6135 return (void *) MORECORE_FAILURE;
6137 else
6139 return sbrk_top;
6143 // cleanup any allocated memory pools
6144 // called as last thing before shutting down driver
6146 void osCleanupMem(void)
6148 void **ptr;
6150 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
6151 if (*ptr)
6153 PoolDeallocate(*ptr);
6154 *ptr = 0;
6161 /* Helper code. */
6163 extern char **__libc_argv attribute_hidden;
6165 static void
6166 malloc_printerr(int action, const char *str, void *ptr)
6168 if ((action & 5) == 5)
6169 __libc_message (action & 2, "%s\n", str);
6170 else if (action & 1)
6172 char buf[2 * sizeof (uintptr_t) + 1];
6174 buf[sizeof (buf) - 1] = '\0';
6175 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
6176 while (cp > buf)
6177 *--cp = '0';
6179 __libc_message (action & 2,
6180 "*** glibc detected *** %s: %s: 0x%s ***\n",
6181 __libc_argv[0] ?: "<unknown>", str, cp);
6183 else if (action & 2)
6184 abort ();
6187 #ifdef _LIBC
6188 # include <sys/param.h>
6190 /* We need a wrapper function for one of the additions of POSIX. */
6192 __posix_memalign (void **memptr, size_t alignment, size_t size)
6194 void *mem;
6195 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
6196 __const __malloc_ptr_t)) =
6197 __memalign_hook;
6199 /* Test whether the SIZE argument is valid. It must be a power of
6200 two multiple of sizeof (void *). */
6201 if (alignment % sizeof (void *) != 0
6202 || !powerof2 (alignment / sizeof (void *)) != 0
6203 || alignment == 0)
6204 return EINVAL;
6206 /* Call the hook here, so that caller is posix_memalign's caller
6207 and not posix_memalign itself. */
6208 if (__builtin_expect (hook != NULL, 0))
6209 mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
6210 else
6211 mem = public_mEMALIGn (alignment, size);
6213 if (mem != NULL) {
6214 *memptr = mem;
6215 return 0;
6218 return ENOMEM;
6220 weak_alias (__posix_memalign, posix_memalign)
6224 malloc_info (int options, FILE *fp)
6226 /* For now, at least. */
6227 if (options != 0)
6228 return EINVAL;
6230 int n = 0;
6231 size_t total_nblocks = 0;
6232 size_t total_nfastblocks = 0;
6233 size_t total_avail = 0;
6234 size_t total_fastavail = 0;
6236 void mi_arena (mstate ar_ptr)
6238 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
6240 size_t nblocks = 0;
6241 size_t nfastblocks = 0;
6242 size_t avail = 0;
6243 size_t fastavail = 0;
6244 struct
6246 size_t from;
6247 size_t to;
6248 size_t total;
6249 size_t count;
6250 } sizes[NFASTBINS + NBINS - 1];
6251 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
6253 mutex_lock (&ar_ptr->mutex);
6255 for (size_t i = 0; i < NFASTBINS; ++i)
6257 mchunkptr p = fastbin (ar_ptr, i);
6258 if (p != NULL)
6260 size_t nthissize = 0;
6261 size_t thissize = chunksize (p);
6263 while (p != NULL)
6265 ++nthissize;
6266 p = p->fd;
6269 fastavail += nthissize * thissize;
6270 nfastblocks += nthissize;
6271 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
6272 sizes[i].to = thissize;
6273 sizes[i].count = nthissize;
6275 else
6276 sizes[i].from = sizes[i].to = sizes[i].count = 0;
6278 sizes[i].total = sizes[i].count * sizes[i].to;
6281 mbinptr bin = bin_at (ar_ptr, 1);
6282 struct malloc_chunk *r = bin->fd;
6283 while (r != bin)
6285 ++sizes[NFASTBINS].count;
6286 sizes[NFASTBINS].total += r->size;
6287 sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
6288 sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
6289 r = r->fd;
6291 nblocks += sizes[NFASTBINS].count;
6292 avail += sizes[NFASTBINS].total;
6294 for (size_t i = 2; i < NBINS; ++i)
6296 bin = bin_at (ar_ptr, i);
6297 r = bin->fd;
6298 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
6299 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
6300 = sizes[NFASTBINS - 1 + i].count = 0;
6302 while (r != bin)
6304 ++sizes[NFASTBINS - 1 + i].count;
6305 sizes[NFASTBINS - 1 + i].total += r->size;
6306 sizes[NFASTBINS - 1 + i].from = MIN (sizes[NFASTBINS - 1 + i].from,
6307 r->size);
6308 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
6309 r->size);
6311 r = r->fd;
6314 if (sizes[NFASTBINS - 1 + i].count == 0)
6315 sizes[NFASTBINS - 1 + i].from = 0;
6316 nblocks += sizes[NFASTBINS - 1 + i].count;
6317 avail += sizes[NFASTBINS - 1 + i].total;
6320 mutex_unlock (&ar_ptr->mutex);
6322 total_nfastblocks += nfastblocks;
6323 total_fastavail += fastavail;
6325 total_nblocks += nblocks;
6326 total_avail += avail;
6328 for (size_t i = 0; i < nsizes; ++i)
6329 if (sizes[i].count != 0 && i != NFASTBINS)
6330 fprintf (fp, "\
6331 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6332 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
6334 if (sizes[NFASTBINS].count != 0)
6335 fprintf (fp, "\
6336 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
6337 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
6338 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
6340 fprintf (fp,
6341 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6342 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6343 "</heap>\n",
6344 nfastblocks, fastavail, nblocks, avail);
6347 fputs ("<malloc version=\"1\">\n", fp);
6349 /* Iterate over all arenas currently in use. */
6350 mstate ar_ptr = &main_arena;
6353 mi_arena (ar_ptr);
6354 ar_ptr = ar_ptr->next;
6356 while (ar_ptr != &main_arena);
6358 fprintf (fp,
6359 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
6360 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
6361 "</malloc>\n",
6362 total_nfastblocks, total_fastavail, total_nblocks, total_avail);
6364 return 0;
6368 strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
6369 strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
6370 strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
6371 strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
6372 strong_alias (__libc_memalign, __memalign)
6373 weak_alias (__libc_memalign, memalign)
6374 strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
6375 strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
6376 strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
6377 strong_alias (__libc_mallinfo, __mallinfo)
6378 weak_alias (__libc_mallinfo, mallinfo)
6379 strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
6381 weak_alias (__malloc_stats, malloc_stats)
6382 weak_alias (__malloc_usable_size, malloc_usable_size)
6383 weak_alias (__malloc_trim, malloc_trim)
6384 weak_alias (__malloc_get_state, malloc_get_state)
6385 weak_alias (__malloc_set_state, malloc_set_state)
6387 #endif /* _LIBC */
6389 /* ------------------------------------------------------------
6390 History:
6392 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
6396 * Local variables:
6397 * c-basic-offset: 2
6398 * End: