1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2002,2003,2004,2005,2006 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If not,
19 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
23 This is a version (aka ptmalloc2) of malloc/free/realloc written by
24 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
26 * Version ptmalloc2-20011215
27 based on:
28 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
30 Note: There may be an updated version of this malloc obtainable at
31 http://www.malloc.de/malloc/ptmalloc2.tar.gz
32 Check before installing!
34 * Quickstart
36 In order to compile this implementation, a Makefile is provided with
37 the ptmalloc2 distribution, which has pre-defined targets for some
38 popular systems (e.g. "make posix" for Posix threads). All that is
39 typically required with regard to compiler flags is the selection of
40 the thread package via defining one out of USE_PTHREADS, USE_THR or
41 USE_SPROC. Check the thread-m.h file for what effects this has.
42 Many/most systems will additionally require USE_TSD_DATA_HACK to be
43 defined, so this is the default for "make posix".
45 * Why use this malloc?
47 This is not the fastest, most space-conserving, most portable, or
48 most tunable malloc ever written. However it is among the fastest
49 while also being among the most space-conserving, portable and tunable.
50 Consistent balance across these factors results in a good general-purpose
51 allocator for malloc-intensive programs.
53 The main properties of the algorithms are:
54 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
55 with ties normally decided via FIFO (i.e. least recently used).
56 * For small (<= 64 bytes by default) requests, it is a caching
57 allocator, that maintains pools of quickly recycled chunks.
58 * In between, and for combinations of large and small requests, it does
59 the best it can trying to meet both goals at once.
60 * For very large requests (>= 128KB by default), it relies on system
61 memory mapping facilities, if supported.
63 For a longer but slightly out of date high-level description, see
64 http://gee.cs.oswego.edu/dl/html/malloc.html
66 You may already by default be using a C library containing a malloc
67 that is based on some version of this malloc (for example in
68 linux). You might still want to use the one in this file in order to
69 customize settings or to avoid overheads associated with library
70 versions.
72 * Contents, described in more detail in "description of public routines" below.
74 Standard (ANSI/SVID/...) functions:
75 malloc(size_t n);
76 calloc(size_t n_elements, size_t element_size);
77 free(Void_t* p);
78 realloc(Void_t* p, size_t n);
79 memalign(size_t alignment, size_t n);
80 valloc(size_t n);
81 mallinfo()
82 mallopt(int parameter_number, int parameter_value)
84 Additional functions:
85 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
86 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
87 pvalloc(size_t n);
88 cfree(Void_t* p);
89 malloc_trim(size_t pad);
90 malloc_usable_size(Void_t* p);
91 malloc_stats();
93 * Vital statistics:
95 Supported pointer representation: 4 or 8 bytes
96 Supported size_t representation: 4 or 8 bytes
97 Note that size_t is allowed to be 4 bytes even if pointers are 8.
98 You can adjust this by defining INTERNAL_SIZE_T
100 Alignment: 2 * sizeof(size_t) (default)
101 (i.e., 8 byte alignment with 4byte size_t). This suffices for
102 nearly all current machines and C compilers. However, you can
103 define MALLOC_ALIGNMENT to be wider than this if necessary.
105 Minimum overhead per allocated chunk: 4 or 8 bytes
106 Each malloced chunk has a hidden word of overhead holding size
107 and status information.
109 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
110 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
112 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
113 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
114 needed; 4 (8) for a trailing size field and 8 (16) bytes for
115 free list pointers. Thus, the minimum allocatable size is
116 16/24/32 bytes.
118 Even a request for zero bytes (i.e., malloc(0)) returns a
119 pointer to something of the minimum allocatable size.
121 The maximum overhead wastage (i.e., the number of extra bytes
122 allocated beyond what was requested in malloc) is less than or equal
123 to the minimum size, except for requests >= mmap_threshold that
124 are serviced via mmap(), where the worst case wastage is 2 *
125 sizeof(size_t) bytes plus the remainder from a system page (the
126 minimal mmap unit); typically 4096 or 8192 bytes.
128 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
129 8-byte size_t: 2^64 minus about two pages
131 It is assumed that (possibly signed) size_t values suffice to
132 represent chunk sizes. `Possibly signed' is due to the fact
133 that `size_t' may be defined on a system as either a signed or
134 an unsigned type. The ISO C standard says that it must be
135 unsigned, but a few systems are known not to adhere to this.
136 Additionally, even when size_t is unsigned, sbrk (which is by
137 default used to obtain memory from system) accepts signed
138 arguments, and may not be able to handle size_t-wide arguments
139 with negative sign bit. Generally, values that would
140 appear as negative after accounting for overhead and alignment
141 are supported only via mmap(), which does not have this
142 limitation.
144 Requests for sizes outside the allowed range will perform an optional
145 failure action and then return null. (Requests may also
146 fail because a system is out of memory.)
148 Thread-safety: thread-safe unless NO_THREADS is defined
150 Compliance: I believe it is compliant with the 1997 Single Unix Specification
151 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
152 others as well.
154 * Synopsis of compile-time options:
156 People have reported using previous versions of this malloc on all
157 versions of Unix, sometimes by tweaking some of the defines
158 below. It has been tested most extensively on Solaris and
159 Linux. It is also reported to work on WIN32 platforms.
160 People also report using it in stand-alone embedded systems.
162 The implementation is in straight, hand-tuned ANSI C. It is not
163 at all modular. (Sorry!) It uses a lot of macros. To be at all
164 usable, this code should be compiled using an optimizing compiler
165 (for example gcc -O3) that can simplify expressions and control
166 paths. (FAQ: some macros import variables as arguments rather than
167 declare locals because people reported that some debuggers
168 otherwise get confused.)
170 OPTION DEFAULT VALUE
172 Compilation Environment options:
174 __STD_C derived from C compiler defines
175 WIN32 NOT defined
176 HAVE_MEMCPY defined
177 USE_MEMCPY 1 if HAVE_MEMCPY is defined
178 HAVE_MMAP defined as 1
179 MMAP_CLEARS 1
180 HAVE_MREMAP 0 unless linux defined
181 USE_ARENAS the same as HAVE_MMAP
182 malloc_getpagesize derived from system #includes, or 4096 if not
183 HAVE_USR_INCLUDE_MALLOC_H NOT defined
184 LACKS_UNISTD_H NOT defined unless WIN32
185 LACKS_SYS_PARAM_H NOT defined unless WIN32
186 LACKS_SYS_MMAN_H NOT defined unless WIN32
188 Changing default word sizes:
190 INTERNAL_SIZE_T size_t
191 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
192 __alignof__ (long double))
194 Configuration and functionality options:
196 USE_DL_PREFIX NOT defined
197 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
198 USE_MALLOC_LOCK NOT defined
199 MALLOC_DEBUG NOT defined
200 REALLOC_ZERO_BYTES_FREES 1
201 MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
202 TRIM_FASTBINS 0
204 Options for customizing MORECORE:
206 MORECORE sbrk
207 MORECORE_FAILURE -1
208 MORECORE_CONTIGUOUS 1
209 MORECORE_CANNOT_TRIM NOT defined
210 MORECORE_CLEARS 1
211 MMAP_AS_MORECORE_SIZE (1024 * 1024)
213 Tuning options that are also dynamically changeable via mallopt:
215 DEFAULT_MXFAST 64
216 DEFAULT_TRIM_THRESHOLD 128 * 1024
217 DEFAULT_TOP_PAD 0
218 DEFAULT_MMAP_THRESHOLD 128 * 1024
219 DEFAULT_MMAP_MAX 65536
221 There are several other #defined constants and macros that you
222 probably don't want to touch unless you are extending or adapting malloc. */
225 __STD_C should be nonzero if using ANSI-standard C compiler, a C++
226 compiler, or a C compiler sufficiently close to ANSI to get away
227 with it.
230 #ifndef __STD_C
231 #if defined(__STDC__) || defined(__cplusplus)
232 #define __STD_C 1
233 #else
234 #define __STD_C 0
235 #endif
236 #endif /*__STD_C*/
240 Void_t* is the pointer type that malloc should say it returns
243 #ifndef Void_t
244 #if (__STD_C || defined(WIN32))
245 #define Void_t void
246 #else
247 #define Void_t char
248 #endif
249 #endif /*Void_t*/
251 #if __STD_C
252 #include <stddef.h> /* for size_t */
253 #include <stdlib.h> /* for getenv(), abort() */
254 #else
255 #include <sys/types.h>
256 #endif
258 #include <malloc-machine.h>
260 #ifdef _LIBC
261 #include <stdio-common/_itoa.h>
262 #endif
264 #ifdef __cplusplus
265 extern "C" {
266 #endif
268 /* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
270 /* #define LACKS_UNISTD_H */
272 #ifndef LACKS_UNISTD_H
273 #include <unistd.h>
274 #endif
276 /* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
278 /* #define LACKS_SYS_PARAM_H */
281 #include <stdio.h> /* needed for malloc_stats */
282 #include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
284 /* For uintptr_t. */
285 #include <stdint.h>
287 /* For va_arg, va_start, va_end. */
288 #include <stdarg.h>
290 /* For writev and struct iovec. */
291 #include <sys/uio.h>
292 /* For syslog. */
293 #include <sys/syslog.h>
295 /* For various dynamic linking things. */
296 #include <dlfcn.h>
300 Debugging:
302 Because freed chunks may be overwritten with bookkeeping fields, this
303 malloc will often die when freed memory is overwritten by user
304 programs. This can be very effective (albeit in an annoying way)
305 in helping track down dangling pointers.
307 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
308 enabled that will catch more memory errors. You probably won't be
309 able to make much sense of the actual assertion errors, but they
310 should help you locate incorrectly overwritten memory. The checking
311 is fairly extensive, and will slow down execution
312 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
313 will attempt to check every non-mmapped allocated and free chunk in
314 the course of computing the summaries. (By nature, mmapped regions
315 cannot be checked very much automatically.)
317 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
318 this code. The assertions in the check routines spell out in more
319 detail the assumptions and invariants underlying the algorithms.
321 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
322 checking that all accesses to malloced memory stay within their
323 bounds. However, there are several add-ons and adaptations of this
324 or other mallocs available that do this.
327 #if MALLOC_DEBUG
328 #include <assert.h>
329 #else
330 #undef assert
331 #define assert(x) ((void)0)
332 #endif
336 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
337 of chunk sizes.
339 The default version is the same as size_t.
341 While not strictly necessary, it is best to define this as an
342 unsigned type, even if size_t is a signed type. This may avoid some
343 artificial size limitations on some systems.
345 On a 64-bit machine, you may be able to reduce malloc overhead by
346 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
347 expense of not being able to handle more than 2^32 of malloced
348 space. If this limitation is acceptable, you are encouraged to set
349 this unless you are on a platform requiring 16byte alignments. In
350 this case the alignment requirements turn out to negate any
351 potential advantages of decreasing size_t word size.
353 Implementors: Beware of the possible combinations of:
354 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
355 and might be the same width as int or as long
356 - size_t might have different width and signedness from INTERNAL_SIZE_T
357 - int and long might be 32 or 64 bits, and might be the same width
358 To deal with this, most comparisons and difference computations
359 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
360 aware of the fact that casting an unsigned int to a wider long does
361 not sign-extend. (This also makes checking for negative numbers
362 awkward.) Some of these casts result in harmless compiler warnings
363 on some systems.
366 #ifndef INTERNAL_SIZE_T
367 #define INTERNAL_SIZE_T size_t
368 #endif
370 /* The corresponding word size */
371 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
375 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
376 It must be a power of two at least 2 * SIZE_SZ, even on machines
377 for which smaller alignments would suffice. It may be defined as
378 larger than this though. Note however that code and data structures
379 are optimized for the case of 8-byte alignment.
383 #ifndef MALLOC_ALIGNMENT
384 /* XXX This is the correct definition. It differs from 2*SIZE_SZ only on
385 powerpc32. For the time being, changing this is causing more
386 compatibility problems due to malloc_get_state/malloc_set_state than
387 will returning blocks not adequately aligned for long double objects
388 under -mlong-double-128.
389 #define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
390 ? __alignof__ (long double) : 2 * SIZE_SZ)
391 */
392 #define MALLOC_ALIGNMENT (2 * SIZE_SZ)
393 #endif
395 /* The corresponding bit mask value */
396 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
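/*
   A minimal sketch of how MALLOC_ALIGN_MASK is typically used: rounding a
   value up to the next multiple of MALLOC_ALIGNMENT. The macro name below
   is hypothetical and for illustration only; the real request-to-size
   conversion used later in this file also adds overhead and enforces a
   minimum chunk size.

     #define EXAMPLE_ALIGN_UP(sz) \
       (((sz) + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

   With MALLOC_ALIGNMENT == 8, EXAMPLE_ALIGN_UP(13) == 16 and
   EXAMPLE_ALIGN_UP(16) == 16.
*/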
401 REALLOC_ZERO_BYTES_FREES should be set if a call to
402 realloc with zero bytes should be the same as a call to free.
403 This is required by the C standard. Otherwise, since this malloc
404 returns a unique pointer for malloc(0), so does realloc(p, 0).
407 #ifndef REALLOC_ZERO_BYTES_FREES
408 #define REALLOC_ZERO_BYTES_FREES 1
409 #endif
412 TRIM_FASTBINS controls whether free() of a very small chunk can
413 immediately lead to trimming. Setting to true (1) can reduce memory
414 footprint, but will almost always slow down programs that use a lot
415 of small chunks.
417 Define this only if you are willing to give up some speed to more
418 aggressively reduce system-level memory footprint when releasing
419 memory in programs that use many small chunks. You can get
420 essentially the same effect by setting MXFAST to 0, but this can
421 lead to even greater slowdowns in programs using many small chunks.
422 TRIM_FASTBINS is an in-between compile-time option that prevents
423 only those chunks bordering topmost memory from being placed in
424 fastbins.
427 #ifndef TRIM_FASTBINS
428 #define TRIM_FASTBINS 0
429 #endif
433 USE_DL_PREFIX will prefix all public routines with the string 'dl'.
434 This is necessary when you only want to use this malloc in one part
435 of a program, using your regular system malloc elsewhere.
438 /* #define USE_DL_PREFIX */
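/*
   A minimal usage sketch, assuming USE_DL_PREFIX is defined: this malloc
   can then coexist with the system allocator, and callers pick one
   explicitly via the prefixed names.

     void* p = dlmalloc(128);   // served by this allocator
     void* q = malloc(128);     // served by the regular system malloc
     dlfree(p);
     free(q);
*/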
442 Two-phase name translation.
443 All of the actual routines are given mangled names.
444 When wrappers are used, they become the public callable versions.
445 When DL_PREFIX is used, the callable names are prefixed.
448 #ifdef USE_DL_PREFIX
449 #define public_cALLOc dlcalloc
450 #define public_fREe dlfree
451 #define public_cFREe dlcfree
452 #define public_mALLOc dlmalloc
453 #define public_mEMALIGn dlmemalign
454 #define public_rEALLOc dlrealloc
455 #define public_vALLOc dlvalloc
456 #define public_pVALLOc dlpvalloc
457 #define public_mALLINFo dlmallinfo
458 #define public_mALLOPt dlmallopt
459 #define public_mTRIm dlmalloc_trim
460 #define public_mSTATs dlmalloc_stats
461 #define public_mUSABLe dlmalloc_usable_size
462 #define public_iCALLOc dlindependent_calloc
463 #define public_iCOMALLOc dlindependent_comalloc
464 #define public_gET_STATe dlget_state
465 #define public_sET_STATe dlset_state
466 #else /* USE_DL_PREFIX */
467 #ifdef _LIBC
469 /* Special defines for the GNU C library. */
470 #define public_cALLOc __libc_calloc
471 #define public_fREe __libc_free
472 #define public_cFREe __libc_cfree
473 #define public_mALLOc __libc_malloc
474 #define public_mEMALIGn __libc_memalign
475 #define public_rEALLOc __libc_realloc
476 #define public_vALLOc __libc_valloc
477 #define public_pVALLOc __libc_pvalloc
478 #define public_mALLINFo __libc_mallinfo
479 #define public_mALLOPt __libc_mallopt
480 #define public_mTRIm __malloc_trim
481 #define public_mSTATs __malloc_stats
482 #define public_mUSABLe __malloc_usable_size
483 #define public_iCALLOc __libc_independent_calloc
484 #define public_iCOMALLOc __libc_independent_comalloc
485 #define public_gET_STATe __malloc_get_state
486 #define public_sET_STATe __malloc_set_state
487 #define malloc_getpagesize __getpagesize()
488 #define open __open
489 #define mmap __mmap
490 #define munmap __munmap
491 #define mremap __mremap
492 #define mprotect __mprotect
493 #define MORECORE (*__morecore)
494 #define MORECORE_FAILURE 0
496 Void_t * __default_morecore (ptrdiff_t);
497 Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;
499 #else /* !_LIBC */
500 #define public_cALLOc calloc
501 #define public_fREe free
502 #define public_cFREe cfree
503 #define public_mALLOc malloc
504 #define public_mEMALIGn memalign
505 #define public_rEALLOc realloc
506 #define public_vALLOc valloc
507 #define public_pVALLOc pvalloc
508 #define public_mALLINFo mallinfo
509 #define public_mALLOPt mallopt
510 #define public_mTRIm malloc_trim
511 #define public_mSTATs malloc_stats
512 #define public_mUSABLe malloc_usable_size
513 #define public_iCALLOc independent_calloc
514 #define public_iCOMALLOc independent_comalloc
515 #define public_gET_STATe malloc_get_state
516 #define public_sET_STATe malloc_set_state
517 #endif /* _LIBC */
518 #endif /* USE_DL_PREFIX */
520 #ifndef _LIBC
521 #define __builtin_expect(expr, val) (expr)
523 #define fwrite(buf, size, count, fp) _IO_fwrite (buf, size, count, fp)
524 #endif
527 HAVE_MEMCPY should be defined if you are not otherwise using
528 ANSI STD C, but still have memcpy and memset in your C library
529 and want to use them in calloc and realloc. Otherwise simple
530 macro versions are defined below.
532 USE_MEMCPY should be defined as 1 if you actually want to
533 have memset and memcpy called. People report that the macro
534 versions are faster than libc versions on some systems.
536 Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
537 (of <= 36 bytes) are manually unrolled in realloc and calloc.
540 #define HAVE_MEMCPY
542 #ifndef USE_MEMCPY
543 #ifdef HAVE_MEMCPY
544 #define USE_MEMCPY 1
545 #else
546 #define USE_MEMCPY 0
547 #endif
548 #endif
551 #if (__STD_C || defined(HAVE_MEMCPY))
553 #ifdef _LIBC
554 # include <string.h>
555 #else
556 #ifdef WIN32
557 /* On Win32 memset and memcpy are already declared in windows.h */
558 #else
559 #if __STD_C
560 void* memset(void*, int, size_t);
561 void* memcpy(void*, const void*, size_t);
562 #else
563 Void_t* memset();
564 Void_t* memcpy();
565 #endif
566 #endif
567 #endif
568 #endif
571 MALLOC_FAILURE_ACTION is the action to take before "return 0" when
572 malloc fails to be able to return memory, either because memory is
573 exhausted or because of illegal arguments.
575 By default, sets errno if running on STD_C platform, else does nothing.
578 #ifndef MALLOC_FAILURE_ACTION
579 #if __STD_C
580 #define MALLOC_FAILURE_ACTION \
581 errno = ENOMEM;
583 #else
584 #define MALLOC_FAILURE_ACTION
585 #endif
586 #endif
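/*
   A possible override, shown only as a sketch: a project can define
   MALLOC_FAILURE_ACTION before this point to do extra bookkeeping when
   an allocation fails. The counter name below is hypothetical.

     extern unsigned long my_malloc_failures;   // hypothetical counter
     #define MALLOC_FAILURE_ACTION \
       { errno = ENOMEM; ++my_malloc_failures; }
*/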
589 MORECORE-related declarations. By default, rely on sbrk
593 #ifdef LACKS_UNISTD_H
594 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
595 #if __STD_C
596 extern Void_t* sbrk(ptrdiff_t);
597 #else
598 extern Void_t* sbrk();
599 #endif
600 #endif
601 #endif
604 MORECORE is the name of the routine to call to obtain more memory
605 from the system. See below for general guidance on writing
606 alternative MORECORE functions, as well as a version for WIN32 and a
607 sample version for pre-OSX macos.
610 #ifndef MORECORE
611 #define MORECORE sbrk
612 #endif
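/*
   A sketch of an alternative MORECORE, assuming memory is carved out of a
   fixed static region instead of calling sbrk; all names here are
   hypothetical. Fuller guidance on writing MORECORE replacements appears
   later in this file.

     static char  my_heap[8 * 1024 * 1024];     // hypothetical backing store
     static char* my_brk = my_heap;

     void* my_morecore(ptrdiff_t increment) {
       char* old = my_brk;
       if (increment < 0 ||
           (size_t)(my_brk - my_heap) + (size_t)increment > sizeof(my_heap))
         return (void*) MORECORE_FAILURE;       // cannot trim or grow further
       my_brk += increment;
       return old;                              // MORECORE(0) returns current end
     }

   Compile with -DMORECORE=my_morecore (and MORECORE_CANNOT_TRIM, since
   negative arguments are rejected above).
*/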
615 MORECORE_FAILURE is the value returned upon failure of MORECORE
616 as well as mmap. Since it cannot be an otherwise valid memory address,
617 and must reflect values of standard sys calls, you probably ought not
618 try to redefine it.
621 #ifndef MORECORE_FAILURE
622 #define MORECORE_FAILURE (-1)
623 #endif
626 If MORECORE_CONTIGUOUS is true, take advantage of fact that
627 consecutive calls to MORECORE with positive arguments always return
628 contiguous increasing addresses. This is true of unix sbrk. Even
629 if not defined, when regions happen to be contiguous, malloc will
630 permit allocations spanning regions obtained from different
631 calls. But defining this when applicable enables some stronger
632 consistency checks and space efficiencies.
635 #ifndef MORECORE_CONTIGUOUS
636 #define MORECORE_CONTIGUOUS 1
637 #endif
640 Define MORECORE_CANNOT_TRIM if your version of MORECORE
641 cannot release space back to the system when given negative
642 arguments. This is generally necessary only if you are using
643 a hand-crafted MORECORE function that cannot handle negative arguments.
646 /* #define MORECORE_CANNOT_TRIM */
648 /* MORECORE_CLEARS (default 1)
649 The degree to which the routine mapped to MORECORE zeroes out
650 memory: never (0), only for newly allocated space (1) or always
651 (2). The distinction between (1) and (2) is necessary because on
652 some systems, if the application first decrements and then
653 increments the break value, the contents of the reallocated space
654 are unspecified.
657 #ifndef MORECORE_CLEARS
658 #define MORECORE_CLEARS 1
659 #endif
663 Define HAVE_MMAP as true to optionally make malloc() use mmap() to
664 allocate very large blocks. These will be returned to the
665 operating system immediately after a free(). Also, if mmap
666 is available, it is used as a backup strategy in cases where
667 MORECORE fails to provide space from system.
669 This malloc is best tuned to work with mmap for large requests.
670 If you do not have mmap, operations involving very large chunks (1MB
671 or so) may be slower than you'd like.
674 #ifndef HAVE_MMAP
675 #define HAVE_MMAP 1
678 Standard unix mmap using /dev/zero clears memory so calloc doesn't
679 need to.
682 #ifndef MMAP_CLEARS
683 #define MMAP_CLEARS 1
684 #endif
686 #else /* no mmap */
687 #ifndef MMAP_CLEARS
688 #define MMAP_CLEARS 0
689 #endif
690 #endif
694 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
695 sbrk fails, and mmap is used as a backup (which is done only if
696 HAVE_MMAP). The value must be a multiple of page size. This
697 backup strategy generally applies only when systems have "holes" in
698 address space, so sbrk cannot perform contiguous expansion, but
699 there is still space available on system. On systems for which
700 this is known to be useful (i.e. most linux kernels), this occurs
701 only when programs allocate huge amounts of memory. Between this,
702 and the fact that mmap regions tend to be limited, the size should
703 be large, to avoid too many mmap calls and thus avoid running out
704 of kernel resources.
707 #ifndef MMAP_AS_MORECORE_SIZE
708 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
709 #endif
712 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
713 large blocks. This is currently only possible on Linux with
714 kernel versions newer than 1.3.77.
717 #ifndef HAVE_MREMAP
718 #ifdef linux
719 #define HAVE_MREMAP 1
720 #else
721 #define HAVE_MREMAP 0
722 #endif
724 #endif /* HAVE_MREMAP */
726 /* Define USE_ARENAS to enable support for multiple `arenas'. These
727 are allocated using mmap(), are necessary for threads and
728 occasionally useful to overcome address space limitations affecting
729 sbrk(). */
731 #ifndef USE_ARENAS
732 #define USE_ARENAS HAVE_MMAP
733 #endif
737 The system page size. To the extent possible, this malloc manages
738 memory from the system in page-size units. Note that this value is
739 cached during initialization into a field of malloc_state. So even
740 if malloc_getpagesize is a function, it is only called once.
742 The following mechanics for getpagesize were adapted from bsd/gnu
743 getpagesize.h. If none of the system-probes here apply, a value of
744 4096 is used, which should be OK: If they don't apply, then using
745 the actual value probably doesn't impact performance.
749 #ifndef malloc_getpagesize
751 #ifndef LACKS_UNISTD_H
752 # include <unistd.h>
753 #endif
755 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
756 # ifndef _SC_PAGE_SIZE
757 # define _SC_PAGE_SIZE _SC_PAGESIZE
758 # endif
759 # endif
761 # ifdef _SC_PAGE_SIZE
762 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
763 # else
764 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
765 extern size_t getpagesize();
766 # define malloc_getpagesize getpagesize()
767 # else
768 # ifdef WIN32 /* use supplied emulation of getpagesize */
769 # define malloc_getpagesize getpagesize()
770 # else
771 # ifndef LACKS_SYS_PARAM_H
772 # include <sys/param.h>
773 # endif
774 # ifdef EXEC_PAGESIZE
775 # define malloc_getpagesize EXEC_PAGESIZE
776 # else
777 # ifdef NBPG
778 # ifndef CLSIZE
779 # define malloc_getpagesize NBPG
780 # else
781 # define malloc_getpagesize (NBPG * CLSIZE)
782 # endif
783 # else
784 # ifdef NBPC
785 # define malloc_getpagesize NBPC
786 # else
787 # ifdef PAGESIZE
788 # define malloc_getpagesize PAGESIZE
789 # else /* just guess */
790 # define malloc_getpagesize (4096)
791 # endif
792 # endif
793 # endif
794 # endif
795 # endif
796 # endif
797 # endif
798 #endif
801 This version of malloc supports the standard SVID/XPG mallinfo
802 routine that returns a struct containing usage properties and
803 statistics. It should work on any SVID/XPG compliant system that has
804 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
805 install such a thing yourself, cut out the preliminary declarations
806 as described above and below and save them in a malloc.h file. But
807 there's no compelling reason to bother to do this.)
809 The main declaration needed is the mallinfo struct that is returned
810 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
811 bunch of fields that are not even meaningful in this version of
812 malloc. These fields are instead filled by mallinfo() with
813 other numbers that might be of interest.
815 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
816 /usr/include/malloc.h file that includes a declaration of struct
817 mallinfo. If so, it is included; else an SVID2/XPG2 compliant
818 version is declared below. These must be precisely the same for
819 mallinfo() to work. The original SVID version of this struct,
820 defined on most systems with mallinfo, declares all fields as
821 ints. But some others define them as unsigned long. If your system
822 defines the fields using a type of different width than listed here,
823 you must #include your system version and #define
824 HAVE_USR_INCLUDE_MALLOC_H.
827 /* #define HAVE_USR_INCLUDE_MALLOC_H */
829 #ifdef HAVE_USR_INCLUDE_MALLOC_H
830 #include "/usr/include/malloc.h"
831 #endif
834 /* ---------- description of public routines ------------ */
837 malloc(size_t n)
838 Returns a pointer to a newly allocated chunk of at least n bytes, or null
839 if no space is available. Additionally, on failure, errno is
840 set to ENOMEM on ANSI C systems.
842 If n is zero, malloc returns a minimum-sized chunk. (The minimum
843 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
844 systems.) On most systems, size_t is an unsigned type, so calls
845 with negative arguments are interpreted as requests for huge amounts
846 of space, which will often fail. The maximum supported value of n
847 differs across systems, but is in all cases less than the maximum
848 representable value of a size_t.
850 #if __STD_C
851 Void_t* public_mALLOc(size_t);
852 #else
853 Void_t* public_mALLOc();
854 #endif
855 #ifdef libc_hidden_proto
856 libc_hidden_proto (public_mALLOc)
857 #endif
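/*
   A minimal usage sketch: check for a null return; on ANSI C systems
   errno will have been set to ENOMEM on failure.

     char* buf = (char*) malloc(1024);
     if (buf == 0) {
       perror("malloc");   // typically reports ENOMEM
       die();              // hypothetical error handler, as in later examples
     }
     // ... use up to 1024 bytes of buf ...
     free(buf);
*/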
860 free(Void_t* p)
861 Releases the chunk of memory pointed to by p, that had been previously
862 allocated using malloc or a related routine such as realloc.
863 It has no effect if p is null. It can have arbitrary (i.e., bad!)
864 effects if p has already been freed.
866 Unless disabled (using mallopt), freeing very large spaces will,
867 when possible, automatically trigger operations that give
868 back unused memory to the system, thus reducing program footprint.
870 #if __STD_C
871 void public_fREe(Void_t*);
872 #else
873 void public_fREe();
874 #endif
875 #ifdef libc_hidden_proto
876 libc_hidden_proto (public_fREe)
877 #endif
880 calloc(size_t n_elements, size_t element_size);
881 Returns a pointer to n_elements * element_size bytes, with all locations
882 set to zero.
884 #if __STD_C
885 Void_t* public_cALLOc(size_t, size_t);
886 #else
887 Void_t* public_cALLOc();
888 #endif
891 realloc(Void_t* p, size_t n)
892 Returns a pointer to a chunk of size n that contains the same data
893 as does chunk p up to the minimum of (n, p's size) bytes, or null
894 if no space is available.
896 The returned pointer may or may not be the same as p. The algorithm
897 prefers extending p when possible, otherwise it employs the
898 equivalent of a malloc-copy-free sequence.
900 If p is null, realloc is equivalent to malloc.
902 If space is not available, realloc returns null, errno is set (if on
903 ANSI) and p is NOT freed.
905 If n is for fewer bytes than already held by p, the newly unused
906 space is lopped off and freed if possible. Unless the #define
907 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
908 zero (re)allocates a minimum-sized chunk.
910 Large chunks that were internally obtained via mmap will always
911 be reallocated using malloc-copy-free sequences unless
912 the system supports MREMAP (currently only linux).
914 The old unix realloc convention of allowing the last-free'd chunk
915 to be used as an argument to realloc is not supported.
917 #if __STD_C
918 Void_t* public_rEALLOc(Void_t*, size_t);
919 #else
920 Void_t* public_rEALLOc();
921 #endif
922 #ifdef libc_hidden_proto
923 libc_hidden_proto (public_rEALLOc)
924 #endif
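/*
   A minimal usage sketch illustrating that p is NOT freed when realloc
   fails: keep the old pointer until the new one is known to be valid.

     void* tmp = realloc(p, new_size);
     if (tmp == 0) {
       // p is still valid and still owns the old block; errno is set (on ANSI)
       die();              // or recover while keeping p
     } else {
       p = tmp;
     }
*/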
927 memalign(size_t alignment, size_t n);
928 Returns a pointer to a newly allocated chunk of n bytes, aligned
929 in accord with the alignment argument.
931 The alignment argument should be a power of two. If the argument is
932 not a power of two, the nearest greater power is used.
933 8-byte alignment is guaranteed by normal malloc calls, so don't
934 bother calling memalign with an argument of 8 or less.
936 Overreliance on memalign is a sure way to fragment space.
938 #if __STD_C
939 Void_t* public_mEMALIGn(size_t, size_t);
940 #else
941 Void_t* public_mEMALIGn();
942 #endif
943 #ifdef libc_hidden_proto
944 libc_hidden_proto (public_mEMALIGn)
945 #endif
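/*
   A minimal usage sketch: requesting a 64-byte aligned block. The
   alignment must be a power of two; values of 8 or less are pointless
   because ordinary malloc already guarantees that much.

     void* p = memalign(64, 4096);
     if (p != 0)
       assert(((unsigned long) p & 63) == 0);   // 64-byte aligned
     free(p);                                   // free(0) is a no-op
*/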
948 valloc(size_t n);
949 Equivalent to memalign(pagesize, n), where pagesize is the page
950 size of the system. If the pagesize is unknown, 4096 is used.
952 #if __STD_C
953 Void_t* public_vALLOc(size_t);
954 #else
955 Void_t* public_vALLOc();
956 #endif
961 mallopt(int parameter_number, int parameter_value)
962 Sets tunable parameters. The format is to provide a
963 (parameter-number, parameter-value) pair. mallopt then sets the
964 corresponding parameter to the argument value if it can (i.e., so
965 long as the value is meaningful), and returns 1 if successful else
966 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
967 normally defined in malloc.h. Only one of these (M_MXFAST) is used
968 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
969 so setting them has no effect. But this malloc also supports four
970 other options in mallopt. See below for details. Briefly, supported
971 parameters are as follows (listed defaults are for "typical"
972 configurations).
974 Symbol param # default allowed param values
975 M_MXFAST 1 64 0-80 (0 disables fastbins)
976 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
977 M_TOP_PAD -2 0 any
978 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
979 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
981 #if __STD_C
982 int public_mALLOPt(int, int);
983 #else
984 int public_mALLOPt();
985 #endif
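/*
   A minimal usage sketch: each call adjusts one parameter and returns 1
   on success, 0 otherwise.

     mallopt(M_MXFAST, 0);                   // disable fastbins entirely
     mallopt(M_TRIM_THRESHOLD, 64 * 1024);   // trim more aggressively
     mallopt(M_MMAP_THRESHOLD, 256 * 1024);  // mmap only larger requests
*/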
989 mallinfo()
990 Returns (by copy) a struct containing various summary statistics:
992 arena: current total non-mmapped bytes allocated from system
993 ordblks: the number of free chunks
994 smblks: the number of fastbin blocks (i.e., small chunks that
995 have been freed but not yet reused or consolidated)
996 hblks: current number of mmapped regions
997 hblkhd: total bytes held in mmapped regions
998 usmblks: the maximum total allocated space. This will be greater
999 than current total if trimming has occurred.
1000 fsmblks: total bytes held in fastbin blocks
1001 uordblks: current total allocated space (normal or mmapped)
1002 fordblks: total free space
1003 keepcost: the maximum number of bytes that could ideally be released
1004 back to system via malloc_trim. ("ideally" means that
1005 it ignores page restrictions etc.)
1007 Because these fields are ints, but internal bookkeeping may
1008 be kept as longs, the reported values may wrap around zero and
1009 thus be inaccurate.
1011 #if __STD_C
1012 struct mallinfo public_mALLINFo(void);
1013 #else
1014 struct mallinfo public_mALLINFo();
1015 #endif
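/*
   A minimal usage sketch: the struct is returned by copy, so a snapshot
   can be taken and printed directly. All fields are ints.

     struct mallinfo mi = mallinfo();
     fprintf(stderr, "arena=%d uordblks=%d fordblks=%d keepcost=%d\n",
             mi.arena, mi.uordblks, mi.fordblks, mi.keepcost);
*/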
1017 #ifndef _LIBC
1019 independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1021 independent_calloc is similar to calloc, but instead of returning a
1022 single cleared space, it returns an array of pointers to n_elements
1023 independent elements that can hold contents of size elem_size, each
1024 of which starts out cleared, and can be independently freed,
1025 realloc'ed etc. The elements are guaranteed to be adjacently
1026 allocated (this is not guaranteed to occur with multiple callocs or
1027 mallocs), which may also improve cache locality in some
1028 applications.
1030 The "chunks" argument is optional (i.e., may be null, which is
1031 probably the most typical usage). If it is null, the returned array
1032 is itself dynamically allocated and should also be freed when it is
1033 no longer needed. Otherwise, the chunks array must be of at least
1034 n_elements in length. It is filled in with the pointers to the
1035 chunks.
1037 In either case, independent_calloc returns this pointer array, or
1038 null if the allocation failed. If n_elements is zero and "chunks"
1039 is null, it returns a chunk representing an array with zero elements
1040 (which should be freed if not wanted).
1042 Each element must be individually freed when it is no longer
1043 needed. If you'd like to instead be able to free all at once, you
1044 should instead use regular calloc and assign pointers into this
1045 space to represent elements. (In this case though, you cannot
1046 independently free elements.)
1048 independent_calloc simplifies and speeds up implementations of many
1049 kinds of pools. It may also be useful when constructing large data
1050 structures that initially have a fixed number of fixed-sized nodes,
1051 but the number is not known at compile time, and some of the nodes
1052 may later need to be freed. For example:
1054 struct Node { int item; struct Node* next; };
1056 struct Node* build_list() {
1057 struct Node** pool;
1058 int n = read_number_of_nodes_needed();
1059 if (n <= 0) return 0;
1060 pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1061 if (pool == 0) die();
1062 // organize into a linked list...
1063 struct Node* first = pool[0];
1064 for (int i = 0; i < n-1; ++i)
1065 pool[i]->next = pool[i+1];
1066 free(pool); // Can now free the array (or not, if it is needed later)
1067 return first;
1068 }
1070 #if __STD_C
1071 Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1072 #else
1073 Void_t** public_iCALLOc();
1074 #endif
1077 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1079 independent_comalloc allocates, all at once, a set of n_elements
1080 chunks with sizes indicated in the "sizes" array. It returns
1081 an array of pointers to these elements, each of which can be
1082 independently freed, realloc'ed etc. The elements are guaranteed to
1083 be adjacently allocated (this is not guaranteed to occur with
1084 multiple callocs or mallocs), which may also improve cache locality
1085 in some applications.
1087 The "chunks" argument is optional (i.e., may be null). If it is null
1088 the returned array is itself dynamically allocated and should also
1089 be freed when it is no longer needed. Otherwise, the chunks array
1090 must be of at least n_elements in length. It is filled in with the
1091 pointers to the chunks.
1093 In either case, independent_comalloc returns this pointer array, or
1094 null if the allocation failed. If n_elements is zero and chunks is
1095 null, it returns a chunk representing an array with zero elements
1096 (which should be freed if not wanted).
1098 Each element must be individually freed when it is no longer
1099 needed. If you'd like to instead be able to free all at once, you
1100 should instead use a single regular malloc, and assign pointers at
1101 particular offsets in the aggregate space. (In this case though, you
1102 cannot independently free elements.)
1104 independent_comalloc differs from independent_calloc in that each
1105 element may have a different size, and also that it does not
1106 automatically clear elements.
1108 independent_comalloc can be used to speed up allocation in cases
1109 where several structs or objects must always be allocated at the
1110 same time. For example:
1112 struct Head { ... }
1113 struct Foot { ... }
1115 void send_message(char* msg) {
1116 int msglen = strlen(msg);
1117 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1118 void* chunks[3];
1119 if (independent_comalloc(3, sizes, chunks) == 0)
1120 die();
1121 struct Head* head = (struct Head*)(chunks[0]);
1122 char* body = (char*)(chunks[1]);
1123 struct Foot* foot = (struct Foot*)(chunks[2]);
1124 // ...
1125 }
1127 In general though, independent_comalloc is worth using only for
1128 larger values of n_elements. For small values, you probably won't
1129 detect enough difference from series of malloc calls to bother.
1131 Overuse of independent_comalloc can increase overall memory usage,
1132 since it cannot reuse existing noncontiguous small chunks that
1133 might be available for some of the elements.
1135 #if __STD_C
1136 Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1137 #else
1138 Void_t** public_iCOMALLOc();
1139 #endif
1141 #endif /* _LIBC */
1145 pvalloc(size_t n);
1146 Equivalent to valloc(minimum-page-that-holds(n)), that is,
1147 round up n to nearest pagesize.
1149 #if __STD_C
1150 Void_t* public_pVALLOc(size_t);
1151 #else
1152 Void_t* public_pVALLOc();
1153 #endif
1156 cfree(Void_t* p);
1157 Equivalent to free(p).
1159 cfree is needed/defined on some systems that pair it with calloc,
1160 for odd historical reasons (such as: cfree is used in example
1161 code in the first edition of K&R).
1163 #if __STD_C
1164 void public_cFREe(Void_t*);
1165 #else
1166 void public_cFREe();
1167 #endif
1170 malloc_trim(size_t pad);
1172 If possible, gives memory back to the system (via negative
1173 arguments to sbrk) if there is unused memory at the `high' end of
1174 the malloc pool. You can call this after freeing large blocks of
1175 memory to potentially reduce the system-level memory requirements
1176 of a program. However, it cannot guarantee to reduce memory. Under
1177 some allocation patterns, some large free blocks of memory will be
1178 locked between two used chunks, so they cannot be given back to
1179 the system.
1181 The `pad' argument to malloc_trim represents the amount of free
1182 trailing space to leave untrimmed. If this argument is zero,
1183 only the minimum amount of memory to maintain internal data
1184 structures will be left (one page or less). Non-zero arguments
1185 can be supplied to maintain enough trailing space to service
1186 future expected allocations without having to re-obtain memory
1187 from the system.
1189 Malloc_trim returns 1 if it actually released any memory, else 0.
1190 On systems that do not support "negative sbrks", it will always
1191 return 0.
1193 #if __STD_C
1194 int public_mTRIm(size_t);
1195 #else
1196 int public_mTRIm();
1197 #endif
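/*
   A minimal usage sketch: after releasing a large working set, a
   long-lived program can ask for the top of the heap to be handed back
   to the system, keeping some slack for upcoming allocations.

     // ... free many large blocks ...
     if (malloc_trim(64 * 1024))
       fprintf(stderr, "some memory was returned to the system\n");
*/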
1200 malloc_usable_size(Void_t* p);
1202 Returns the number of bytes you can actually use in
1203 an allocated chunk, which may be more than you requested (although
1204 often not) due to alignment and minimum size constraints.
1205 You can use this many bytes without worrying about
1206 overwriting other allocated objects. This is not a particularly great
1207 programming practice. malloc_usable_size can be more useful in
1208 debugging and assertions, for example:
1210 p = malloc(n);
1211 assert(malloc_usable_size(p) >= 256);
1214 #if __STD_C
1215 size_t public_mUSABLe(Void_t*);
1216 #else
1217 size_t public_mUSABLe();
1218 #endif
1221 malloc_stats();
1222 Prints on stderr the amount of space obtained from the system (both
1223 via sbrk and mmap), the maximum amount (which may be more than
1224 current if malloc_trim and/or munmap got called), and the current
1225 number of bytes allocated via malloc (or realloc, etc) but not yet
1226 freed. Note that this is the number of bytes allocated, not the
1227 number requested. It will be larger than the number requested
1228 because of alignment and bookkeeping overhead. Because it includes
1229 alignment wastage as being in use, this figure may be greater than
1230 zero even when no user-level chunks are allocated.
1232 The reported current and maximum system memory can be inaccurate if
1233 a program makes other calls to system memory allocation functions
1234 (normally sbrk) outside of malloc.
1236 malloc_stats prints only the most commonly interesting statistics.
1237 More information can be obtained by calling mallinfo.
1240 #if __STD_C
1241 void public_mSTATs(void);
1242 #else
1243 void public_mSTATs();
1244 #endif
1247 malloc_get_state(void);
1249 Returns the state of all malloc variables in an opaque data
1250 structure.
1252 #if __STD_C
1253 Void_t* public_gET_STATe(void);
1254 #else
1255 Void_t* public_gET_STATe();
1256 #endif
1259 malloc_set_state(Void_t* state);
1261 Restore the state of all malloc variables from data obtained with
1262 malloc_get_state().
1264 #if __STD_C
1265 int public_sET_STATe(Void_t*);
1266 #else
1267 int public_sET_STATe();
1268 #endif
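/*
   A minimal usage sketch: the opaque state returned by malloc_get_state
   is intended to be handed back, unchanged, to malloc_set_state (for
   example around a dump/restart cycle); a zero return from
   malloc_set_state indicates success.

     Void_t* state = malloc_get_state();
     if (state != 0) {
       // ... preserve state across whatever needs it ...
       if (malloc_set_state(state) != 0)
         die();             // hypothetical error handler
     }
*/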
1270 #ifdef _LIBC
1272 posix_memalign(void **memptr, size_t alignment, size_t size);
1274 POSIX wrapper like memalign(), checking for validity of size.
1276 int __posix_memalign(void **, size_t, size_t);
1277 #endif
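/*
   A minimal usage sketch of the POSIX-visible name: unlike memalign,
   posix_memalign reports errors through its return value (EINVAL for a
   bad alignment, ENOMEM when out of memory) and stores the result
   through memptr on success. The alignment must be a power of two
   multiple of sizeof(void*).

     void* p;
     if (posix_memalign(&p, 64, 4096) == 0) {
       // p is 64-byte aligned
       free(p);
     }
*/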
1279 /* mallopt tuning options */
1282 M_MXFAST is the maximum request size used for "fastbins", special bins
1283 that hold returned chunks without consolidating their spaces. This
1284 enables future requests for chunks of the same size to be handled
1285 very quickly, but can increase fragmentation, and thus increase the
1286 overall memory footprint of a program.
1288 This malloc manages fastbins very conservatively yet still
1289 efficiently, so fragmentation is rarely a problem for values less
1290 than or equal to the default. The maximum supported value of MXFAST
1291 is 80. You wouldn't want it any higher than this anyway. Fastbins
1292 are designed especially for use with many small structs, objects or
1293 strings -- the default handles structs/objects/arrays with sizes up
1294 to 8 4byte fields, or small strings representing words, tokens,
1295 etc. Using fastbins for larger objects normally worsens
1296 fragmentation without improving speed.
1298 M_MXFAST is set in REQUEST size units. It is internally used in
1299 chunksize units, which adds padding and alignment. You can reduce
1300 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1301 algorithm to be a closer approximation of fifo-best-fit in all cases,
1302 not just for larger requests, but will generally cause it to be
1303 slower.
1307 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1308 #ifndef M_MXFAST
1309 #define M_MXFAST 1
1310 #endif
1312 #ifndef DEFAULT_MXFAST
1313 #define DEFAULT_MXFAST 64
1314 #endif
1318 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1319 to keep before releasing via malloc_trim in free().
1321 Automatic trimming is mainly useful in long-lived programs.
1322 Because trimming via sbrk can be slow on some systems, and can
1323 sometimes be wasteful (in cases where programs immediately
1324 afterward allocate more large chunks) the value should be high
1325 enough so that your overall system performance would improve by
1326 releasing this much memory.
1328 The trim threshold and the mmap control parameters (see below)
1329 can be traded off with one another. Trimming and mmapping are
1330 two different ways of releasing unused memory back to the
1331 system. Between these two, it is often possible to keep
1332 system-level demands of a long-lived program down to a bare
1333 minimum. For example, in one test suite of sessions measuring
1334 the XF86 X server on Linux, using a trim threshold of 128K and a
1335 mmap threshold of 192K led to near-minimal long term resource
1336 consumption.
1338 If you are using this malloc in a long-lived program, it should
1339 pay to experiment with these values. As a rough guide, you
1340 might set to a value close to the average size of a process
1341 (program) running on your system. Releasing this much memory
1342 would allow such a process to run in memory. Generally, it's
1343 worth it to tune for trimming rather than memory mapping when a
1344 program undergoes phases where several large chunks are
1345 allocated and released in ways that can reuse each other's
1346 storage, perhaps mixed with phases where there are no such
1347 chunks at all. And in well-behaved long-lived programs,
1348 controlling release of large blocks via trimming versus mapping
1349 is usually faster.
1351 However, in most programs, these parameters serve mainly as
1352 protection against the system-level effects of carrying around
1353 massive amounts of unneeded memory. Since frequent calls to
1354 sbrk, mmap, and munmap otherwise degrade performance, the default
1355 parameters are set to relatively high values that serve only as
1356 safeguards.
1358 The trim value must be greater than page size to have any useful
1359 effect. To disable trimming completely, you can set to
1360 (unsigned long)(-1)
1362 Trim settings interact with fastbin (MXFAST) settings: Unless
1363 TRIM_FASTBINS is defined, automatic trimming never takes place upon
1364 freeing a chunk with size less than or equal to MXFAST. Trimming is
1365 instead delayed until subsequent freeing of larger chunks. However,
1366 you can still force an attempted trim by calling malloc_trim.
1368 Also, trimming is not generally possible in cases where
1369 the main arena is obtained via mmap.
1371 Note that the trick some people use of mallocing a huge space and
1372 then freeing it at program startup, in an attempt to reserve system
1373 memory, doesn't have the intended effect under automatic trimming,
1374 since that memory will immediately be returned to the system.
1377 #define M_TRIM_THRESHOLD -1
1379 #ifndef DEFAULT_TRIM_THRESHOLD
1380 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1381 #endif
1384 M_TOP_PAD is the amount of extra `padding' space to allocate or
1385 retain whenever sbrk is called. It is used in two ways internally:
1387 * When sbrk is called to extend the top of the arena to satisfy
1388 a new malloc request, this much padding is added to the sbrk
1389 request.
1391 * When malloc_trim is called automatically from free(),
1392 it is used as the `pad' argument.
1394 In both cases, the actual amount of padding is rounded
1395 so that the end of the arena is always a system page boundary.
1397 The main reason for using padding is to avoid calling sbrk so
1398 often. Having even a small pad greatly reduces the likelihood
1399 that nearly every malloc request during program start-up (or
1400 after trimming) will invoke sbrk, which needlessly wastes
1401 time.
1403 Automatic rounding-up to page-size units is normally sufficient
1404 to avoid measurable overhead, so the default is 0. However, in
1405 systems where sbrk is relatively slow, it can pay to increase
1406 this value, at the expense of carrying around more memory than
1407 the program needs.
1410 #define M_TOP_PAD -2
1412 #ifndef DEFAULT_TOP_PAD
1413 #define DEFAULT_TOP_PAD (0)
1414 #endif
1417 M_MMAP_THRESHOLD is the request size threshold for using mmap()
1418 to service a request. Requests of at least this size that cannot
1419 be allocated using already-existing space will be serviced via mmap.
1420 (If enough normal freed space already exists it is used instead.)
1422 Using mmap segregates relatively large chunks of memory so that
1423 they can be individually obtained and released from the host
1424 system. A request serviced through mmap is never reused by any
1425 other request (at least not directly; the system may just so
1426 happen to remap successive requests to the same locations).
1428 Segregating space in this way has the benefits that:
1430 1. Mmapped space can ALWAYS be individually released back
1431 to the system, which helps keep the system level memory
1432 demands of a long-lived program low.
1433 2. Mapped memory can never become `locked' between
1434 other chunks, as can happen with normally allocated chunks, which
1435 means that even trimming via malloc_trim would not release them.
1436 3. On some systems with "holes" in address spaces, mmap can obtain
1437 memory that sbrk cannot.
1439 However, it has the disadvantages that:
1441 1. The space cannot be reclaimed, consolidated, and then
1442 used to service later requests, as happens with normal chunks.
1443 2. It can lead to more wastage because of mmap page alignment
1444 requirements
1445 3. It causes malloc performance to be more dependent on host
1446 system memory management support routines which may vary in
1447 implementation quality and may impose arbitrary
1448 limitations. Generally, servicing a request via normal
1449 malloc steps is faster than going through a system's mmap.
1451 The advantages of mmap nearly always outweigh disadvantages for
1452 "large" chunks, but the value of "large" varies across systems. The
1453 default is an empirically derived value that works well in most
1454 systems.
1457 #define M_MMAP_THRESHOLD -3
1459 #ifndef DEFAULT_MMAP_THRESHOLD
1460 #define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1461 #endif
1464 M_MMAP_MAX is the maximum number of requests to simultaneously
1465 service using mmap. This parameter exists because
1466 some systems have a limited number of internal tables for
1467 use by mmap, and using more than a few of them may degrade
1468 performance.
1470 The default is set to a value that serves only as a safeguard.
1471 Setting to 0 disables use of mmap for servicing large requests. If
1472 HAVE_MMAP is not set, the default value is 0, and attempts to set it
1473 to non-zero values in mallopt will fail.
1476 #define M_MMAP_MAX -4
1478 #ifndef DEFAULT_MMAP_MAX
1479 #if HAVE_MMAP
1480 #define DEFAULT_MMAP_MAX (65536)
1481 #else
1482 #define DEFAULT_MMAP_MAX (0)
1483 #endif
1484 #endif
1486 #ifdef __cplusplus
1487 } /* end of extern "C" */
1488 #endif
1490 #include <malloc.h>
1492 #ifndef BOUNDED_N
1493 #define BOUNDED_N(ptr, sz) (ptr)
1494 #endif
1495 #ifndef RETURN_ADDRESS
1496 #define RETURN_ADDRESS(X_) (NULL)
1497 #endif
1499 /* On some platforms we can compile internal, not exported functions better.
1500 Let the environment provide a macro and define it to be empty if it
1501 is not available. */
1502 #ifndef internal_function
1503 # define internal_function
1504 #endif
1506 /* Forward declarations. */
1507 struct malloc_chunk;
1508 typedef struct malloc_chunk* mchunkptr;
1510 /* Internal routines. */
1512 #if __STD_C
1514 Void_t* _int_malloc(mstate, size_t);
1515 void _int_free(mstate, Void_t*);
1516 Void_t* _int_realloc(mstate, Void_t*, size_t);
1517 Void_t* _int_memalign(mstate, size_t, size_t);
1518 Void_t* _int_valloc(mstate, size_t);
1519 static Void_t* _int_pvalloc(mstate, size_t);
1520 /*static Void_t* cALLOc(size_t, size_t);*/
1521 #ifndef _LIBC
1522 static Void_t** _int_icalloc(mstate, size_t, size_t, Void_t**);
1523 static Void_t** _int_icomalloc(mstate, size_t, size_t*, Void_t**);
1524 #endif
1525 static int mTRIm(size_t);
1526 static size_t mUSABLe(Void_t*);
1527 static void mSTATs(void);
1528 static int mALLOPt(int, int);
1529 static struct mallinfo mALLINFo(mstate);
1530 static void malloc_printerr(int action, const char *str, void *ptr);
1532 static Void_t* internal_function mem2mem_check(Void_t *p, size_t sz);
1533 static int internal_function top_check(void);
1534 static void internal_function munmap_chunk(mchunkptr p);
1535 #if HAVE_MREMAP
1536 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1537 #endif
1539 static Void_t* malloc_check(size_t sz, const Void_t *caller);
1540 static void free_check(Void_t* mem, const Void_t *caller);
1541 static Void_t* realloc_check(Void_t* oldmem, size_t bytes,
1542 const Void_t *caller);
1543 static Void_t* memalign_check(size_t alignment, size_t bytes,
1544 const Void_t *caller);
1545 #ifndef NO_THREADS
1546 # ifdef _LIBC
1547 # if USE___THREAD || (defined USE_TLS && !defined SHARED)
1548 /* These routines are never needed in this configuration. */
1549 # define NO_STARTER
1550 # endif
1551 # endif
1552 # ifdef NO_STARTER
1553 # undef NO_STARTER
1554 # else
1555 static Void_t* malloc_starter(size_t sz, const Void_t *caller);
1556 static Void_t* memalign_starter(size_t aln, size_t sz, const Void_t *caller);
1557 static void free_starter(Void_t* mem, const Void_t *caller);
1558 # endif
1559 static Void_t* malloc_atfork(size_t sz, const Void_t *caller);
1560 static void free_atfork(Void_t* mem, const Void_t *caller);
1561 #endif
1563 #else
1565 Void_t* _int_malloc();
1566 void _int_free();
1567 Void_t* _int_realloc();
1568 Void_t* _int_memalign();
1569 Void_t* _int_valloc();
1570 Void_t* _int_pvalloc();
1571 /*static Void_t* cALLOc();*/
1572 static Void_t** _int_icalloc();
1573 static Void_t** _int_icomalloc();
1574 static int mTRIm();
1575 static size_t mUSABLe();
1576 static void mSTATs();
1577 static int mALLOPt();
1578 static struct mallinfo mALLINFo();
1580 #endif
1585 /* ------------- Optional versions of memcopy ---------------- */
1588 #if USE_MEMCPY
1591 Note: memcpy is ONLY invoked with non-overlapping regions,
1592 so the (usually slower) memmove is not needed.
1595 #define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1596 #define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1598 #else /* !USE_MEMCPY */
1600 /* Use Duff's device for good zeroing/copying performance. */
1602 #define MALLOC_ZERO(charp, nbytes) \
1603 do { \
1604 INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1605 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1606 long mcn; \
1607 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1608 switch (mctmp) { \
1609 case 0: for(;;) { *mzp++ = 0; \
1610 case 7: *mzp++ = 0; \
1611 case 6: *mzp++ = 0; \
1612 case 5: *mzp++ = 0; \
1613 case 4: *mzp++ = 0; \
1614 case 3: *mzp++ = 0; \
1615 case 2: *mzp++ = 0; \
1616 case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1618 } while(0)
1620 #define MALLOC_COPY(dest,src,nbytes) \
1621 do { \
1622 INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1623 INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1624 unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1625 long mcn; \
1626 if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1627 switch (mctmp) { \
1628 case 0: for(;;) { *mcdst++ = *mcsrc++; \
1629 case 7: *mcdst++ = *mcsrc++; \
1630 case 6: *mcdst++ = *mcsrc++; \
1631 case 5: *mcdst++ = *mcsrc++; \
1632 case 4: *mcdst++ = *mcsrc++; \
1633 case 3: *mcdst++ = *mcsrc++; \
1634 case 2: *mcdst++ = *mcsrc++; \
1635 case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1637 } while(0)
1639 #endif
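/* Illustrative usage sketch -- an editorial addition, not part of the original
   source. Both macros work a whole INTERNAL_SIZE_T word at a time, which is
   why malloc only ever hands them byte counts that are multiples of SIZE_SZ
   and, for MALLOC_COPY, regions that never overlap. */
#if 0
static void
example_malloc_copy_zero(void)
{
  INTERNAL_SIZE_T src[8], dst[8];
  MALLOC_ZERO(src, sizeof(src));        /* zero 8 words                        */
  MALLOC_COPY(dst, src, sizeof(src));   /* copy 8 words; dst and src disjoint  */
}
#endif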
1641 /* ------------------ MMAP support ------------------ */
1644 #if HAVE_MMAP
1646 #include <fcntl.h>
1647 #ifndef LACKS_SYS_MMAN_H
1648 #include <sys/mman.h>
1649 #endif
1651 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1652 # define MAP_ANONYMOUS MAP_ANON
1653 #endif
1654 #if !defined(MAP_FAILED)
1655 # define MAP_FAILED ((char*)-1)
1656 #endif
1658 #ifndef MAP_NORESERVE
1659 # ifdef MAP_AUTORESRV
1660 # define MAP_NORESERVE MAP_AUTORESRV
1661 # else
1662 # define MAP_NORESERVE 0
1663 # endif
1664 #endif
1667 Nearly all versions of mmap support MAP_ANONYMOUS,
1668 so the following is unlikely to be needed, but is
1669 supplied just in case.
1672 #ifndef MAP_ANONYMOUS
1674 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1676 #define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1677 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1678 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1679 mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1681 #else
1683 #define MMAP(addr, size, prot, flags) \
1684 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1686 #endif
1689 #endif /* HAVE_MMAP */
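/* Illustrative sketch -- an editorial addition, not part of the original
   source: how the MMAP wrapper above is meant to be called. This mirrors the
   calls made later in sYSMALLOc; the concrete 8192-byte size is arbitrary. */
#if 0
static char*
example_mmap_two_pages(void)
{
  char* mm = (char*)(MMAP(0, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE));
  return (mm != MAP_FAILED) ? mm : 0;
}
#endif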
1693 ----------------------- Chunk representations -----------------------
1698 This struct declaration is misleading (but accurate and necessary).
1699 It declares a "view" into memory allowing access to necessary
1700 fields at known offsets from a given base. See explanation below.
1703 struct malloc_chunk {
1705 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1706 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1708 struct malloc_chunk* fd; /* double links -- used only if free. */
1709 struct malloc_chunk* bk;
1714 malloc_chunk details:
1716 (The following includes lightly edited explanations by Colin Plumb.)
1718 Chunks of memory are maintained using a `boundary tag' method as
1719 described in e.g., Knuth or Standish. (See the paper by Paul
1720 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1721 survey of such techniques.) Sizes of free chunks are stored both
1722 in the front of each chunk and at the end. This makes
1723 consolidating fragmented chunks into bigger chunks very fast. The
1724 size fields also hold bits representing whether chunks are free or
1725 in use.
1727 An allocated chunk looks like this:
1730 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1731 | Size of previous chunk, if allocated | |
1732 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1733 | Size of chunk, in bytes |M|P|
1734 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1735 | User data starts here... .
1737 . (malloc_usable_size() bytes) .
1739 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1740 | Size of chunk |
1741 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1744 Where "chunk" is the front of the chunk for the purpose of most of
1745 the malloc code, but "mem" is the pointer that is returned to the
1746 user. "Nextchunk" is the beginning of the next contiguous chunk.
1748 Chunks always begin on even word boundaries, so the mem portion
1749 (which is returned to the user) is also on an even word boundary, and
1750 thus at least double-word aligned.
1752 Free chunks are stored in circular doubly-linked lists, and look like this:
1754 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1755 | Size of previous chunk |
1756 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1757 `head:' | Size of chunk, in bytes |P|
1758 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1759 | Forward pointer to next chunk in list |
1760 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1761 | Back pointer to previous chunk in list |
1762 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1763 | Unused space (may be 0 bytes long) .
1766 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1767 `foot:' | Size of chunk, in bytes |
1768 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1770 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1771 chunk size (which is always a multiple of two words), is an in-use
1772 bit for the *previous* chunk. If that bit is *clear*, then the
1773 word before the current chunk size contains the previous chunk
1774 size, and can be used to find the front of the previous chunk.
1775 The very first chunk allocated always has this bit set,
1776 preventing access to non-existent (or non-owned) memory. If
1777 prev_inuse is set for any given chunk, then you CANNOT determine
1778 the size of the previous chunk, and might even get a memory
1779 addressing fault when trying to do so.
1781 Note that the `foot' of the current chunk is actually represented
1782 as the prev_size of the NEXT chunk. This makes it easier to
1783 deal with alignments etc but can be very confusing when trying
1784 to extend or adapt this code.
1786 The two exceptions to all this are
1788 1. The special chunk `top' doesn't bother using the
1789 trailing size field since there is no next contiguous chunk
1790 that would have to index off it. After initialization, `top'
1791 is forced to always exist. If it would become less than
1792 MINSIZE bytes long, it is replenished.
1794 2. Chunks allocated via mmap, which have the second-lowest-order
1795 bit M (IS_MMAPPED) set in their size fields. Because they are
1796 allocated one-by-one, each must contain its own trailing size field.
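  A small worked example (editorial addition; the numbers assume a 32-bit
  build with SIZE_SZ == 4 and 8-byte alignment): a malloc(25) request is
  padded to a 32-byte chunk. The user gets 28 usable bytes starting at
  "mem"; the last 4 of them overlap the prev_size field of the next chunk,
  which is unused while this chunk remains in use. The only unavoidable
  per-chunk overhead is therefore the single size field.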
1801 ---------- Size and alignment checks and conversions ----------
1804 /* conversion from malloc headers to user pointers, and back */
1806 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
1807 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1809 /* The smallest possible chunk */
1810 #define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
1812 /* The smallest size we can malloc is an aligned minimal chunk */
1814 #define MINSIZE \
1815 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1817 /* Check if m has acceptable alignment */
1819 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1821 #define misaligned_chunk(p) \
1822 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1823 & MALLOC_ALIGN_MASK)
1827 Check if a request is so large that it would wrap around zero when
1828 padded and aligned. To simplify some other code, the bound is made
1829 low enough so that adding MINSIZE will also not wrap around zero.
1832 #define REQUEST_OUT_OF_RANGE(req) \
1833 ((unsigned long)(req) >= \
1834 (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
1836 /* pad request bytes into a usable size -- internal version */
1838 #define request2size(req) \
1839 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1840 MINSIZE : \
1841 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1843 /* Same, except also perform argument check */
1845 #define checked_request2size(req, sz) \
1846 if (REQUEST_OUT_OF_RANGE(req)) { \
1847 MALLOC_FAILURE_ACTION; \
1848 return 0; \
1850 (sz) = request2size(req);
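/* Illustrative sketch -- an editorial addition, not part of the original
   source: concrete values produced by request2size, assuming a 32-bit build
   with SIZE_SZ == 4, 8-byte MALLOC_ALIGNMENT and hence MINSIZE == 16. */
#if 0
static void
example_request2size(void)
{
  assert(request2size(0)  == MINSIZE);  /* tiny requests are padded to MINSIZE */
  assert(request2size(12) == 16);       /* 12 + 4 bytes overhead fit in 16     */
  assert(request2size(13) == 24);       /* 13 + 4 rounds up to the next 8      */
  assert(request2size(25) == 32);
}
#endif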
1853 --------------- Physical chunk operations ---------------
1857 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1858 #define PREV_INUSE 0x1
1860 /* extract inuse bit of previous chunk */
1861 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1864 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1865 #define IS_MMAPPED 0x2
1867 /* check for mmap()'ed chunk */
1868 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1871 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1872 from a non-main arena. This is only set immediately before handing
1873 the chunk to the user, if necessary. */
1874 #define NON_MAIN_ARENA 0x4
1876 /* check for chunk from non-main arena */
1877 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1881 Bits to mask off when extracting size
1883 Note: IS_MMAPPED is intentionally not masked off from size field in
1884 macros for which mmapped chunks should never be seen. This should
1885 cause helpful core dumps to occur if it is tried by accident by
1886 people extending or adapting this malloc.
1888 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED|NON_MAIN_ARENA)
1890 /* Get size, ignoring use bits */
1891 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1894 /* Ptr to next physical malloc_chunk. */
1895 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~SIZE_BITS) ))
1897 /* Ptr to previous physical malloc_chunk */
1898 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
1900 /* Treat space at ptr + offset as a chunk */
1901 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1903 /* extract p's inuse bit */
1904 #define inuse(p)\
1905 ((((mchunkptr)(((char*)(p))+((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1907 /* set/clear chunk as being inuse without otherwise disturbing */
1908 #define set_inuse(p)\
1909 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1911 #define clear_inuse(p)\
1912 ((mchunkptr)(((char*)(p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1915 /* check/set/clear inuse bits in known places */
1916 #define inuse_bit_at_offset(p, s)\
1917 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
1919 #define set_inuse_bit_at_offset(p, s)\
1920 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
1922 #define clear_inuse_bit_at_offset(p, s)\
1923 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
1926 /* Set size at head, without disturbing its use bit */
1927 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1929 /* Set size/use field */
1930 #define set_head(p, s) ((p)->size = (s))
1932 /* Set size at footer (only when chunk is not in use) */
1933 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
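/* Illustrative sketch -- an editorial addition, not part of the original
   source: typical use of the macros above when walking a (non-mmapped)
   chunk. prev_chunk may only be used when the P bit is clear, exactly as
   the boundary-tag description warns. */
#if 0
static void
example_boundary_tags(mchunkptr p)
{
  mchunkptr nxt = next_chunk(p);        /* physical successor                  */
  assert(inuse(p) == prev_inuse(nxt));  /* our in-use bit lives in `nxt'       */
  if (!prev_inuse(p)) {                 /* prev_size is valid only if the      */
    mchunkptr prv = prev_chunk(p);      /* previous chunk is free              */
    assert(next_chunk(prv) == p);       /* front and back tags must agree      */
  }
}
#endif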
1937 -------------------- Internal data structures --------------------
1939 All internal state is held in an instance of malloc_state defined
1940 below. There are no other static variables, except in two optional
1941 cases:
1942 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1943 * If HAVE_MMAP is true, but mmap doesn't support
1944 MAP_ANONYMOUS, a dummy file descriptor for mmap.
1946 Beware of lots of tricks that minimize the total bookkeeping space
1947 requirements. The result is a little over 1K bytes (for 4-byte
1948 pointers and size_t.)
1952 Bins
1954 An array of bin headers for free chunks. Each bin is doubly
1955 linked. The bins are approximately proportionally (log) spaced.
1956 There are a lot of these bins (128). This may look excessive, but
1957 works very well in practice. Most bins hold sizes that are
1958 unusual as malloc request sizes, but are more usual for fragments
1959 and consolidated sets of chunks, which is what these bins hold, so
1960 they can be found quickly. All procedures maintain the invariant
1961 that no consolidated chunk physically borders another one, so each
1962 chunk in a list is known to be preceded and followed by either
1963 inuse chunks or the ends of memory.
1965 Chunks in bins are kept in size order, with ties going to the
1966 approximately least recently used chunk. Ordering isn't needed
1967 for the small bins, each of which holds chunks of a single size, but
1968 facilitates best-fit allocation for larger chunks. These lists
1969 are just sequential. Keeping them in order almost never requires
1970 enough traversal to warrant using fancier ordered data
1971 structures.
1973 Chunks of the same size are linked with the most
1974 recently freed at the front, and allocations are taken from the
1975 back. This results in LRU (FIFO) allocation order, which tends
1976 to give each chunk an equal opportunity to be consolidated with
1977 adjacent freed chunks, resulting in larger free chunks and less
1978 fragmentation.
1980 To simplify use in double-linked lists, each bin header acts
1981 as a malloc_chunk. This avoids special-casing for headers.
1982 But to conserve space and improve locality, we allocate
1983 only the fd/bk pointers of bins, and then use repositioning tricks
1984 to treat these as the fields of a malloc_chunk*.
1987 typedef struct malloc_chunk* mbinptr;
1989 /* addressing -- note that bin_at(0) does not exist */
1990 #define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
1992 /* analog of ++bin */
1993 #define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
1995 /* Reminders about list directionality within bins */
1996 #define first(b) ((b)->fd)
1997 #define last(b) ((b)->bk)
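/* Illustrative sketch -- an editorial addition, not part of the original
   source: the "repositioning trick" described above in concrete terms.
   bin_at(m,i) returns a pseudo-chunk whose fd/bk fields alias the two
   bins[] slots for bin i, so bin headers can be linked exactly like real
   chunks without storing full chunk headers. (Like bin_at itself, this
   assumes fd sits at offset 2*SIZE_SZ within struct malloc_chunk; the
   struct malloc_state used here is defined further below.) */
#if 0
static void
example_bin_aliasing(mstate m, int i)
{
  mbinptr b = bin_at(m, i);
  assert((void*)&b->fd == (void*)&m->bins[2*i]);
  assert((void*)&b->bk == (void*)&m->bins[2*i + 1]);
}
#endif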
1999 /* Take a chunk off a bin list */
2000 #define unlink(P, BK, FD) { \
2001 FD = P->fd; \
2002 BK = P->bk; \
2003 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
2004 malloc_printerr (check_action, "corrupted double-linked list", P); \
2005 else { \
2006 FD->bk = BK; \
2007 BK->fd = FD; \
2012 Indexing
2014 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2015 8 bytes apart. Larger bins are approximately logarithmically spaced:
2017 64 bins of size 8
2018 32 bins of size 64
2019 16 bins of size 512
2020 8 bins of size 4096
2021 4 bins of size 32768
2022 2 bins of size 262144
2023 1 bin of size what's left
2025 There is actually a little bit of slop in the numbers in bin_index
2026 for the sake of speed. This makes no difference elsewhere.
2028 The bins top out around 1MB because we expect to service large
2029 requests via mmap.
2032 #define NBINS 128
2033 #define NSMALLBINS 64
2034 #define SMALLBIN_WIDTH 8
2035 #define MIN_LARGE_SIZE 512
2037 #define in_smallbin_range(sz) \
2038 ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2040 #define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2042 #define largebin_index(sz) \
2043 (((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2044 ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2045 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2046 ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2047 ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2048 126)
2050 #define bin_index(sz) \
2051 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
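/* Illustrative sketch -- an editorial addition, not part of the original
   source: sample chunk sizes and the bin index the macros above assign them
   (these values follow directly from the shift arithmetic). */
#if 0
static void
example_bin_index(void)
{
  assert(bin_index(16)      == 2);    /* smallest regular small bin           */
  assert(bin_index(504)     == 63);   /* last small bin below 512             */
  assert(bin_index(512)     == 64);   /* first large bin, 64-byte spacing     */
  assert(bin_index(2048)    == 88);
  assert(bin_index(2112)    == 95);   /* spacing switches to 512 bytes        */
  assert(bin_index(1048576) == 126);  /* the largest sizes all map to bin 126 */
}
#endif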
2055 Unsorted chunks
2057 All remainders from chunk splits, as well as all returned chunks,
2058 are first placed in the "unsorted" bin. They are then placed
2059 in regular bins after malloc gives them ONE chance to be used before
2060 binning. So, basically, the unsorted_chunks list acts as a queue,
2061 with chunks being placed on it in free (and malloc_consolidate),
2062 and taken off (to be either used or placed in bins) in malloc.
2064 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
2065 does not have to be taken into account in size comparisons.
2068 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2069 #define unsorted_chunks(M) (bin_at(M, 1))
2074 The top-most available chunk (i.e., the one bordering the end of
2075 available memory) is treated specially. It is never included in
2076 any bin, is used only if no other chunk is available, and is
2077 released back to the system if it is very large (see
2078 M_TRIM_THRESHOLD). Because top initially
2079 points to its own bin with initial zero size, thus forcing
2080 extension on the first malloc request, we avoid having any special
2081 code in malloc to check whether it even exists yet. But we still
2082 need to do so when getting memory from system, so we make
2083 initial_top treat the bin as a legal but unusable chunk during the
2084 interval between initialization and the first call to
2085 sYSMALLOc. (This is somewhat delicate, since it relies on
2086 the 2 preceding words to be zero during this interval as well.)
2089 /* Conveniently, the unsorted bin can be used as dummy top on first call */
2090 #define initial_top(M) (unsorted_chunks(M))
2093 Binmap
2095 To help compensate for the large number of bins, a one-level index
2096 structure is used for bin-by-bin searching. `binmap' is a
2097 bitvector recording whether bins are definitely empty so they can
2098 be skipped over during traversals. The bits are NOT always
2099 cleared as soon as bins are empty, but instead only
2100 when they are noticed to be empty during traversal in malloc.
2103 /* Conservatively use 32 bits per map word, even if on 64bit system */
2104 #define BINMAPSHIFT 5
2105 #define BITSPERMAP (1U << BINMAPSHIFT)
2106 #define BINMAPSIZE (NBINS / BITSPERMAP)
2108 #define idx2block(i) ((i) >> BINMAPSHIFT)
2109 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2111 #define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2112 #define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2113 #define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
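/* Illustrative sketch -- an editorial addition, not part of the original
   source: the binmap is only a hint. A clear bit guarantees the bin is
   empty and can be skipped; a set bit merely means the bin has not yet been
   noticed to be empty and must still be examined. */
#if 0
static void
example_binmap(mstate m, int i)
{
  mark_bin(m, i);
  assert(get_binmap(m, i) != 0);
  unmark_bin(m, i);
  assert(get_binmap(m, i) == 0);
}
#endif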
2116 Fastbins
2118 An array of lists holding recently freed small chunks. Fastbins
2119 are not doubly linked. It is faster to single-link them, and
2120 since chunks are never removed from the middles of these lists,
2121 double linking is not necessary. Also, unlike regular bins, they
2122 are not even processed in FIFO order (they use faster LIFO) since
2123 ordering doesn't much matter in the transient contexts in which
2124 fastbins are normally used.
2126 Chunks in fastbins keep their inuse bit set, so they cannot
2127 be consolidated with other free chunks. malloc_consolidate
2128 releases all chunks in fastbins and consolidates them with
2129 other free chunks.
2132 typedef struct malloc_chunk* mfastbinptr;
2134 /* offset 2 to use otherwise unindexable first 2 bins */
2135 #define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2137 /* The maximum fastbin request size we support */
2138 #define MAX_FAST_SIZE 80
2140 #define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
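/* Illustrative sketch -- an editorial addition, not part of the original
   source: fastbin indexing on a 32-bit build (SIZE_SZ == 4), where chunk
   sizes step by 8 bytes and MAX_FAST_SIZE == 80 yields NFASTBINS == 10. */
#if 0
static void
example_fastbin_index(void)
{
  assert(fastbin_index(16) == 0);   /* the smallest possible chunk */
  assert(fastbin_index(24) == 1);
  assert(fastbin_index(request2size(MAX_FAST_SIZE)) == NFASTBINS - 1);
}
#endif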
2143 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2144 that triggers automatic consolidation of possibly-surrounding
2145 fastbin chunks. This is a heuristic, so the exact value should not
2146 matter too much. It is defined at half the default trim threshold as a
2147 compromise heuristic to only attempt consolidation if it is likely
2148 to lead to trimming. However, it is not dynamically tunable, since
2149 consolidation reduces fragmentation surrounding large chunks even
2150 if trimming is not used.
2153 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2156 Two status bits are kept per arena. They were formerly stored in the low
2157 bits of max_fast (which never matter in size comparisons) and now live in the `flags' field.
2161 FASTCHUNKS_BIT held in the arena `flags' field indicates that there are probably
2162 some fastbin chunks. It is set true on entering a chunk into any
2163 fastbin, and cleared only in malloc_consolidate.
2165 The truth value is inverted so that have_fastchunks will be true
2166 upon startup (since statics are zero-filled), simplifying
2167 initialization checks.
2170 #define FASTCHUNKS_BIT (1U)
2172 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
2173 #define clear_fastchunks(M) ((M)->flags |= FASTCHUNKS_BIT)
2174 #define set_fastchunks(M) ((M)->flags &= ~FASTCHUNKS_BIT)
2177 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2178 regions. Otherwise, contiguity is exploited in merging together,
2179 when possible, results from consecutive MORECORE calls.
2181 The initial value comes from MORECORE_CONTIGUOUS, but is
2182 changed dynamically if mmap is ever used as an sbrk substitute.
2185 #define NONCONTIGUOUS_BIT (2U)
2187 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
2188 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
2189 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
2190 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
2193 Set value of max_fast.
2194 Use impossibly small value if 0.
2195 Precondition: there are no existing fastbin chunks.
2196 Since max_fast no longer carries the flag bits, setting it leaves the arena flags untouched.
2199 #define set_max_fast(s) \
2200 global_max_fast = ((s) == 0)? SMALLBIN_WIDTH: request2size(s)
2201 #define get_max_fast() global_max_fast
2205 ----------- Internal state representation and initialization -----------
2208 struct malloc_state {
2209 /* Serialize access. */
2210 mutex_t mutex;
2212 /* Flags (formerly in max_fast). */
2213 int flags;
2215 #if THREAD_STATS
2216 /* Statistics for locking. Only used if THREAD_STATS is defined. */
2217 long stat_lock_direct, stat_lock_loop, stat_lock_wait;
2218 #endif
2220 /* Fastbins */
2221 mfastbinptr fastbins[NFASTBINS];
2223 /* Base of the topmost chunk -- not otherwise kept in a bin */
2224 mchunkptr top;
2226 /* The remainder from the most recent split of a small request */
2227 mchunkptr last_remainder;
2229 /* Normal bins packed as described above */
2230 mchunkptr bins[NBINS * 2];
2232 /* Bitmap of bins */
2233 unsigned int binmap[BINMAPSIZE];
2235 /* Linked list */
2236 struct malloc_state *next;
2238 /* Memory allocated from the system in this arena. */
2239 INTERNAL_SIZE_T system_mem;
2240 INTERNAL_SIZE_T max_system_mem;
2243 struct malloc_par {
2244 /* Tunable parameters */
2245 unsigned long trim_threshold;
2246 INTERNAL_SIZE_T top_pad;
2247 INTERNAL_SIZE_T mmap_threshold;
2249 /* Memory map support */
2250 int n_mmaps;
2251 int n_mmaps_max;
2252 int max_n_mmaps;
2254 /* Cache malloc_getpagesize */
2255 unsigned int pagesize;
2257 /* Statistics */
2258 INTERNAL_SIZE_T mmapped_mem;
2259 /*INTERNAL_SIZE_T sbrked_mem;*/
2260 /*INTERNAL_SIZE_T max_sbrked_mem;*/
2261 INTERNAL_SIZE_T max_mmapped_mem;
2262 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
2264 /* First address handed out by MORECORE/sbrk. */
2265 char* sbrk_base;
2268 /* There are several instances of this struct ("arenas") in this
2269 malloc. If you are adapting this malloc in a way that does NOT use
2270 a static or mmapped malloc_state, you MUST explicitly zero-fill it
2271 before using. This malloc relies on the property that malloc_state
2272 is initialized to all zeroes (as is true of C statics). */
2274 static struct malloc_state main_arena;
2276 /* There is only one instance of the malloc parameters. */
2278 static struct malloc_par mp_;
2281 /* Maximum size of memory handled in fastbins. */
2282 static INTERNAL_SIZE_T global_max_fast;
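/* Illustrative sketch -- an editorial addition, not part of the original
   source: what the zero-fill warning above means for anyone embedding this
   allocator with a dynamically obtained arena. The arena must start out
   all-zero, just as the static main_arena does; the rest of its setup then
   happens lazily through malloc_consolidate. */
#if 0
static void
example_prepare_arena(struct malloc_state *av)
{
  memset(av, 0, sizeof(*av));   /* mandatory before first use */
}
#endif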
2285 Initialize a malloc_state struct.
2287 This is called only from within malloc_consolidate, which needs to
2288 be called in the same contexts anyway. It is never called directly
2289 outside of malloc_consolidate because some optimizing compilers try
2290 to inline it at all call points, which turns out not to be an
2291 optimization at all. (Inlining it in malloc_consolidate is fine though.)
2294 #if __STD_C
2295 static void malloc_init_state(mstate av)
2296 #else
2297 static void malloc_init_state(av) mstate av;
2298 #endif
2300 int i;
2301 mbinptr bin;
2303 /* Establish circular links for normal bins */
2304 for (i = 1; i < NBINS; ++i) {
2305 bin = bin_at(av,i);
2306 bin->fd = bin->bk = bin;
2309 #if MORECORE_CONTIGUOUS
2310 if (av != &main_arena)
2311 #endif
2312 set_noncontiguous(av);
2313 if (av == &main_arena)
2314 set_max_fast(DEFAULT_MXFAST);
2315 av->flags |= FASTCHUNKS_BIT;
2317 av->top = initial_top(av);
2321 Other internal utilities operating on mstates
2324 #if __STD_C
2325 static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2326 static int sYSTRIm(size_t, mstate);
2327 static void malloc_consolidate(mstate);
2328 #ifndef _LIBC
2329 static Void_t** iALLOc(mstate, size_t, size_t*, int, Void_t**);
2330 #endif
2331 #else
2332 static Void_t* sYSMALLOc();
2333 static int sYSTRIm();
2334 static void malloc_consolidate();
2335 static Void_t** iALLOc();
2336 #endif
2339 /* -------------- Early definitions for debugging hooks ---------------- */
2341 /* Define and initialize the hook variables. These weak definitions must
2342 appear before any use of the variables in a function (arena.c uses one). */
2343 #ifndef weak_variable
2344 #ifndef _LIBC
2345 #define weak_variable /**/
2346 #else
2347 /* In GNU libc we want the hook variables to be weak definitions to
2348 avoid a problem with Emacs. */
2349 #define weak_variable weak_function
2350 #endif
2351 #endif
2353 /* Forward declarations. */
2354 static Void_t* malloc_hook_ini __MALLOC_P ((size_t sz,
2355 const __malloc_ptr_t caller));
2356 static Void_t* realloc_hook_ini __MALLOC_P ((Void_t* ptr, size_t sz,
2357 const __malloc_ptr_t caller));
2358 static Void_t* memalign_hook_ini __MALLOC_P ((size_t alignment, size_t sz,
2359 const __malloc_ptr_t caller));
2361 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
2362 void weak_variable (*__free_hook) (__malloc_ptr_t __ptr,
2363 const __malloc_ptr_t) = NULL;
2364 __malloc_ptr_t weak_variable (*__malloc_hook)
2365 (size_t __size, const __malloc_ptr_t) = malloc_hook_ini;
2366 __malloc_ptr_t weak_variable (*__realloc_hook)
2367 (__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t)
2368 = realloc_hook_ini;
2369 __malloc_ptr_t weak_variable (*__memalign_hook)
2370 (size_t __alignment, size_t __size, const __malloc_ptr_t)
2371 = memalign_hook_ini;
2372 void weak_variable (*__after_morecore_hook) (void) = NULL;
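/* Illustrative sketch -- an editorial addition, not part of the original
   source: the pattern (as documented in the GNU C library manual) that an
   application uses to install these hooks. It is shown here only for
   context; code like this belongs in the application, not in malloc.c. */
#if 0
static __malloc_ptr_t (*old_malloc_hook) (size_t, const __malloc_ptr_t);

static __malloc_ptr_t
my_malloc_hook (size_t size, const __malloc_ptr_t caller)
{
  __malloc_ptr_t result;
  __malloc_hook = old_malloc_hook;    /* uninstall ourselves to avoid recursion */
  result = malloc (size);
  /* ... record the allocation here if desired ... */
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;     /* re-install                             */
  return result;
}

static void
my_init_hook (void)
{
  old_malloc_hook = __malloc_hook;
  __malloc_hook = my_malloc_hook;
}

/* In the application: */
void (*__malloc_initialize_hook) (void) = my_init_hook;
#endif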
2375 /* ---------------- Error behavior ------------------------------------ */
2377 #ifndef DEFAULT_CHECK_ACTION
2378 #define DEFAULT_CHECK_ACTION 3
2379 #endif
2381 static int check_action = DEFAULT_CHECK_ACTION;
2384 /* ------------------ Testing support ----------------------------------*/
2386 static int perturb_byte;
2388 #define alloc_perturb(p, n) memset (p, (perturb_byte ^ 0xff) & 0xff, n)
2389 #define free_perturb(p, n) memset (p, perturb_byte & 0xff, n)
2392 /* ------------------- Support for multiple arenas -------------------- */
2393 #include "arena.c"
2396 Debugging support
2398 These routines make a number of assertions about the states
2399 of data structures that should be true at all times. If any
2400 are not true, it's very likely that a user program has somehow
2401 trashed memory. (It's also possible that there is a coding error
2402 in malloc. In which case, please report it!)
2405 #if ! MALLOC_DEBUG
2407 #define check_chunk(A,P)
2408 #define check_free_chunk(A,P)
2409 #define check_inuse_chunk(A,P)
2410 #define check_remalloced_chunk(A,P,N)
2411 #define check_malloced_chunk(A,P,N)
2412 #define check_malloc_state(A)
2414 #else
2416 #define check_chunk(A,P) do_check_chunk(A,P)
2417 #define check_free_chunk(A,P) do_check_free_chunk(A,P)
2418 #define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
2419 #define check_remalloced_chunk(A,P,N) do_check_remalloced_chunk(A,P,N)
2420 #define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
2421 #define check_malloc_state(A) do_check_malloc_state(A)
2424 Properties of all chunks
2427 #if __STD_C
2428 static void do_check_chunk(mstate av, mchunkptr p)
2429 #else
2430 static void do_check_chunk(av, p) mstate av; mchunkptr p;
2431 #endif
2433 unsigned long sz = chunksize(p);
2434 /* min and max possible addresses assuming contiguous allocation */
2435 char* max_address = (char*)(av->top) + chunksize(av->top);
2436 char* min_address = max_address - av->system_mem;
2438 if (!chunk_is_mmapped(p)) {
2440 /* Has legal address ... */
2441 if (p != av->top) {
2442 if (contiguous(av)) {
2443 assert(((char*)p) >= min_address);
2444 assert(((char*)p + sz) <= ((char*)(av->top)));
2447 else {
2448 /* top size is always at least MINSIZE */
2449 assert((unsigned long)(sz) >= MINSIZE);
2450 /* top predecessor always marked inuse */
2451 assert(prev_inuse(p));
2455 else {
2456 #if HAVE_MMAP
2457 /* address is outside main heap */
2458 if (contiguous(av) && av->top != initial_top(av)) {
2459 assert(((char*)p) < min_address || ((char*)p) > max_address);
2461 /* chunk is page-aligned */
2462 assert(((p->prev_size + sz) & (mp_.pagesize-1)) == 0);
2463 /* mem is aligned */
2464 assert(aligned_OK(chunk2mem(p)));
2465 #else
2466 /* force an appropriate assert violation if debug set */
2467 assert(!chunk_is_mmapped(p));
2468 #endif
2473 Properties of free chunks
2476 #if __STD_C
2477 static void do_check_free_chunk(mstate av, mchunkptr p)
2478 #else
2479 static void do_check_free_chunk(av, p) mstate av; mchunkptr p;
2480 #endif
2482 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2483 mchunkptr next = chunk_at_offset(p, sz);
2485 do_check_chunk(av, p);
2487 /* Chunk must claim to be free ... */
2488 assert(!inuse(p));
2489 assert (!chunk_is_mmapped(p));
2491 /* Unless a special marker, must have OK fields */
2492 if ((unsigned long)(sz) >= MINSIZE)
2494 assert((sz & MALLOC_ALIGN_MASK) == 0);
2495 assert(aligned_OK(chunk2mem(p)));
2496 /* ... matching footer field */
2497 assert(next->prev_size == sz);
2498 /* ... and is fully consolidated */
2499 assert(prev_inuse(p));
2500 assert (next == av->top || inuse(next));
2502 /* ... and has minimally sane links */
2503 assert(p->fd->bk == p);
2504 assert(p->bk->fd == p);
2506 else /* markers are always of size SIZE_SZ */
2507 assert(sz == SIZE_SZ);
2511 Properties of inuse chunks
2514 #if __STD_C
2515 static void do_check_inuse_chunk(mstate av, mchunkptr p)
2516 #else
2517 static void do_check_inuse_chunk(av, p) mstate av; mchunkptr p;
2518 #endif
2520 mchunkptr next;
2522 do_check_chunk(av, p);
2524 if (chunk_is_mmapped(p))
2525 return; /* mmapped chunks have no next/prev */
2527 /* Check whether it claims to be in use ... */
2528 assert(inuse(p));
2530 next = next_chunk(p);
2532 /* ... and is surrounded by OK chunks.
2533 Since more things can be checked with free chunks than inuse ones,
2534 if an inuse chunk borders them and debug is on, it's worth doing them.
2536 if (!prev_inuse(p)) {
2537 /* Note that we cannot even look at prev unless it is not inuse */
2538 mchunkptr prv = prev_chunk(p);
2539 assert(next_chunk(prv) == p);
2540 do_check_free_chunk(av, prv);
2543 if (next == av->top) {
2544 assert(prev_inuse(next));
2545 assert(chunksize(next) >= MINSIZE);
2547 else if (!inuse(next))
2548 do_check_free_chunk(av, next);
2552 Properties of chunks recycled from fastbins
2555 #if __STD_C
2556 static void do_check_remalloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2557 #else
2558 static void do_check_remalloced_chunk(av, p, s)
2559 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2560 #endif
2562 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
2564 if (!chunk_is_mmapped(p)) {
2565 assert(av == arena_for_chunk(p));
2566 if (chunk_non_main_arena(p))
2567 assert(av != &main_arena);
2568 else
2569 assert(av == &main_arena);
2572 do_check_inuse_chunk(av, p);
2574 /* Legal size ... */
2575 assert((sz & MALLOC_ALIGN_MASK) == 0);
2576 assert((unsigned long)(sz) >= MINSIZE);
2577 /* ... and alignment */
2578 assert(aligned_OK(chunk2mem(p)));
2579 /* chunk is less than MINSIZE more than request */
2580 assert((long)(sz) - (long)(s) >= 0);
2581 assert((long)(sz) - (long)(s + MINSIZE) < 0);
2585 Properties of nonrecycled chunks at the point they are malloced
2588 #if __STD_C
2589 static void do_check_malloced_chunk(mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2590 #else
2591 static void do_check_malloced_chunk(av, p, s)
2592 mstate av; mchunkptr p; INTERNAL_SIZE_T s;
2593 #endif
2595 /* same as recycled case ... */
2596 do_check_remalloced_chunk(av, p, s);
2599 ... plus, must obey implementation invariant that prev_inuse is
2600 always true of any allocated chunk; i.e., that each allocated
2601 chunk borders either a previously allocated and still in-use
2602 chunk, or the base of its memory arena. This is ensured
2603 by making all allocations from the `lowest' part of any found
2604 chunk. This does not necessarily hold however for chunks
2605 recycled via fastbins.
2608 assert(prev_inuse(p));
2613 Properties of malloc_state.
2615 This may be useful for debugging malloc, as well as detecting user
2616 programmer errors that somehow write into malloc_state.
2618 If you are extending or experimenting with this malloc, you can
2619 probably figure out how to hack this routine to print out or
2620 display chunk addresses, sizes, bins, and other instrumentation.
2623 static void do_check_malloc_state(mstate av)
2625 int i;
2626 mchunkptr p;
2627 mchunkptr q;
2628 mbinptr b;
2629 unsigned int binbit;
2630 int empty;
2631 unsigned int idx;
2632 INTERNAL_SIZE_T size;
2633 unsigned long total = 0;
2634 int max_fast_bin;
2636 /* internal size_t must be no wider than pointer type */
2637 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2639 /* alignment is a power of 2 */
2640 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2642 /* cannot run remaining checks until fully initialized */
2643 if (av->top == 0 || av->top == initial_top(av))
2644 return;
2646 /* pagesize is a power of 2 */
2647 assert((mp_.pagesize & (mp_.pagesize-1)) == 0);
2649 /* A contiguous main_arena is consistent with sbrk_base. */
2650 if (av == &main_arena && contiguous(av))
2651 assert((char*)mp_.sbrk_base + av->system_mem ==
2652 (char*)av->top + chunksize(av->top));
2654 /* properties of fastbins */
2656 /* max_fast is in allowed range */
2657 assert((get_max_fast () & ~1) <= request2size(MAX_FAST_SIZE));
2659 max_fast_bin = fastbin_index(get_max_fast ());
2661 for (i = 0; i < NFASTBINS; ++i) {
2662 p = av->fastbins[i];
2664 /* all bins past max_fast are empty */
2665 if (i > max_fast_bin)
2666 assert(p == 0);
2668 while (p != 0) {
2669 /* each chunk claims to be inuse */
2670 do_check_inuse_chunk(av, p);
2671 total += chunksize(p);
2672 /* chunk belongs in this bin */
2673 assert(fastbin_index(chunksize(p)) == i);
2674 p = p->fd;
2678 if (total != 0)
2679 assert(have_fastchunks(av));
2680 else if (!have_fastchunks(av))
2681 assert(total == 0);
2683 /* check normal bins */
2684 for (i = 1; i < NBINS; ++i) {
2685 b = bin_at(av,i);
2687 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2688 if (i >= 2) {
2689 binbit = get_binmap(av,i);
2690 empty = last(b) == b;
2691 if (!binbit)
2692 assert(empty);
2693 else if (!empty)
2694 assert(binbit);
2697 for (p = last(b); p != b; p = p->bk) {
2698 /* each chunk claims to be free */
2699 do_check_free_chunk(av, p);
2700 size = chunksize(p);
2701 total += size;
2702 if (i >= 2) {
2703 /* chunk belongs in bin */
2704 idx = bin_index(size);
2705 assert(idx == i);
2706 /* lists are sorted */
2707 assert(p->bk == b ||
2708 (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2710 /* chunk is followed by a legal chain of inuse chunks */
2711 for (q = next_chunk(p);
2712 (q != av->top && inuse(q) &&
2713 (unsigned long)(chunksize(q)) >= MINSIZE);
2714 q = next_chunk(q))
2715 do_check_inuse_chunk(av, q);
2719 /* top chunk is OK */
2720 check_chunk(av, av->top);
2722 /* sanity checks for statistics */
2724 #ifdef NO_THREADS
2725 assert(total <= (unsigned long)(mp_.max_total_mem));
2726 assert(mp_.n_mmaps >= 0);
2727 #endif
2728 assert(mp_.n_mmaps <= mp_.n_mmaps_max);
2729 assert(mp_.n_mmaps <= mp_.max_n_mmaps);
2731 assert((unsigned long)(av->system_mem) <=
2732 (unsigned long)(av->max_system_mem));
2734 assert((unsigned long)(mp_.mmapped_mem) <=
2735 (unsigned long)(mp_.max_mmapped_mem));
2737 #ifdef NO_THREADS
2738 assert((unsigned long)(mp_.max_total_mem) >=
2739 (unsigned long)(mp_.mmapped_mem) + (unsigned long)(av->system_mem));
2740 #endif
2742 #endif
2745 /* ----------------- Support for debugging hooks -------------------- */
2746 #include "hooks.c"
2749 /* ----------- Routines dealing with system allocation -------------- */
2752 sysmalloc handles malloc cases requiring more memory from the system.
2753 On entry, it is assumed that av->top does not have enough
2754 space to service request for nb bytes, thus requiring that av->top
2755 be extended or replaced.
2758 #if __STD_C
2759 static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
2760 #else
2761 static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
2762 #endif
2764 mchunkptr old_top; /* incoming value of av->top */
2765 INTERNAL_SIZE_T old_size; /* its size */
2766 char* old_end; /* its end address */
2768 long size; /* arg to first MORECORE or mmap call */
2769 char* brk; /* return value from MORECORE */
2771 long correction; /* arg to 2nd MORECORE call */
2772 char* snd_brk; /* 2nd return val */
2774 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2775 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2776 char* aligned_brk; /* aligned offset into brk */
2778 mchunkptr p; /* the allocated/returned chunk */
2779 mchunkptr remainder; /* remainder from allocation */
2780 unsigned long remainder_size; /* its size */
2782 unsigned long sum; /* for updating stats */
2784 size_t pagemask = mp_.pagesize - 1;
2787 #if HAVE_MMAP
2790 If have mmap, and the request size meets the mmap threshold, and
2791 the system supports mmap, and there are few enough currently
2792 allocated mmapped regions, try to directly map this request
2793 rather than expanding top.
2796 if ((unsigned long)(nb) >= (unsigned long)(mp_.mmap_threshold) &&
2797 (mp_.n_mmaps < mp_.n_mmaps_max)) {
2799 char* mm; /* return value from mmap call*/
2802 Round up size to nearest page. For mmapped chunks, the overhead
2803 is one SIZE_SZ unit larger than for normal chunks, because there
2804 is no following chunk whose prev_size field could be used.
2806 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
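  /* Worked example (editorial note): with 4-byte SIZE_SZ, 8-byte alignment
     and 4096-byte pages, nb == 4064 gives (4064+4+7+4095) & ~4095 == 4096,
     i.e. a single page; nb == 4092 would already need two pages. */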
2808 /* Don't try if size wraps around 0 */
2809 if ((unsigned long)(size) > (unsigned long)(nb)) {
2811 mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2813 if (mm != MAP_FAILED) {
2816 The offset to the start of the mmapped region is stored
2817 in the prev_size field of the chunk. This allows us to adjust
2818 returned start address to meet alignment requirements here
2819 and in memalign(), and still be able to compute proper
2820 address argument for later munmap in free() and realloc().
2823 front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
2824 if (front_misalign > 0) {
2825 correction = MALLOC_ALIGNMENT - front_misalign;
2826 p = (mchunkptr)(mm + correction);
2827 p->prev_size = correction;
2828 set_head(p, (size - correction) |IS_MMAPPED);
2830 else {
2831 p = (mchunkptr)mm;
2832 set_head(p, size|IS_MMAPPED);
2835 /* update statistics */
2837 if (++mp_.n_mmaps > mp_.max_n_mmaps)
2838 mp_.max_n_mmaps = mp_.n_mmaps;
2840 sum = mp_.mmapped_mem += size;
2841 if (sum > (unsigned long)(mp_.max_mmapped_mem))
2842 mp_.max_mmapped_mem = sum;
2843 #ifdef NO_THREADS
2844 sum += av->system_mem;
2845 if (sum > (unsigned long)(mp_.max_total_mem))
2846 mp_.max_total_mem = sum;
2847 #endif
2849 check_chunk(av, p);
2851 return chunk2mem(p);
2855 #endif
2857 /* Record incoming configuration of top */
2859 old_top = av->top;
2860 old_size = chunksize(old_top);
2861 old_end = (char*)(chunk_at_offset(old_top, old_size));
2863 brk = snd_brk = (char*)(MORECORE_FAILURE);
2866 If not the first time through, we require old_size to be
2867 at least MINSIZE and to have prev_inuse set.
2870 assert((old_top == initial_top(av) && old_size == 0) ||
2871 ((unsigned long) (old_size) >= MINSIZE &&
2872 prev_inuse(old_top) &&
2873 ((unsigned long)old_end & pagemask) == 0));
2875 /* Precondition: not enough current space to satisfy nb request */
2876 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
2878 /* Precondition: all fastbins are consolidated */
2879 assert(!have_fastchunks(av));
2882 if (av != &main_arena) {
2884 heap_info *old_heap, *heap;
2885 size_t old_heap_size;
2887 /* First try to extend the current heap. */
2888 old_heap = heap_for_ptr(old_top);
2889 old_heap_size = old_heap->size;
2890 if (grow_heap(old_heap, MINSIZE + nb - old_size) == 0) {
2891 av->system_mem += old_heap->size - old_heap_size;
2892 arena_mem += old_heap->size - old_heap_size;
2893 #if 0
2894 if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
2895 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2896 #endif
2897 set_head(old_top, (((char *)old_heap + old_heap->size) - (char *)old_top)
2898 | PREV_INUSE);
2900 else if ((heap = new_heap(nb + (MINSIZE + sizeof(*heap)), mp_.top_pad))) {
2901 /* Use a newly allocated heap. */
2902 heap->ar_ptr = av;
2903 heap->prev = old_heap;
2904 av->system_mem += heap->size;
2905 arena_mem += heap->size;
2906 #if 0
2907 if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
2908 max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
2909 #endif
2910 /* Set up the new top. */
2911 top(av) = chunk_at_offset(heap, sizeof(*heap));
2912 set_head(top(av), (heap->size - sizeof(*heap)) | PREV_INUSE);
2914 /* Setup fencepost and free the old top chunk. */
2915 /* The fencepost takes at least MINSIZE bytes, because it might
2916 become the top chunk again later. Note that a footer is set
2917 up, too, although the chunk is marked in use. */
2918 old_size -= MINSIZE;
2919 set_head(chunk_at_offset(old_top, old_size + 2*SIZE_SZ), 0|PREV_INUSE);
2920 if (old_size >= MINSIZE) {
2921 set_head(chunk_at_offset(old_top, old_size), (2*SIZE_SZ)|PREV_INUSE);
2922 set_foot(chunk_at_offset(old_top, old_size), (2*SIZE_SZ));
2923 set_head(old_top, old_size|PREV_INUSE|NON_MAIN_ARENA);
2924 _int_free(av, chunk2mem(old_top));
2925 } else {
2926 set_head(old_top, (old_size + 2*SIZE_SZ)|PREV_INUSE);
2927 set_foot(old_top, (old_size + 2*SIZE_SZ));
2931 } else { /* av == main_arena */
2934 /* Request enough space for nb + pad + overhead */
2936 size = nb + mp_.top_pad + MINSIZE;
2939 If contiguous, we can subtract out existing space that we hope to
2940 combine with new space. We add it back later only if
2941 we don't actually get contiguous space.
2944 if (contiguous(av))
2945 size -= old_size;
2948 Round to a multiple of page size.
2949 If MORECORE is not contiguous, this ensures that we only call it
2950 with whole-page arguments. And if MORECORE is contiguous and
2951 this is not first time through, this preserves page-alignment of
2952 previous calls. Otherwise, we correct to page-align below.
2955 size = (size + pagemask) & ~pagemask;
2958 Don't try to call MORECORE if argument is so big as to appear
2959 negative. Note that since mmap takes size_t arg, it may succeed
2960 below even if we cannot call MORECORE.
2963 if (size > 0)
2964 brk = (char*)(MORECORE(size));
2966 if (brk != (char*)(MORECORE_FAILURE)) {
2967 /* Call the `morecore' hook if necessary. */
2968 if (__after_morecore_hook)
2969 (*__after_morecore_hook) ();
2970 } else {
2972 If have mmap, try using it as a backup when MORECORE fails or
2973 cannot be used. This is worth doing on systems that have "holes" in
2974 address space, so sbrk cannot extend to give contiguous space, but
2975 space is available elsewhere. Note that we ignore mmap max count
2976 and threshold limits, since the space will not be used as a
2977 segregated mmap region.
2980 #if HAVE_MMAP
2981 /* Cannot merge with old top, so add its size back in */
2982 if (contiguous(av))
2983 size = (size + old_size + pagemask) & ~pagemask;
2985 /* If we are relying on mmap as backup, then use larger units */
2986 if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
2987 size = MMAP_AS_MORECORE_SIZE;
2989 /* Don't try if size wraps around 0 */
2990 if ((unsigned long)(size) > (unsigned long)(nb)) {
2992 char *mbrk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
2994 if (mbrk != MAP_FAILED) {
2996 /* We do not need, and cannot use, another sbrk call to find end */
2997 brk = mbrk;
2998 snd_brk = brk + size;
3001 Record that we no longer have a contiguous sbrk region.
3002 After the first time mmap is used as backup, we do not
3003 ever rely on contiguous space since this could incorrectly
3004 bridge regions.
3006 set_noncontiguous(av);
3009 #endif
3012 if (brk != (char*)(MORECORE_FAILURE)) {
3013 if (mp_.sbrk_base == 0)
3014 mp_.sbrk_base = brk;
3015 av->system_mem += size;
3018 If MORECORE extends previous space, we can likewise extend top size.
3021 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE))
3022 set_head(old_top, (size + old_size) | PREV_INUSE);
3024 else if (contiguous(av) && old_size && brk < old_end) {
3025 /* Oops! Someone else killed our space... Can't touch anything. */
3026 assert(0);
3030 Otherwise, make adjustments:
3032 * If the first time through or noncontiguous, we need to call sbrk
3033 just to find out where the end of memory lies.
3035 * We need to ensure that all returned chunks from malloc will meet
3036 MALLOC_ALIGNMENT
3038 * If there was an intervening foreign sbrk, we need to adjust sbrk
3039 request size to account for fact that we will not be able to
3040 combine new space with existing space in old_top.
3042 * Almost all systems internally allocate whole pages at a time, in
3043 which case we might as well use the whole last page of request.
3044 So we allocate enough more memory to hit a page boundary now,
3045 which in turn causes future contiguous calls to page-align.
3048 else {
3049 front_misalign = 0;
3050 end_misalign = 0;
3051 correction = 0;
3052 aligned_brk = brk;
3054 /* handle contiguous cases */
3055 if (contiguous(av)) {
3057 /* Count foreign sbrk as system_mem. */
3058 if (old_size)
3059 av->system_mem += brk - old_end;
3061 /* Guarantee alignment of first new chunk made from this space */
3063 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3064 if (front_misalign > 0) {
3067 Skip over some bytes to arrive at an aligned position.
3068 We don't need to specially mark these wasted front bytes.
3069 They will never be accessed anyway because
3070 prev_inuse of av->top (and any chunk created from its start)
3071 is always true after initialization.
3074 correction = MALLOC_ALIGNMENT - front_misalign;
3075 aligned_brk += correction;
3079 If this isn't adjacent to existing space, then we will not
3080 be able to merge with old_top space, so must add to 2nd request.
3083 correction += old_size;
3085 /* Extend the end address to hit a page boundary */
3086 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3087 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3089 assert(correction >= 0);
3090 snd_brk = (char*)(MORECORE(correction));
3093 If can't allocate correction, try to at least find out current
3094 brk. It might be enough to proceed without failing.
3096 Note that if second sbrk did NOT fail, we assume that space
3097 is contiguous with first sbrk. This is a safe assumption unless
3098 program is multithreaded but doesn't use locks and a foreign sbrk
3099 occurred between our first and second calls.
3102 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3103 correction = 0;
3104 snd_brk = (char*)(MORECORE(0));
3105 } else
3106 /* Call the `morecore' hook if necessary. */
3107 if (__after_morecore_hook)
3108 (*__after_morecore_hook) ();
3111 /* handle non-contiguous cases */
3112 else {
3113 /* MORECORE/mmap must correctly align */
3114 assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3116 /* Find out current end of memory */
3117 if (snd_brk == (char*)(MORECORE_FAILURE)) {
3118 snd_brk = (char*)(MORECORE(0));
3122 /* Adjust top based on results of second sbrk */
3123 if (snd_brk != (char*)(MORECORE_FAILURE)) {
3124 av->top = (mchunkptr)aligned_brk;
3125 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3126 av->system_mem += correction;
3129 If not the first time through, we either have a
3130 gap due to foreign sbrk or a non-contiguous region. Insert a
3131 double fencepost at old_top to prevent consolidation with space
3132 we don't own. These fenceposts are artificial chunks that are
3133 marked as inuse and are in any case too small to use. We need
3134 two to make sizes and alignments work out.
3137 if (old_size != 0) {
3139 Shrink old_top to insert fenceposts, keeping size a
3140 multiple of MALLOC_ALIGNMENT. We know there is at least
3141 enough space in old_top to do this.
3143 old_size = (old_size - 4*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3144 set_head(old_top, old_size | PREV_INUSE);
3147 Note that the following assignments completely overwrite
3148 old_top when old_size was previously MINSIZE. This is
3149 intentional. We need the fencepost, even if old_top otherwise gets
3150 lost.
3152 chunk_at_offset(old_top, old_size )->size =
3153 (2*SIZE_SZ)|PREV_INUSE;
3155 chunk_at_offset(old_top, old_size + 2*SIZE_SZ)->size =
3156 (2*SIZE_SZ)|PREV_INUSE;
3158 /* If possible, release the rest. */
3159 if (old_size >= MINSIZE) {
3160 _int_free(av, chunk2mem(old_top));
3167 /* Update statistics */
3168 #ifdef NO_THREADS
3169 sum = av->system_mem + mp_.mmapped_mem;
3170 if (sum > (unsigned long)(mp_.max_total_mem))
3171 mp_.max_total_mem = sum;
3172 #endif
3176 } /* if (av != &main_arena) */
3178 if ((unsigned long)av->system_mem > (unsigned long)(av->max_system_mem))
3179 av->max_system_mem = av->system_mem;
3180 check_malloc_state(av);
3182 /* finally, do the allocation */
3183 p = av->top;
3184 size = chunksize(p);
3186 /* check that one of the above allocation paths succeeded */
3187 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3188 remainder_size = size - nb;
3189 remainder = chunk_at_offset(p, nb);
3190 av->top = remainder;
3191 set_head(p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
3192 set_head(remainder, remainder_size | PREV_INUSE);
3193 check_malloced_chunk(av, p, nb);
3194 return chunk2mem(p);
3197 /* catch all failure paths */
3198 MALLOC_FAILURE_ACTION;
3199 return 0;
3204 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
3205 to the system (via negative arguments to sbrk) if there is unused
3206 memory at the `high' end of the malloc pool. It is called
3207 automatically by free() when top space exceeds the trim
3208 threshold. It is also called by the public malloc_trim routine. It
3209 returns 1 if it actually released any memory, else 0.
3212 #if __STD_C
3213 static int sYSTRIm(size_t pad, mstate av)
3214 #else
3215 static int sYSTRIm(pad, av) size_t pad; mstate av;
3216 #endif
3218 long top_size; /* Amount of top-most memory */
3219 long extra; /* Amount to release */
3220 long released; /* Amount actually released */
3221 char* current_brk; /* address returned by pre-check sbrk call */
3222 char* new_brk; /* address returned by post-check sbrk call */
3223 size_t pagesz;
3225 pagesz = mp_.pagesize;
3226 top_size = chunksize(av->top);
3228 /* Release in pagesize units, keeping at least one page */
3229 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
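  /* Worked example (editorial note): with 4096-byte pages, pad == 0 and a
     32-bit MINSIZE of 16, a 20000-byte top gives
     extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096 == 16384,
     so four whole pages are offered back and the remainder stays in top. */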
3231 if (extra > 0) {
3234 Only proceed if end of memory is where we last set it.
3235 This avoids problems if there were foreign sbrk calls.
3237 current_brk = (char*)(MORECORE(0));
3238 if (current_brk == (char*)(av->top) + top_size) {
3241 Attempt to release memory. We ignore MORECORE return value,
3242 and instead call again to find out where new end of memory is.
3243 This avoids problems if first call releases less than we asked,
3244 or if failure somehow altered the brk value. (We could still
3245 encounter problems if it altered brk in some very bad way,
3246 but the only thing we can do is adjust anyway, which will cause
3247 some downstream failure.)
3250 MORECORE(-extra);
3251 /* Call the `morecore' hook if necessary. */
3252 if (__after_morecore_hook)
3253 (*__after_morecore_hook) ();
3254 new_brk = (char*)(MORECORE(0));
3256 if (new_brk != (char*)MORECORE_FAILURE) {
3257 released = (long)(current_brk - new_brk);
3259 if (released != 0) {
3260 /* Success. Adjust top. */
3261 av->system_mem -= released;
3262 set_head(av->top, (top_size - released) | PREV_INUSE);
3263 check_malloc_state(av);
3264 return 1;
3269 return 0;
3272 #ifdef HAVE_MMAP
3274 static void
3275 internal_function
3276 #if __STD_C
3277 munmap_chunk(mchunkptr p)
3278 #else
3279 munmap_chunk(p) mchunkptr p;
3280 #endif
3282 INTERNAL_SIZE_T size = chunksize(p);
3284 assert (chunk_is_mmapped(p));
3285 #if 0
3286 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3287 assert((mp_.n_mmaps > 0));
3288 #endif
3290 uintptr_t block = (uintptr_t) p - p->prev_size;
3291 size_t total_size = p->prev_size + size;
3292 /* Unfortunately we have to do the compiler's job by hand here. Normally
3293 we would test BLOCK and TOTAL-SIZE separately for compliance with the
3294 page size. But gcc does not recognize the optimization possibility
3295 (at the moment, at least), so we combine the two values into one before
3296 the bit test. */
3297 if (__builtin_expect (((block | total_size) & (mp_.pagesize - 1)) != 0, 0))
3299 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
3300 chunk2mem (p));
3301 return;
3304 mp_.n_mmaps--;
3305 mp_.mmapped_mem -= total_size;
3307 int ret __attribute__ ((unused)) = munmap((char *)block, total_size);
3309 /* munmap returns non-zero on failure */
3310 assert(ret == 0);
3313 #if HAVE_MREMAP
3315 static mchunkptr
3316 internal_function
3317 #if __STD_C
3318 mremap_chunk(mchunkptr p, size_t new_size)
3319 #else
3320 mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
3321 #endif
3323 size_t page_mask = mp_.pagesize - 1;
3324 INTERNAL_SIZE_T offset = p->prev_size;
3325 INTERNAL_SIZE_T size = chunksize(p);
3326 char *cp;
3328 assert (chunk_is_mmapped(p));
3329 #if 0
3330 assert(! ((char*)p >= mp_.sbrk_base && (char*)p < mp_.sbrk_base + mp_.sbrked_mem));
3331 assert((mp_.n_mmaps > 0));
3332 #endif
3333 assert(((size + offset) & (mp_.pagesize-1)) == 0);
3335 /* Note the extra SIZE_SZ overhead, as in the mmap path of sYSMALLOc(). */
3336 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
3338 cp = (char *)mremap((char *)p - offset, size + offset, new_size,
3339 MREMAP_MAYMOVE);
3341 if (cp == MAP_FAILED) return 0;
3343 p = (mchunkptr)(cp + offset);
3345 assert(aligned_OK(chunk2mem(p)));
3347 assert((p->prev_size == offset));
3348 set_head(p, (new_size - offset)|IS_MMAPPED);
3350 mp_.mmapped_mem -= size + offset;
3351 mp_.mmapped_mem += new_size;
3352 if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
3353 mp_.max_mmapped_mem = mp_.mmapped_mem;
3354 #ifdef NO_THREADS
3355 if ((unsigned long)(mp_.mmapped_mem + arena_mem + main_arena.system_mem) >
3356 mp_.max_total_mem)
3357 mp_.max_total_mem = mp_.mmapped_mem + arena_mem + main_arena.system_mem;
3358 #endif
3359 return p;
3362 #endif /* HAVE_MREMAP */
3364 #endif /* HAVE_MMAP */
3366 /*------------------------ Public wrappers. --------------------------------*/
3368 Void_t*
3369 public_mALLOc(size_t bytes)
3371 mstate ar_ptr;
3372 Void_t *victim;
3374 __malloc_ptr_t (*hook) (size_t, __const __malloc_ptr_t) = __malloc_hook;
3375 if (hook != NULL)
3376 return (*hook)(bytes, RETURN_ADDRESS (0));
3378 arena_get(ar_ptr, bytes);
3379 if(!ar_ptr)
3380 return 0;
3381 victim = _int_malloc(ar_ptr, bytes);
3382 if(!victim) {
3383 /* Maybe the failure is due to running out of mmapped areas. */
3384 if(ar_ptr != &main_arena) {
3385 (void)mutex_unlock(&ar_ptr->mutex);
3386 (void)mutex_lock(&main_arena.mutex);
3387 victim = _int_malloc(&main_arena, bytes);
3388 (void)mutex_unlock(&main_arena.mutex);
3389 } else {
3390 #if USE_ARENAS
3391 /* ... or sbrk() has failed and there is still a chance to mmap() */
3392 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3393 (void)mutex_unlock(&main_arena.mutex);
3394 if(ar_ptr) {
3395 victim = _int_malloc(ar_ptr, bytes);
3396 (void)mutex_unlock(&ar_ptr->mutex);
3398 #endif
3400 } else
3401 (void)mutex_unlock(&ar_ptr->mutex);
3402 assert(!victim || chunk_is_mmapped(mem2chunk(victim)) ||
3403 ar_ptr == arena_for_chunk(mem2chunk(victim)));
3404 return victim;
3406 #ifdef libc_hidden_def
3407 libc_hidden_def(public_mALLOc)
3408 #endif
3410 void
3411 public_fREe(Void_t* mem)
3413 mstate ar_ptr;
3414 mchunkptr p; /* chunk corresponding to mem */
3416 void (*hook) (__malloc_ptr_t, __const __malloc_ptr_t) = __free_hook;
3417 if (hook != NULL) {
3418 (*hook)(mem, RETURN_ADDRESS (0));
3419 return;
3422 if (mem == 0) /* free(0) has no effect */
3423 return;
3425 p = mem2chunk(mem);
3427 #if HAVE_MMAP
3428 if (chunk_is_mmapped(p)) /* release mmapped memory. */
3430 munmap_chunk(p);
3431 return;
3433 #endif
3435 ar_ptr = arena_for_chunk(p);
3436 #if THREAD_STATS
3437 if(!mutex_trylock(&ar_ptr->mutex))
3438 ++(ar_ptr->stat_lock_direct);
3439 else {
3440 (void)mutex_lock(&ar_ptr->mutex);
3441 ++(ar_ptr->stat_lock_wait);
3443 #else
3444 (void)mutex_lock(&ar_ptr->mutex);
3445 #endif
3446 _int_free(ar_ptr, mem);
3447 (void)mutex_unlock(&ar_ptr->mutex);
3449 #ifdef libc_hidden_def
3450 libc_hidden_def (public_fREe)
3451 #endif
3453 Void_t*
3454 public_rEALLOc(Void_t* oldmem, size_t bytes)
3456 mstate ar_ptr;
3457 INTERNAL_SIZE_T nb; /* padded request size */
3459 mchunkptr oldp; /* chunk corresponding to oldmem */
3460 INTERNAL_SIZE_T oldsize; /* its size */
3462 Void_t* newp; /* chunk to return */
3464 __malloc_ptr_t (*hook) (__malloc_ptr_t, size_t, __const __malloc_ptr_t) =
3465 __realloc_hook;
3466 if (hook != NULL)
3467 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
3469 #if REALLOC_ZERO_BYTES_FREES
3470 if (bytes == 0 && oldmem != NULL) { public_fREe(oldmem); return 0; }
3471 #endif
3473 /* realloc of null is supposed to be same as malloc */
3474 if (oldmem == 0) return public_mALLOc(bytes);
3476 oldp = mem2chunk(oldmem);
3477 oldsize = chunksize(oldp);
3479 /* Little security check which won't hurt performance: the
3480 allocator never wraps around at the end of the address space.
3481 Therefore we can exclude some size values which might appear
3482 here by accident or by "design" from some intruder. */
3483 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
3484 || __builtin_expect (misaligned_chunk (oldp), 0))
3486 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
3487 return NULL;
3490 checked_request2size(bytes, nb);
3492 #if HAVE_MMAP
3493 if (chunk_is_mmapped(oldp))
3495 Void_t* newmem;
3497 #if HAVE_MREMAP
3498 newp = mremap_chunk(oldp, nb);
3499 if(newp) return chunk2mem(newp);
3500 #endif
3501 /* Note the extra SIZE_SZ overhead. */
3502 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
3503 /* Must alloc, copy, free. */
3504 newmem = public_mALLOc(bytes);
3505 if (newmem == 0) return 0; /* propagate failure */
3506 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
3507 munmap_chunk(oldp);
3508 return newmem;
3510 #endif
3512 ar_ptr = arena_for_chunk(oldp);
3513 #if THREAD_STATS
3514 if(!mutex_trylock(&ar_ptr->mutex))
3515 ++(ar_ptr->stat_lock_direct);
3516 else {
3517 (void)mutex_lock(&ar_ptr->mutex);
3518 ++(ar_ptr->stat_lock_wait);
3520 #else
3521 (void)mutex_lock(&ar_ptr->mutex);
3522 #endif
3524 #ifndef NO_THREADS
3525 /* As in malloc(), remember this arena for the next allocation. */
3526 tsd_setspecific(arena_key, (Void_t *)ar_ptr);
3527 #endif
3529 newp = _int_realloc(ar_ptr, oldmem, bytes);
3531 (void)mutex_unlock(&ar_ptr->mutex);
3532 assert(!newp || chunk_is_mmapped(mem2chunk(newp)) ||
3533 ar_ptr == arena_for_chunk(mem2chunk(newp)));
3534 return newp;
3536 #ifdef libc_hidden_def
3537 libc_hidden_def (public_rEALLOc)
3538 #endif
3540 Void_t*
3541 public_mEMALIGn(size_t alignment, size_t bytes)
3543 mstate ar_ptr;
3544 Void_t *p;
3546 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3547 __const __malloc_ptr_t)) =
3548 __memalign_hook;
3549 if (hook != NULL)
3550 return (*hook)(alignment, bytes, RETURN_ADDRESS (0));
3552 /* If we need less alignment than we give anyway, just relay to malloc */
3553 if (alignment <= MALLOC_ALIGNMENT) return public_mALLOc(bytes);
3555 /* Otherwise, ensure that it is at least a minimum chunk size */
3556 if (alignment < MINSIZE) alignment = MINSIZE;
3558 arena_get(ar_ptr, bytes + alignment + MINSIZE);
3559 if(!ar_ptr)
3560 return 0;
3561 p = _int_memalign(ar_ptr, alignment, bytes);
3562 (void)mutex_unlock(&ar_ptr->mutex);
3563 if(!p) {
3564 /* Maybe the failure is due to running out of mmapped areas. */
3565 if(ar_ptr != &main_arena) {
3566 (void)mutex_lock(&main_arena.mutex);
3567 p = _int_memalign(&main_arena, alignment, bytes);
3568 (void)mutex_unlock(&main_arena.mutex);
3569 } else {
3570 #if USE_ARENAS
3571 /* ... or sbrk() has failed and there is still a chance to mmap() */
3572 ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, bytes);
3573 if(ar_ptr) {
3574 p = _int_memalign(ar_ptr, alignment, bytes);
3575 (void)mutex_unlock(&ar_ptr->mutex);
3577 #endif
3580 assert(!p || chunk_is_mmapped(mem2chunk(p)) ||
3581 ar_ptr == arena_for_chunk(mem2chunk(p)));
3582 return p;
3584 #ifdef libc_hidden_def
3585 libc_hidden_def (public_mEMALIGn)
3586 #endif
3588 Void_t*
3589 public_vALLOc(size_t bytes)
3591 mstate ar_ptr;
3592 Void_t *p;
3594 if(__malloc_initialized < 0)
3595 ptmalloc_init ();
3597 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3598 __const __malloc_ptr_t)) =
3599 __memalign_hook;
3600 if (hook != NULL)
3601 return (*hook)(mp_.pagesize, bytes, RETURN_ADDRESS (0));
3603 arena_get(ar_ptr, bytes + mp_.pagesize + MINSIZE);
3604 if(!ar_ptr)
3605 return 0;
3606 p = _int_valloc(ar_ptr, bytes);
3607 (void)mutex_unlock(&ar_ptr->mutex);
3608 return p;
3611 Void_t*
3612 public_pVALLOc(size_t bytes)
3614 mstate ar_ptr;
3615 Void_t *p;
3617 if(__malloc_initialized < 0)
3618 ptmalloc_init ();
3620 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
3621 __const __malloc_ptr_t)) =
3622 __memalign_hook;
3623 if (hook != NULL)
3624 return (*hook)(mp_.pagesize,
3625 (bytes + mp_.pagesize - 1) & ~(mp_.pagesize - 1),
3626 RETURN_ADDRESS (0));
3628 arena_get(ar_ptr, bytes + 2*mp_.pagesize + MINSIZE);
3629 p = _int_pvalloc(ar_ptr, bytes);
3630 (void)mutex_unlock(&ar_ptr->mutex);
3631 return p;
3634 Void_t*
3635 public_cALLOc(size_t n, size_t elem_size)
3637 mstate av;
3638 mchunkptr oldtop, p;
3639 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3640 Void_t* mem;
3641 unsigned long clearsize;
3642 unsigned long nclears;
3643 INTERNAL_SIZE_T* d;
3644 __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
3645 __malloc_hook;
3647 /* size_t is unsigned so the behavior on overflow is defined. */
3648 bytes = n * elem_size;
3649 #define HALF_INTERNAL_SIZE_T \
3650 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
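  /* If both n and elem_size are below HALF_INTERNAL_SIZE_T, their product
     cannot wrap around, so the division-based overflow test is only needed
     when at least one operand has a bit set in its upper half.  */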
3651 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0)) {
3652 if (elem_size != 0 && bytes / elem_size != n) {
3653 MALLOC_FAILURE_ACTION;
3654 return 0;
3658 if (hook != NULL) {
3659 sz = bytes;
3660 mem = (*hook)(sz, RETURN_ADDRESS (0));
3661 if(mem == 0)
3662 return 0;
3663 #ifdef HAVE_MEMCPY
3664 return memset(mem, 0, sz);
3665 #else
3666 while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
3667 return mem;
3668 #endif
3671 sz = bytes;
3673 arena_get(av, sz);
3674 if(!av)
3675 return 0;
3677 /* Check if we hand out the top chunk, in which case there may be no
3678 need to clear. */
3679 #if MORECORE_CLEARS
3680 oldtop = top(av);
3681 oldtopsize = chunksize(top(av));
3682 #if MORECORE_CLEARS < 2
3683 /* Only newly allocated memory is guaranteed to be cleared. */
3684 if (av == &main_arena &&
3685 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *)oldtop)
3686 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *)oldtop);
3687 #endif
3688 #endif
3689 mem = _int_malloc(av, sz);
3691 /* Only clearing follows, so we can unlock early. */
3692 (void)mutex_unlock(&av->mutex);
3694 assert(!mem || chunk_is_mmapped(mem2chunk(mem)) ||
3695 av == arena_for_chunk(mem2chunk(mem)));
3697 if (mem == 0) {
3698 /* Maybe the failure is due to running out of mmapped areas. */
3699 if(av != &main_arena) {
3700 (void)mutex_lock(&main_arena.mutex);
3701 mem = _int_malloc(&main_arena, sz);
3702 (void)mutex_unlock(&main_arena.mutex);
3703 } else {
3704 #if USE_ARENAS
3705 /* ... or sbrk() has failed and there is still a chance to mmap() */
3706 (void)mutex_lock(&main_arena.mutex);
3707 av = arena_get2(av->next ? av : 0, sz);
3708 (void)mutex_unlock(&main_arena.mutex);
3709 if(av) {
3710 mem = _int_malloc(av, sz);
3711 (void)mutex_unlock(&av->mutex);
3713 #endif
3715 if (mem == 0) return 0;
3717 p = mem2chunk(mem);
3719 /* Two optional cases in which clearing is not necessary */
3720 #if HAVE_MMAP
3721 if (chunk_is_mmapped (p))
3723 if (__builtin_expect (perturb_byte, 0))
3724 MALLOC_ZERO (mem, sz);
3725 return mem;
3727 #endif
3729 csz = chunksize(p);
3731 #if MORECORE_CLEARS
3732 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize)) {
3733 /* clear only the bytes from non-freshly-sbrked memory */
3734 csz = oldtopsize;
3736 #endif
3738 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3739 contents have an odd number of INTERNAL_SIZE_T-sized words;
3740 minimally 3. */
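  /* Chunk sizes are multiples of MALLOC_ALIGNMENT (2 * SIZE_SZ) and at least
     MINSIZE, which is why the contents (csz - SIZE_SZ bytes) normally hold an
     odd number of INTERNAL_SIZE_T words, at least 3 (cf. the assertion below). */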
3741 d = (INTERNAL_SIZE_T*)mem;
3742 clearsize = csz - SIZE_SZ;
3743 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
3744 assert(nclears >= 3);
3746 if (nclears > 9)
3747 MALLOC_ZERO(d, clearsize);
3749 else {
3750 *(d+0) = 0;
3751 *(d+1) = 0;
3752 *(d+2) = 0;
3753 if (nclears > 4) {
3754 *(d+3) = 0;
3755 *(d+4) = 0;
3756 if (nclears > 6) {
3757 *(d+5) = 0;
3758 *(d+6) = 0;
3759 if (nclears > 8) {
3760 *(d+7) = 0;
3761 *(d+8) = 0;
3767 return mem;
3770 #ifndef _LIBC
3772 Void_t**
3773 public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks)
3775 mstate ar_ptr;
3776 Void_t** m;
3778 arena_get(ar_ptr, n*elem_size);
3779 if(!ar_ptr)
3780 return 0;
3782 m = _int_icalloc(ar_ptr, n, elem_size, chunks);
3783 (void)mutex_unlock(&ar_ptr->mutex);
3784 return m;
3787 Void_t**
3788 public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks)
3790 mstate ar_ptr;
3791 Void_t** m;
3793 arena_get(ar_ptr, 0);
3794 if(!ar_ptr)
3795 return 0;
3797 m = _int_icomalloc(ar_ptr, n, sizes, chunks);
3798 (void)mutex_unlock(&ar_ptr->mutex);
3799 return m;
3802 void
3803 public_cFREe(Void_t* m)
3805 public_fREe(m);
3808 #endif /* _LIBC */
3811 public_mTRIm(size_t s)
3813 int result;
3815 if(__malloc_initialized < 0)
3816 ptmalloc_init ();
3817 (void)mutex_lock(&main_arena.mutex);
3818 result = mTRIm(s);
3819 (void)mutex_unlock(&main_arena.mutex);
3820 return result;
3823 size_t
3824 public_mUSABLe(Void_t* m)
3826 size_t result;
3828 result = mUSABLe(m);
3829 return result;
3832 void
3833 public_mSTATs()
3835 mSTATs();
3838 struct mallinfo public_mALLINFo()
3840 struct mallinfo m;
3842 if(__malloc_initialized < 0)
3843 ptmalloc_init ();
3844 (void)mutex_lock(&main_arena.mutex);
3845 m = mALLINFo(&main_arena);
3846 (void)mutex_unlock(&main_arena.mutex);
3847 return m;
3851 public_mALLOPt(int p, int v)
3853 int result;
3854 result = mALLOPt(p, v);
3855 return result;
3859 ------------------------------ malloc ------------------------------
3862 Void_t*
3863 _int_malloc(mstate av, size_t bytes)
3865 INTERNAL_SIZE_T nb; /* normalized request size */
3866 unsigned int idx; /* associated bin index */
3867 mbinptr bin; /* associated bin */
3868 mfastbinptr* fb; /* associated fastbin */
3870 mchunkptr victim; /* inspected/selected chunk */
3871 INTERNAL_SIZE_T size; /* its size */
3872 int victim_index; /* its bin index */
3874 mchunkptr remainder; /* remainder from a split */
3875 unsigned long remainder_size; /* its size */
3877 unsigned int block; /* bit map traverser */
3878 unsigned int bit; /* bit map traverser */
3879 unsigned int map; /* current word of binmap */
3881 mchunkptr fwd; /* misc temp for linking */
3882 mchunkptr bck; /* misc temp for linking */
3885 Convert request size to internal form by adding SIZE_SZ bytes
3886 overhead plus possibly more to obtain necessary alignment and/or
3887 to obtain a size of at least MINSIZE, the smallest allocatable
3888 size. Also, checked_request2size traps (returning 0) request sizes
3889 that are so large that they wrap around zero when padded and
3890 aligned.
3893 checked_request2size(bytes, nb);
3896 If the size qualifies as a fastbin, first check corresponding bin.
3897 This code is safe to execute even if av is not yet initialized, so we
3898 can try it without checking, which saves some time on this fast path.
3901 if ((unsigned long)(nb) <= (unsigned long)(get_max_fast ())) {
3902 long int idx = fastbin_index(nb);
3903 fb = &(av->fastbins[idx]);
3904 if ( (victim = *fb) != 0) {
3905 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3906 malloc_printerr (check_action, "malloc(): memory corruption (fast)",
3907 chunk2mem (victim));
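      /* Fastbins are LIFO lists linked through fd; pop the first chunk.  */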
3908 *fb = victim->fd;
3909 check_remalloced_chunk(av, victim, nb);
3910 void *p = chunk2mem(victim);
3911 if (__builtin_expect (perturb_byte, 0))
3912 alloc_perturb (p, bytes);
3913 return p;
3918 If a small request, check regular bin. Since these "smallbins"
3919 hold one size each, no searching within bins is necessary.
3920 (For a large request, we need to wait until unsorted chunks are
3921 processed to find best fit. But for small ones, fits are exact
3922 anyway, so we can check now, which is faster.)
3925 if (in_smallbin_range(nb)) {
3926 idx = smallbin_index(nb);
3927 bin = bin_at(av,idx);
3929 if ( (victim = last(bin)) != bin) {
3930 if (victim == 0) /* initialization check */
3931 malloc_consolidate(av);
3932 else {
3933 bck = victim->bk;
3934 set_inuse_bit_at_offset(victim, nb);
3935 bin->bk = bck;
3936 bck->fd = bin;
3938 if (av != &main_arena)
3939 victim->size |= NON_MAIN_ARENA;
3940 check_malloced_chunk(av, victim, nb);
3941 void *p = chunk2mem(victim);
3942 if (__builtin_expect (perturb_byte, 0))
3943 alloc_perturb (p, bytes);
3944 return p;
3950 If this is a large request, consolidate fastbins before continuing.
3951 While it might look excessive to kill all fastbins before
3952 even seeing if there is space available, this avoids
3953 fragmentation problems normally associated with fastbins.
3954 Also, in practice, programs tend to have runs of either small or
3955 large requests, but less often mixtures, so consolidation is not
3956 invoked all that often in most programs. And the programs in which
3957 it is called frequently otherwise tend to fragment.
3960 else {
3961 idx = largebin_index(nb);
3962 if (have_fastchunks(av))
3963 malloc_consolidate(av);
3967 Process recently freed or remaindered chunks, taking one only if
3968 it is an exact fit, or, if this is a small request, the chunk is the remainder from
3969 the most recent non-exact fit. Place other traversed chunks in
3970 bins. Note that this step is the only place in any routine where
3971 chunks are placed in bins.
3973 The outer loop here is needed because we might not realize until
3974 near the end of malloc that we should have consolidated, so must
3975 do so and retry. This happens at most once, and only when we would
3976 otherwise need to expand memory to service a "small" request.
3979 for(;;) {
3981 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3982 bck = victim->bk;
3983 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3984 || __builtin_expect (victim->size > av->system_mem, 0))
3985 malloc_printerr (check_action, "malloc(): memory corruption",
3986 chunk2mem (victim));
3987 size = chunksize(victim);
3990 If a small request, try to use last remainder if it is the
3991 only chunk in unsorted bin. This helps promote locality for
3992 runs of consecutive small requests. This is the only
3993 exception to best-fit, and applies only when there is
3994 no exact fit for a small chunk.
3997 if (in_smallbin_range(nb) &&
3998 bck == unsorted_chunks(av) &&
3999 victim == av->last_remainder &&
4000 (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4002 /* split and reattach remainder */
4003 remainder_size = size - nb;
4004 remainder = chunk_at_offset(victim, nb);
4005 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4006 av->last_remainder = remainder;
4007 remainder->bk = remainder->fd = unsorted_chunks(av);
4009 set_head(victim, nb | PREV_INUSE |
4010 (av != &main_arena ? NON_MAIN_ARENA : 0));
4011 set_head(remainder, remainder_size | PREV_INUSE);
4012 set_foot(remainder, remainder_size);
4014 check_malloced_chunk(av, victim, nb);
4015 void *p = chunk2mem(victim);
4016 if (__builtin_expect (perturb_byte, 0))
4017 alloc_perturb (p, bytes);
4018 return p;
4021 /* remove from unsorted list */
4022 unsorted_chunks(av)->bk = bck;
4023 bck->fd = unsorted_chunks(av);
4025 /* Take now instead of binning if exact fit */
4027 if (size == nb) {
4028 set_inuse_bit_at_offset(victim, size);
4029 if (av != &main_arena)
4030 victim->size |= NON_MAIN_ARENA;
4031 check_malloced_chunk(av, victim, nb);
4032 void *p = chunk2mem(victim);
4033 if (__builtin_expect (perturb_byte, 0))
4034 alloc_perturb (p, bytes);
4035 return p;
4038 /* place chunk in bin */
4040 if (in_smallbin_range(size)) {
4041 victim_index = smallbin_index(size);
4042 bck = bin_at(av, victim_index);
4043 fwd = bck->fd;
4045 else {
4046 victim_index = largebin_index(size);
4047 bck = bin_at(av, victim_index);
4048 fwd = bck->fd;
4050 /* maintain large bins in sorted order */
4051 if (fwd != bck) {
4052 /* Or with inuse bit to speed comparisons */
4053 size |= PREV_INUSE;
4054 /* if smaller than smallest, bypass loop below */
4055 assert((bck->bk->size & NON_MAIN_ARENA) == 0);
4056 if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
4057 fwd = bck;
4058 bck = bck->bk;
4060 else {
4061 assert((fwd->size & NON_MAIN_ARENA) == 0);
4062 while ((unsigned long)(size) < (unsigned long)(fwd->size)) {
4063 fwd = fwd->fd;
4064 assert((fwd->size & NON_MAIN_ARENA) == 0);
4066 bck = fwd->bk;
4071 mark_bin(av, victim_index);
4072 victim->bk = bck;
4073 victim->fd = fwd;
4074 fwd->bk = victim;
4075 bck->fd = victim;
4079 If a large request, scan through the chunks of current bin in
4080 sorted order to find smallest that fits. This is the only step
4081 where an unbounded number of chunks might be scanned without doing
4082 anything useful with them. However the lists tend to be short.
4085 if (!in_smallbin_range(nb)) {
4086 bin = bin_at(av, idx);
4088 /* skip scan if empty or largest chunk is too small */
4089 if ((victim = last(bin)) != bin &&
4090 (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
4092 while (((unsigned long)(size = chunksize(victim)) <
4093 (unsigned long)(nb)))
4094 victim = victim->bk;
4096 remainder_size = size - nb;
4097 unlink(victim, bck, fwd);
4099 /* Exhaust */
4100 if (remainder_size < MINSIZE) {
4101 set_inuse_bit_at_offset(victim, size);
4102 if (av != &main_arena)
4103 victim->size |= NON_MAIN_ARENA;
4105 /* Split */
4106 else {
4107 remainder = chunk_at_offset(victim, nb);
4108 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4109 remainder->bk = remainder->fd = unsorted_chunks(av);
4110 set_head(victim, nb | PREV_INUSE |
4111 (av != &main_arena ? NON_MAIN_ARENA : 0));
4112 set_head(remainder, remainder_size | PREV_INUSE);
4113 set_foot(remainder, remainder_size);
4115 check_malloced_chunk(av, victim, nb);
4116 void *p = chunk2mem(victim);
4117 if (__builtin_expect (perturb_byte, 0))
4118 alloc_perturb (p, bytes);
4119 return p;
4124 Search for a chunk by scanning bins, starting with next largest
4125 bin. This search is strictly by best-fit; i.e., the smallest
4126 (with ties going to approximately the least recently used) chunk
4127 that fits is selected.
4129 The bitmap avoids needing to check that most blocks are nonempty.
4130 The particular case of skipping all bins during warm-up phases
4131 when no chunks have been returned yet is faster than it might look.
4134 ++idx;
4135 bin = bin_at(av,idx);
4136 block = idx2block(idx);
4137 map = av->binmap[block];
4138 bit = idx2bit(idx);
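  /* Each word of the binmap covers a block of bins; a set bit means the
     corresponding bin may be non-empty (bits are cleared lazily below when
     a bin turns out to be empty after all).  */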
4140 for (;;) {
4142 /* Skip rest of block if there are no more set bits in this block. */
4143 if (bit > map || bit == 0) {
4144 do {
4145 if (++block >= BINMAPSIZE) /* out of bins */
4146 goto use_top;
4147 } while ( (map = av->binmap[block]) == 0);
4149 bin = bin_at(av, (block << BINMAPSHIFT));
4150 bit = 1;
4153 /* Advance to bin with set bit. There must be one. */
4154 while ((bit & map) == 0) {
4155 bin = next_bin(bin);
4156 bit <<= 1;
4157 assert(bit != 0);
4160 /* Inspect the bin. It is likely to be non-empty */
4161 victim = last(bin);
4163 /* If a false alarm (empty bin), clear the bit. */
4164 if (victim == bin) {
4165 av->binmap[block] = map &= ~bit; /* Write through */
4166 bin = next_bin(bin);
4167 bit <<= 1;
4170 else {
4171 size = chunksize(victim);
4173 /* We know the first chunk in this bin is big enough to use. */
4174 assert((unsigned long)(size) >= (unsigned long)(nb));
4176 remainder_size = size - nb;
4178 /* unlink */
4179 bck = victim->bk;
4180 bin->bk = bck;
4181 bck->fd = bin;
4183 /* Exhaust */
4184 if (remainder_size < MINSIZE) {
4185 set_inuse_bit_at_offset(victim, size);
4186 if (av != &main_arena)
4187 victim->size |= NON_MAIN_ARENA;
4190 /* Split */
4191 else {
4192 remainder = chunk_at_offset(victim, nb);
4194 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
4195 remainder->bk = remainder->fd = unsorted_chunks(av);
4196 /* advertise as last remainder */
4197 if (in_smallbin_range(nb))
4198 av->last_remainder = remainder;
4200 set_head(victim, nb | PREV_INUSE |
4201 (av != &main_arena ? NON_MAIN_ARENA : 0));
4202 set_head(remainder, remainder_size | PREV_INUSE);
4203 set_foot(remainder, remainder_size);
4205 check_malloced_chunk(av, victim, nb);
4206 void *p = chunk2mem(victim);
4207 if (__builtin_expect (perturb_byte, 0))
4208 alloc_perturb (p, bytes);
4209 return p;
4213 use_top:
4215 If large enough, split off the chunk bordering the end of memory
4216 (held in av->top). Note that this is in accord with the best-fit
4217 search rule. In effect, av->top is treated as larger (and thus
4218 less well fitting) than any other available chunk since it can
4219 be extended to be as large as necessary (up to system
4220 limitations).
4222 We require that av->top always exists (i.e., has size >=
4223 MINSIZE) after initialization, so if it would otherwise be
4224 exhausted by the current request, it is replenished. (The main
4225 reason for ensuring it exists is that we may need MINSIZE space
4226 to put in fenceposts in sysmalloc.)
4229 victim = av->top;
4230 size = chunksize(victim);
4232 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
4233 remainder_size = size - nb;
4234 remainder = chunk_at_offset(victim, nb);
4235 av->top = remainder;
4236 set_head(victim, nb | PREV_INUSE |
4237 (av != &main_arena ? NON_MAIN_ARENA : 0));
4238 set_head(remainder, remainder_size | PREV_INUSE);
4240 check_malloced_chunk(av, victim, nb);
4241 void *p = chunk2mem(victim);
4242 if (__builtin_expect (perturb_byte, 0))
4243 alloc_perturb (p, bytes);
4244 return p;
4248 If there is space available in fastbins, consolidate and retry,
4249 to possibly avoid expanding memory. This can occur only if nb is
4250 in smallbin range so we didn't consolidate upon entry.
4253 else if (have_fastchunks(av)) {
4254 assert(in_smallbin_range(nb));
4255 malloc_consolidate(av);
4256 idx = smallbin_index(nb); /* restore original bin index */
4260 Otherwise, relay to handle system-dependent cases
4262 else {
4263 void *p = sYSMALLOc(nb, av);
4264 if (__builtin_expect (perturb_byte, 0))
4265 alloc_perturb (p, bytes);
4266 return p;
4272 ------------------------------ free ------------------------------
4275 void
4276 _int_free(mstate av, Void_t* mem)
4278 mchunkptr p; /* chunk corresponding to mem */
4279 INTERNAL_SIZE_T size; /* its size */
4280 mfastbinptr* fb; /* associated fastbin */
4281 mchunkptr nextchunk; /* next contiguous chunk */
4282 INTERNAL_SIZE_T nextsize; /* its size */
4283 int nextinuse; /* true if nextchunk is used */
4284 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
4285 mchunkptr bck; /* misc temp for linking */
4286 mchunkptr fwd; /* misc temp for linking */
4288 const char *errstr = NULL;
4290 p = mem2chunk(mem);
4291 size = chunksize(p);
4293 /* Little security check which won't hurt performance: the
4294 allocator never wraps around at the end of the address space.
4295 Therefore we can exclude some size values which might appear
4296 here by accident or by "design" from some intruder. */
4297 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
4298 || __builtin_expect (misaligned_chunk (p), 0))
4300 errstr = "free(): invalid pointer";
4301 errout:
4302 malloc_printerr (check_action, errstr, mem);
4303 return;
4305 /* We know that each chunk is at least MINSIZE bytes in size. */
4306 if (__builtin_expect (size < MINSIZE, 0))
4308 errstr = "free(): invalid size";
4309 goto errout;
4312 check_inuse_chunk(av, p);
4315 If eligible, place chunk on a fastbin so it can be found
4316 and used quickly in malloc.
4319 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
4321 #if TRIM_FASTBINS
4323 If TRIM_FASTBINS set, don't place chunks
4324 bordering top into fastbins
4326 && (chunk_at_offset(p, size) != av->top)
4327 #endif
4330 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
4331 || __builtin_expect (chunksize (chunk_at_offset (p, size))
4332 >= av->system_mem, 0))
4334 errstr = "free(): invalid next size (fast)";
4335 goto errout;
4338 set_fastchunks(av);
4339 fb = &(av->fastbins[fastbin_index(size)]);
4340 /* Another simple check: make sure the top of the bin is not the
4341 record we are going to add (i.e., double free). */
4342 if (__builtin_expect (*fb == p, 0))
4344 errstr = "double free or corruption (fasttop)";
4345 goto errout;
4348 if (__builtin_expect (perturb_byte, 0))
4349 free_perturb (mem, size - SIZE_SZ);
4351 p->fd = *fb;
4352 *fb = p;
4356 Consolidate other non-mmapped chunks as they arrive.
4359 else if (!chunk_is_mmapped(p)) {
4360 nextchunk = chunk_at_offset(p, size);
4362 /* Lightweight tests: check whether the block is already the
4363 top block. */
4364 if (__builtin_expect (p == av->top, 0))
4366 errstr = "double free or corruption (top)";
4367 goto errout;
4369 /* Or whether the next chunk is beyond the boundaries of the arena. */
4370 if (__builtin_expect (contiguous (av)
4371 && (char *) nextchunk
4372 >= ((char *) av->top + chunksize(av->top)), 0))
4374 errstr = "double free or corruption (out)";
4375 goto errout;
4377 /* Or whether the block is actually not marked used. */
4378 if (__builtin_expect (!prev_inuse(nextchunk), 0))
4380 errstr = "double free or corruption (!prev)";
4381 goto errout;
4384 nextsize = chunksize(nextchunk);
4385 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
4386 || __builtin_expect (nextsize >= av->system_mem, 0))
4388 errstr = "free(): invalid next size (normal)";
4389 goto errout;
4392 if (__builtin_expect (perturb_byte, 0))
4393 free_perturb (mem, size - SIZE_SZ);
4395 /* consolidate backward */
4396 if (!prev_inuse(p)) {
4397 prevsize = p->prev_size;
4398 size += prevsize;
4399 p = chunk_at_offset(p, -((long) prevsize));
4400 unlink(p, bck, fwd);
4403 if (nextchunk != av->top) {
4404 /* get and clear inuse bit */
4405 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4407 /* consolidate forward */
4408 if (!nextinuse) {
4409 unlink(nextchunk, bck, fwd);
4410 size += nextsize;
4411 } else
4412 clear_inuse_bit_at_offset(nextchunk, 0);
4415 Place the chunk in unsorted chunk list. Chunks are
4416 not placed into regular bins until after they have
4417 been given one chance to be used in malloc.
4420 bck = unsorted_chunks(av);
4421 fwd = bck->fd;
4422 p->bk = bck;
4423 p->fd = fwd;
4424 bck->fd = p;
4425 fwd->bk = p;
4427 set_head(p, size | PREV_INUSE);
4428 set_foot(p, size);
4430 check_free_chunk(av, p);
4434 If the chunk borders the current high end of memory,
4435 consolidate into top
4438 else {
4439 size += nextsize;
4440 set_head(p, size | PREV_INUSE);
4441 av->top = p;
4442 check_chunk(av, p);
4446 If freeing a large space, consolidate possibly-surrounding
4447 chunks. Then, if the total unused topmost memory exceeds trim
4448 threshold, ask malloc_trim to reduce top.
4450 Unless max_fast is 0, we don't know if there are fastbins
4451 bordering top, so we cannot tell for sure whether threshold
4452 has been reached unless fastbins are consolidated. But we
4453 don't want to consolidate on each free. As a compromise,
4454 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4455 is reached.
4458 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4459 if (have_fastchunks(av))
4460 malloc_consolidate(av);
4462 if (av == &main_arena) {
4463 #ifndef MORECORE_CANNOT_TRIM
4464 if ((unsigned long)(chunksize(av->top)) >=
4465 (unsigned long)(mp_.trim_threshold))
4466 sYSTRIm(mp_.top_pad, av);
4467 #endif
4468 } else {
4469 /* Always try heap_trim(), even if the top chunk is not
4470 large, because the corresponding heap might go away. */
4471 heap_info *heap = heap_for_ptr(top(av));
4473 assert(heap->ar_ptr == av);
4474 heap_trim(heap, mp_.top_pad);
4480 If the chunk was allocated via mmap, release via munmap(). Note
4481 that if HAVE_MMAP is false but chunk_is_mmapped is true, then
4482 user must have overwritten memory. There's nothing we can do to
4483 catch this error unless MALLOC_DEBUG is set, in which case
4484 check_inuse_chunk (above) will have triggered an error.
4487 else {
4488 #if HAVE_MMAP
4489 munmap_chunk (p);
4490 #endif
4495 ------------------------- malloc_consolidate -------------------------
4497 malloc_consolidate is a specialized version of free() that tears
4498 down chunks held in fastbins. Free itself cannot be used for this
4499 purpose since, among other things, it might place chunks back onto
4500 fastbins. So, instead, we need to use a minor variant of the same
4501 code.
4503 Also, because this routine needs to be called the first time through
4504 malloc anyway, it turns out to be the perfect place to trigger
4505 initialization code.
4508 #if __STD_C
4509 static void malloc_consolidate(mstate av)
4510 #else
4511 static void malloc_consolidate(av) mstate av;
4512 #endif
4514 mfastbinptr* fb; /* current fastbin being consolidated */
4515 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4516 mchunkptr p; /* current chunk being consolidated */
4517 mchunkptr nextp; /* next chunk to consolidate */
4518 mchunkptr unsorted_bin; /* bin header */
4519 mchunkptr first_unsorted; /* chunk to link to */
4521 /* These have same use as in free() */
4522 mchunkptr nextchunk;
4523 INTERNAL_SIZE_T size;
4524 INTERNAL_SIZE_T nextsize;
4525 INTERNAL_SIZE_T prevsize;
4526 int nextinuse;
4527 mchunkptr bck;
4528 mchunkptr fwd;
4531 If max_fast is 0, we know that av hasn't
4532 yet been initialized, in which case do so below
4535 if (get_max_fast () != 0) {
4536 clear_fastchunks(av);
4538 unsorted_bin = unsorted_chunks(av);
4541 Remove each chunk from its fast bin and consolidate it, then place it
4542 in the unsorted bin. Among other reasons for doing this,
4543 placing in unsorted bin avoids needing to calculate actual bins
4544 until malloc is sure that chunks aren't immediately going to be
4545 reused anyway.
4548 maxfb = &(av->fastbins[fastbin_index(get_max_fast ())]);
4549 fb = &(av->fastbins[0]);
4550 do {
4551 if ( (p = *fb) != 0) {
4552 *fb = 0;
4554 do {
4555 check_inuse_chunk(av, p);
4556 nextp = p->fd;
4558 /* Slightly streamlined version of consolidation code in free() */
4559 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4560 nextchunk = chunk_at_offset(p, size);
4561 nextsize = chunksize(nextchunk);
4563 if (!prev_inuse(p)) {
4564 prevsize = p->prev_size;
4565 size += prevsize;
4566 p = chunk_at_offset(p, -((long) prevsize));
4567 unlink(p, bck, fwd);
4570 if (nextchunk != av->top) {
4571 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4573 if (!nextinuse) {
4574 size += nextsize;
4575 unlink(nextchunk, bck, fwd);
4576 } else
4577 clear_inuse_bit_at_offset(nextchunk, 0);
4579 first_unsorted = unsorted_bin->fd;
4580 unsorted_bin->fd = p;
4581 first_unsorted->bk = p;
4583 set_head(p, size | PREV_INUSE);
4584 p->bk = unsorted_bin;
4585 p->fd = first_unsorted;
4586 set_foot(p, size);
4589 else {
4590 size += nextsize;
4591 set_head(p, size | PREV_INUSE);
4592 av->top = p;
4595 } while ( (p = nextp) != 0);
4598 } while (fb++ != maxfb);
4600 else {
4601 malloc_init_state(av);
4602 check_malloc_state(av);
4607 ------------------------------ realloc ------------------------------
4610 Void_t*
4611 _int_realloc(mstate av, Void_t* oldmem, size_t bytes)
4613 INTERNAL_SIZE_T nb; /* padded request size */
4615 mchunkptr oldp; /* chunk corresponding to oldmem */
4616 INTERNAL_SIZE_T oldsize; /* its size */
4618 mchunkptr newp; /* chunk to return */
4619 INTERNAL_SIZE_T newsize; /* its size */
4620 Void_t* newmem; /* corresponding user mem */
4622 mchunkptr next; /* next contiguous chunk after oldp */
4624 mchunkptr remainder; /* extra space at end of newp */
4625 unsigned long remainder_size; /* its size */
4627 mchunkptr bck; /* misc temp for linking */
4628 mchunkptr fwd; /* misc temp for linking */
4630 unsigned long copysize; /* bytes to copy */
4631 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4632 INTERNAL_SIZE_T* s; /* copy source */
4633 INTERNAL_SIZE_T* d; /* copy destination */
4635 const char *errstr = NULL;
4638 checked_request2size(bytes, nb);
4640 oldp = mem2chunk(oldmem);
4641 oldsize = chunksize(oldp);
4643 /* Simple tests for old block integrity. */
4644 if (__builtin_expect (misaligned_chunk (oldp), 0))
4646 errstr = "realloc(): invalid pointer";
4647 errout:
4648 malloc_printerr (check_action, errstr, oldmem);
4649 return NULL;
4651 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4652 || __builtin_expect (oldsize >= av->system_mem, 0))
4654 errstr = "realloc(): invalid old size";
4655 goto errout;
4658 check_inuse_chunk(av, oldp);
4660 if (!chunk_is_mmapped(oldp)) {
4662 next = chunk_at_offset(oldp, oldsize);
4663 INTERNAL_SIZE_T nextsize = chunksize(next);
4664 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4665 || __builtin_expect (nextsize >= av->system_mem, 0))
4667 errstr = "realloc(): invalid next size";
4668 goto errout;
4671 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4672 /* already big enough; split below */
4673 newp = oldp;
4674 newsize = oldsize;
4677 else {
4678 /* Try to expand forward into top */
4679 if (next == av->top &&
4680 (unsigned long)(newsize = oldsize + nextsize) >=
4681 (unsigned long)(nb + MINSIZE)) {
4682 set_head_size(oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4683 av->top = chunk_at_offset(oldp, nb);
4684 set_head(av->top, (newsize - nb) | PREV_INUSE);
4685 check_inuse_chunk(av, oldp);
4686 return chunk2mem(oldp);
4689 /* Try to expand forward into next chunk; split off remainder below */
4690 else if (next != av->top &&
4691 !inuse(next) &&
4692 (unsigned long)(newsize = oldsize + nextsize) >=
4693 (unsigned long)(nb)) {
4694 newp = oldp;
4695 unlink(next, bck, fwd);
4698 /* allocate, copy, free */
4699 else {
4700 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4701 if (newmem == 0)
4702 return 0; /* propagate failure */
4704 newp = mem2chunk(newmem);
4705 newsize = chunksize(newp);
4708 Avoid copy if newp is next chunk after oldp.
4710 if (newp == next) {
4711 newsize += oldsize;
4712 newp = oldp;
4714 else {
4716 Unroll copy of <= 36 bytes (72 if 8-byte sizes).
4717 We know that contents have an odd number of
4718 INTERNAL_SIZE_T-sized words; minimally 3.
4721 copysize = oldsize - SIZE_SZ;
4722 s = (INTERNAL_SIZE_T*)(oldmem);
4723 d = (INTERNAL_SIZE_T*)(newmem);
4724 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4725 assert(ncopies >= 3);
4727 if (ncopies > 9)
4728 MALLOC_COPY(d, s, copysize);
4730 else {
4731 *(d+0) = *(s+0);
4732 *(d+1) = *(s+1);
4733 *(d+2) = *(s+2);
4734 if (ncopies > 4) {
4735 *(d+3) = *(s+3);
4736 *(d+4) = *(s+4);
4737 if (ncopies > 6) {
4738 *(d+5) = *(s+5);
4739 *(d+6) = *(s+6);
4740 if (ncopies > 8) {
4741 *(d+7) = *(s+7);
4742 *(d+8) = *(s+8);
4748 _int_free(av, oldmem);
4749 check_inuse_chunk(av, newp);
4750 return chunk2mem(newp);
4755 /* If possible, free extra space in old or extended chunk */
4757 assert((unsigned long)(newsize) >= (unsigned long)(nb));
4759 remainder_size = newsize - nb;
4761 if (remainder_size < MINSIZE) { /* not enough extra to split off */
4762 set_head_size(newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4763 set_inuse_bit_at_offset(newp, newsize);
4765 else { /* split remainder */
4766 remainder = chunk_at_offset(newp, nb);
4767 set_head_size(newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4768 set_head(remainder, remainder_size | PREV_INUSE |
4769 (av != &main_arena ? NON_MAIN_ARENA : 0));
4770 /* Mark remainder as inuse so free() won't complain */
4771 set_inuse_bit_at_offset(remainder, remainder_size);
4772 _int_free(av, chunk2mem(remainder));
4775 check_inuse_chunk(av, newp);
4776 return chunk2mem(newp);
4780 Handle mmap cases
4783 else {
4784 #if HAVE_MMAP
4786 #if HAVE_MREMAP
4787 INTERNAL_SIZE_T offset = oldp->prev_size;
4788 size_t pagemask = mp_.pagesize - 1;
4789 char *cp;
4790 unsigned long sum;
4792 /* Note the extra SIZE_SZ overhead */
4793 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4796 /* no need to remap if the mapping already has the required page-rounded size */
4796 if (oldsize == newsize - offset)
4797 return oldmem;
4799 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, MREMAP_MAYMOVE);
4801 if (cp != MAP_FAILED) {
4803 newp = (mchunkptr)(cp + offset);
4804 set_head(newp, (newsize - offset)|IS_MMAPPED);
4806 assert(aligned_OK(chunk2mem(newp)));
4807 assert((newp->prev_size == offset));
4809 /* update statistics */
4810 sum = mp_.mmapped_mem += newsize - oldsize;
4811 if (sum > (unsigned long)(mp_.max_mmapped_mem))
4812 mp_.max_mmapped_mem = sum;
4813 #ifdef NO_THREADS
4814 sum += main_arena.system_mem;
4815 if (sum > (unsigned long)(mp_.max_total_mem))
4816 mp_.max_total_mem = sum;
4817 #endif
4819 return chunk2mem(newp);
4821 #endif
4823 /* Note the extra SIZE_SZ overhead. */
4824 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
4825 newmem = oldmem; /* do nothing */
4826 else {
4827 /* Must alloc, copy, free. */
4828 newmem = _int_malloc(av, nb - MALLOC_ALIGN_MASK);
4829 if (newmem != 0) {
4830 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4831 _int_free(av, oldmem);
4834 return newmem;
4836 #else
4837 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4838 check_malloc_state(av);
4839 MALLOC_FAILURE_ACTION;
4840 return 0;
4841 #endif
4846 ------------------------------ memalign ------------------------------
4849 Void_t*
4850 _int_memalign(mstate av, size_t alignment, size_t bytes)
4852 INTERNAL_SIZE_T nb; /* padded request size */
4853 char* m; /* memory returned by malloc call */
4854 mchunkptr p; /* corresponding chunk */
4855 char* brk; /* alignment point within p */
4856 mchunkptr newp; /* chunk to return */
4857 INTERNAL_SIZE_T newsize; /* its size */
4858 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4859 mchunkptr remainder; /* spare room at end to split off */
4860 unsigned long remainder_size; /* its size */
4861 INTERNAL_SIZE_T size;
4863 /* If we need less alignment than we give anyway, just relay to malloc */
4865 if (alignment <= MALLOC_ALIGNMENT) return _int_malloc(av, bytes);
4867 /* Otherwise, ensure that it is at least a minimum chunk size */
4869 if (alignment < MINSIZE) alignment = MINSIZE;
4871 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4872 if ((alignment & (alignment - 1)) != 0) {
4873 size_t a = MALLOC_ALIGNMENT * 2;
4874 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4875 alignment = a;
4878 checked_request2size(bytes, nb);
4881 Strategy: call malloc with worst-case padding, find an aligned spot within
4882 the returned chunk, and then possibly free the leading and trailing space.
4886 /* Call malloc with worst case padding to hit alignment. */
4888 m = (char*)(_int_malloc(av, nb + alignment + MINSIZE));
4890 if (m == 0) return 0; /* propagate failure */
4892 p = mem2chunk(m);
4894 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4897 Find an aligned spot inside chunk. Since we need to give back
4898 leading space in a chunk of at least MINSIZE, if the first
4899 calculation places us at a spot with less than MINSIZE leader,
4900 we can move to the next aligned spot -- we've allocated enough
4901 total room so that this is always possible.
4904 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4905 -((signed long) alignment));
4906 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4907 brk += alignment;
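      /* brk is now the header of a chunk whose user pointer is m rounded up
         to a multiple of alignment, adjusted one more alignment unit forward
         if the leading gap would have been smaller than MINSIZE.  */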
4909 newp = (mchunkptr)brk;
4910 leadsize = brk - (char*)(p);
4911 newsize = chunksize(p) - leadsize;
4913 /* For mmapped chunks, just adjust offset */
4914 if (chunk_is_mmapped(p)) {
4915 newp->prev_size = p->prev_size + leadsize;
4916 set_head(newp, newsize|IS_MMAPPED);
4917 return chunk2mem(newp);
4920 /* Otherwise, give back leader, use the rest */
4921 set_head(newp, newsize | PREV_INUSE |
4922 (av != &main_arena ? NON_MAIN_ARENA : 0));
4923 set_inuse_bit_at_offset(newp, newsize);
4924 set_head_size(p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4925 _int_free(av, chunk2mem(p));
4926 p = newp;
4928 assert (newsize >= nb &&
4929 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4932 /* Also give back spare room at the end */
4933 if (!chunk_is_mmapped(p)) {
4934 size = chunksize(p);
4935 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4936 remainder_size = size - nb;
4937 remainder = chunk_at_offset(p, nb);
4938 set_head(remainder, remainder_size | PREV_INUSE |
4939 (av != &main_arena ? NON_MAIN_ARENA : 0));
4940 set_head_size(p, nb);
4941 _int_free(av, chunk2mem(remainder));
4945 check_inuse_chunk(av, p);
4946 return chunk2mem(p);
4949 #if 0
4951 ------------------------------ calloc ------------------------------
4954 #if __STD_C
4955 Void_t* cALLOc(size_t n_elements, size_t elem_size)
4956 #else
4957 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4958 #endif
4960 mchunkptr p;
4961 unsigned long clearsize;
4962 unsigned long nclears;
4963 INTERNAL_SIZE_T* d;
4965 Void_t* mem = mALLOc(n_elements * elem_size);
4967 if (mem != 0) {
4968 p = mem2chunk(mem);
4970 #if MMAP_CLEARS
4971 if (!chunk_is_mmapped(p)) /* don't need to clear mmapped space */
4972 #endif
4975 Unroll clear of <= 36 bytes (72 if 8-byte sizes).
4976 We know that contents have an odd number of
4977 INTERNAL_SIZE_T-sized words; minimally 3.
4980 d = (INTERNAL_SIZE_T*)mem;
4981 clearsize = chunksize(p) - SIZE_SZ;
4982 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4983 assert(nclears >= 3);
4985 if (nclears > 9)
4986 MALLOC_ZERO(d, clearsize);
4988 else {
4989 *(d+0) = 0;
4990 *(d+1) = 0;
4991 *(d+2) = 0;
4992 if (nclears > 4) {
4993 *(d+3) = 0;
4994 *(d+4) = 0;
4995 if (nclears > 6) {
4996 *(d+5) = 0;
4997 *(d+6) = 0;
4998 if (nclears > 8) {
4999 *(d+7) = 0;
5000 *(d+8) = 0;
5007 return mem;
5009 #endif /* 0 */
5011 #ifndef _LIBC
5013 ------------------------- independent_calloc -------------------------
5016 Void_t**
5017 #if __STD_C
5018 _int_icalloc(mstate av, size_t n_elements, size_t elem_size, Void_t* chunks[])
5019 #else
5020 _int_icalloc(av, n_elements, elem_size, chunks)
5021 mstate av; size_t n_elements; size_t elem_size; Void_t* chunks[];
5022 #endif
5024 size_t sz = elem_size; /* serves as 1-element array */
5025 /* opts arg of 3 means all elements are same size, and should be cleared */
5026 return iALLOc(av, n_elements, &sz, 3, chunks);
5030 ------------------------- independent_comalloc -------------------------
5033 Void_t**
5034 #if __STD_C
5035 _int_icomalloc(mstate av, size_t n_elements, size_t sizes[], Void_t* chunks[])
5036 #else
5037 _int_icomalloc(av, n_elements, sizes, chunks)
5038 mstate av; size_t n_elements; size_t sizes[]; Void_t* chunks[];
5039 #endif
5041 return iALLOc(av, n_elements, sizes, 0, chunks);
5046 ------------------------------ ialloc ------------------------------
5047 ialloc provides common support for independent_X routines, handling all of
5048 the combinations that can result.
5050 The opts arg has:
5051 bit 0 set if all elements are same size (using sizes[0])
5052 bit 1 set if elements should be zeroed
5056 static Void_t**
5057 #if __STD_C
5058 iALLOc(mstate av, size_t n_elements, size_t* sizes, int opts, Void_t* chunks[])
5059 #else
5060 iALLOc(av, n_elements, sizes, opts, chunks)
5061 mstate av; size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
5062 #endif
5064 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
5065 INTERNAL_SIZE_T contents_size; /* total size of elements */
5066 INTERNAL_SIZE_T array_size; /* request size of pointer array */
5067 Void_t* mem; /* malloced aggregate space */
5068 mchunkptr p; /* corresponding chunk */
5069 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
5070 Void_t** marray; /* either "chunks" or malloced ptr array */
5071 mchunkptr array_chunk; /* chunk for malloced ptr array */
5072 int mmx; /* to disable mmap */
5073 INTERNAL_SIZE_T size;
5074 INTERNAL_SIZE_T size_flags;
5075 size_t i;
5077 /* Ensure initialization/consolidation */
5078 if (have_fastchunks(av)) malloc_consolidate(av);
5080 /* compute array length, if needed */
5081 if (chunks != 0) {
5082 if (n_elements == 0)
5083 return chunks; /* nothing to do */
5084 marray = chunks;
5085 array_size = 0;
5087 else {
5088 /* if empty request, must still return a chunk representing the empty array */
5089 if (n_elements == 0)
5090 return (Void_t**) _int_malloc(av, 0);
5091 marray = 0;
5092 array_size = request2size(n_elements * (sizeof(Void_t*)));
5095 /* compute total element size */
5096 if (opts & 0x1) { /* all-same-size */
5097 element_size = request2size(*sizes);
5098 contents_size = n_elements * element_size;
5100 else { /* add up all the sizes */
5101 element_size = 0;
5102 contents_size = 0;
5103 for (i = 0; i != n_elements; ++i)
5104 contents_size += request2size(sizes[i]);
5107 /* subtract out alignment bytes from total to minimize overallocation */
5108 size = contents_size + array_size - MALLOC_ALIGN_MASK;
5111 Allocate the aggregate chunk.
5112 But first disable mmap so malloc won't use it, since
5113 we would not be able to later free/realloc space internal
5114 to a segregated mmap region.
5116 mmx = mp_.n_mmaps_max; /* disable mmap */
5117 mp_.n_mmaps_max = 0;
5118 mem = _int_malloc(av, size);
5119 mp_.n_mmaps_max = mmx; /* reset mmap */
5120 if (mem == 0)
5121 return 0;
5123 p = mem2chunk(mem);
5124 assert(!chunk_is_mmapped(p));
5125 remainder_size = chunksize(p);
5127 if (opts & 0x2) { /* optionally clear the elements */
5128 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
5131 size_flags = PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0);
5133 /* If not provided, allocate the pointer array as final part of chunk */
5134 if (marray == 0) {
5135 array_chunk = chunk_at_offset(p, contents_size);
5136 marray = (Void_t**) (chunk2mem(array_chunk));
5137 set_head(array_chunk, (remainder_size - contents_size) | size_flags);
5138 remainder_size = contents_size;
5141 /* split out elements */
5142 for (i = 0; ; ++i) {
5143 marray[i] = chunk2mem(p);
5144 if (i != n_elements-1) {
5145 if (element_size != 0)
5146 size = element_size;
5147 else
5148 size = request2size(sizes[i]);
5149 remainder_size -= size;
5150 set_head(p, size | size_flags);
5151 p = chunk_at_offset(p, size);
5153 else { /* the final element absorbs any overallocation slop */
5154 set_head(p, remainder_size | size_flags);
5155 break;
5159 #if MALLOC_DEBUG
5160 if (marray != chunks) {
5161 /* final element must have exactly exhausted chunk */
5162 if (element_size != 0)
5163 assert(remainder_size == element_size);
5164 else
5165 assert(remainder_size == request2size(sizes[i]));
5166 check_inuse_chunk(av, mem2chunk(marray));
5169 for (i = 0; i != n_elements; ++i)
5170 check_inuse_chunk(av, mem2chunk(marray[i]));
5171 #endif
5173 return marray;
5175 #endif /* _LIBC */
5179 ------------------------------ valloc ------------------------------
5182 Void_t*
5183 #if __STD_C
5184 _int_valloc(mstate av, size_t bytes)
5185 #else
5186 _int_valloc(av, bytes) mstate av; size_t bytes;
5187 #endif
5189 /* Ensure initialization/consolidation */
5190 if (have_fastchunks(av)) malloc_consolidate(av);
5191 return _int_memalign(av, mp_.pagesize, bytes);
5195 ------------------------------ pvalloc ------------------------------
5199 Void_t*
5200 #if __STD_C
5201 _int_pvalloc(mstate av, size_t bytes)
5202 #else
5203 _int_pvalloc(av, bytes) mstate av; size_t bytes;
5204 #endif
5206 size_t pagesz;
5208 /* Ensure initialization/consolidation */
5209 if (have_fastchunks(av)) malloc_consolidate(av);
5210 pagesz = mp_.pagesize;
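  /* (bytes + pagesz - 1) & ~(pagesz - 1) rounds the request up to the next
     multiple of the page size; this relies on pagesz being a power of two.  */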
5211 return _int_memalign(av, pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
5216 ------------------------------ malloc_trim ------------------------------
5219 #if __STD_C
5220 int mTRIm(size_t pad)
5221 #else
5222 int mTRIm(pad) size_t pad;
5223 #endif
5225 mstate av = &main_arena; /* already locked */
5227 /* Ensure initialization/consolidation */
5228 malloc_consolidate(av);
5230 #ifndef MORECORE_CANNOT_TRIM
5231 return sYSTRIm(pad, av);
5232 #else
5233 return 0;
5234 #endif
5239 ------------------------- malloc_usable_size -------------------------
5242 #if __STD_C
5243 size_t mUSABLe(Void_t* mem)
5244 #else
5245 size_t mUSABLe(mem) Void_t* mem;
5246 #endif
5248 mchunkptr p;
5249 if (mem != 0) {
5250 p = mem2chunk(mem);
5251 if (chunk_is_mmapped(p))
5252 return chunksize(p) - 2*SIZE_SZ;
5253 else if (inuse(p))
5254 return chunksize(p) - SIZE_SZ;
5256 return 0;
5260 ------------------------------ mallinfo ------------------------------
5263 struct mallinfo mALLINFo(mstate av)
5265 struct mallinfo mi;
5266 size_t i;
5267 mbinptr b;
5268 mchunkptr p;
5269 INTERNAL_SIZE_T avail;
5270 INTERNAL_SIZE_T fastavail;
5271 int nblocks;
5272 int nfastblocks;
5274 /* Ensure initialization */
5275 if (av->top == 0) malloc_consolidate(av);
5277 check_malloc_state(av);
5279 /* Account for top */
5280 avail = chunksize(av->top);
5281 nblocks = 1; /* top always exists */
5283 /* traverse fastbins */
5284 nfastblocks = 0;
5285 fastavail = 0;
5287 for (i = 0; i < NFASTBINS; ++i) {
5288 for (p = av->fastbins[i]; p != 0; p = p->fd) {
5289 ++nfastblocks;
5290 fastavail += chunksize(p);
5294 avail += fastavail;
5296 /* traverse regular bins */
5297 for (i = 1; i < NBINS; ++i) {
5298 b = bin_at(av, i);
5299 for (p = last(b); p != b; p = p->bk) {
5300 ++nblocks;
5301 avail += chunksize(p);
5305 mi.smblks = nfastblocks;
5306 mi.ordblks = nblocks;
5307 mi.fordblks = avail;
5308 mi.uordblks = av->system_mem - avail;
5309 mi.arena = av->system_mem;
5310 mi.hblks = mp_.n_mmaps;
5311 mi.hblkhd = mp_.mmapped_mem;
5312 mi.fsmblks = fastavail;
5313 mi.keepcost = chunksize(av->top);
5314 mi.usmblks = mp_.max_total_mem;
5315 return mi;
5319 ------------------------------ malloc_stats ------------------------------
5322 void mSTATs()
5324 int i;
5325 mstate ar_ptr;
5326 struct mallinfo mi;
5327 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
5328 #if THREAD_STATS
5329 long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
5330 #endif
5332 if(__malloc_initialized < 0)
5333 ptmalloc_init ();
5334 #ifdef _LIBC
5335 _IO_flockfile (stderr);
5336 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
5337 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
5338 #endif
5339 for (i=0, ar_ptr = &main_arena;; i++) {
5340 (void)mutex_lock(&ar_ptr->mutex);
5341 mi = mALLINFo(ar_ptr);
5342 fprintf(stderr, "Arena %d:\n", i);
5343 fprintf(stderr, "system bytes = %10u\n", (unsigned int)mi.arena);
5344 fprintf(stderr, "in use bytes = %10u\n", (unsigned int)mi.uordblks);
5345 #if MALLOC_DEBUG > 1
5346 if (i > 0)
5347 dump_heap(heap_for_ptr(top(ar_ptr)));
5348 #endif
5349 system_b += mi.arena;
5350 in_use_b += mi.uordblks;
5351 #if THREAD_STATS
5352 stat_lock_direct += ar_ptr->stat_lock_direct;
5353 stat_lock_loop += ar_ptr->stat_lock_loop;
5354 stat_lock_wait += ar_ptr->stat_lock_wait;
5355 #endif
5356 (void)mutex_unlock(&ar_ptr->mutex);
5357 ar_ptr = ar_ptr->next;
5358 if(ar_ptr == &main_arena) break;
5360 #if HAVE_MMAP
5361 fprintf(stderr, "Total (incl. mmap):\n");
5362 #else
5363 fprintf(stderr, "Total:\n");
5364 #endif
5365 fprintf(stderr, "system bytes = %10u\n", system_b);
5366 fprintf(stderr, "in use bytes = %10u\n", in_use_b);
5367 #ifdef NO_THREADS
5368 fprintf(stderr, "max system bytes = %10u\n", (unsigned int)mp_.max_total_mem);
5369 #endif
5370 #if HAVE_MMAP
5371 fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)mp_.max_n_mmaps);
5372 fprintf(stderr, "max mmap bytes = %10lu\n",
5373 (unsigned long)mp_.max_mmapped_mem);
5374 #endif
5375 #if THREAD_STATS
5376 fprintf(stderr, "heaps created = %10d\n", stat_n_heaps);
5377 fprintf(stderr, "locked directly = %10ld\n", stat_lock_direct);
5378 fprintf(stderr, "locked in loop = %10ld\n", stat_lock_loop);
5379 fprintf(stderr, "locked waiting = %10ld\n", stat_lock_wait);
5380 fprintf(stderr, "locked total = %10ld\n",
5381 stat_lock_direct + stat_lock_loop + stat_lock_wait);
5382 #endif
5383 #ifdef _LIBC
5384 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
5385 _IO_funlockfile (stderr);
5386 #endif
5391 ------------------------------ mallopt ------------------------------
5394 #if __STD_C
5395 int mALLOPt(int param_number, int value)
5396 #else
5397 int mALLOPt(param_number, value) int param_number; int value;
5398 #endif
5400 mstate av = &main_arena;
5401 int res = 1;
5403 if(__malloc_initialized < 0)
5404 ptmalloc_init ();
5405 (void)mutex_lock(&av->mutex);
5406 /* Ensure initialization/consolidation */
5407 malloc_consolidate(av);
5409 switch(param_number) {
5410 case M_MXFAST:
5411 if (value >= 0 && value <= MAX_FAST_SIZE) {
5412 set_max_fast(value);
5414 else
5415 res = 0;
5416 break;
5418 case M_TRIM_THRESHOLD:
5419 mp_.trim_threshold = value;
5420 break;
5422 case M_TOP_PAD:
5423 mp_.top_pad = value;
5424 break;
5426 case M_MMAP_THRESHOLD:
5427 #if USE_ARENAS
5428 /* Forbid setting the threshold too high. */
5429 if((unsigned long)value > HEAP_MAX_SIZE/2)
5430 res = 0;
5431 else
5432 #endif
5433 mp_.mmap_threshold = value;
5434 break;
5436 case M_MMAP_MAX:
5437 #if !HAVE_MMAP
5438 if (value != 0)
5439 res = 0;
5440 else
5441 #endif
5442 mp_.n_mmaps_max = value;
5443 break;
5445 case M_CHECK_ACTION:
5446 check_action = value;
5447 break;
5449 case M_PERTURB:
5450 perturb_byte = value;
5451 break;
5453 (void)mutex_unlock(&av->mutex);
5454 return res;
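/* Tuning sketch (application side; not part of this file).  These knobs are
   reached through the public mallopt wrapper defined above, e.g.:

     mallopt (M_MXFAST, 0);                a value of 0 disables fastbins
     mallopt (M_TRIM_THRESHOLD, 128*1024);
     mallopt (M_MMAP_THRESHOLD, 256*1024);

   Each call returns 1 if the value was accepted and 0 otherwise.  */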
5459 -------------------- Alternative MORECORE functions --------------------
5464 General Requirements for MORECORE.
5466 The MORECORE function must have the following properties:
5468 If MORECORE_CONTIGUOUS is false:
5470 * MORECORE must allocate in multiples of pagesize. It will
5471 only be called with arguments that are multiples of pagesize.
5473 * MORECORE(0) must return an address that is at least
5474 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
5476 else (i.e. If MORECORE_CONTIGUOUS is true):
5478 * Consecutive calls to MORECORE with positive arguments
5479 return increasing addresses, indicating that space has been
5480 contiguously extended.
5482 * MORECORE need not allocate in multiples of pagesize.
5483 Calls to MORECORE need not have args of multiples of pagesize.
5485 * MORECORE need not page-align.
5487 In either case:
5489 * MORECORE may allocate more memory than requested. (Or even less,
5490 but this will generally result in a malloc failure.)
5492 * MORECORE must not allocate memory when given argument zero, but
5493 instead return one past the end address of memory from previous
5494 nonzero call. This malloc does NOT call MORECORE(0)
5495 until at least one call with positive arguments is made, so
5496 the initial value returned is not important.
5498 * Even though consecutive calls to MORECORE need not return contiguous
5499 addresses, it must be OK for malloc'ed chunks to span multiple
5500 regions in those cases where they do happen to be contiguous.
5502 * MORECORE need not handle negative arguments -- it may instead
5503 just return MORECORE_FAILURE when given negative arguments.
5504 Negative arguments are always multiples of pagesize. MORECORE
5505 must not misinterpret negative args as large positive unsigned
5506 args. You can suppress all such calls from even occurring by defining
5507 MORECORE_CANNOT_TRIM.
  There is some variation across systems in the type of the argument
  to sbrk/MORECORE. Because size_t is unsigned and sbrk supports
  negative arguments (to release memory), the argument cannot actually
  be size_t; it is normally the signed type of the same width as size_t
  (sometimes declared as "intptr_t", and sometimes "ptrdiff_t"). It
  doesn't much matter though. Internally, we use "long" as arguments,
  which should work across all reasonable possibilities.

  Additionally, if MORECORE ever returns failure for a positive
  request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
  system allocator. This is a useful backup strategy for systems with
  holes in address spaces -- in such cases sbrk cannot contiguously
  expand the heap, but mmap may be able to map noncontiguous space.

  If you'd like mmap to ALWAYS be used, you can define MORECORE to be
  a function that always returns MORECORE_FAILURE.
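
  For instance, a sketch (illustrative only; the name fail_morecore is
  arbitrary and not part of this file):

      static void *fail_morecore(long size)
      {
        return (void *) MORECORE_FAILURE;  // every request then falls back to mmap
      }

      #define MORECORE fail_morecore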
  If you are using this malloc with something other than sbrk (or its
  emulation) to supply memory regions, you probably want to set
  MORECORE_CONTIGUOUS as false. As an example, here is a custom
  allocator kindly contributed for pre-OSX macOS. It uses virtually
  but not necessarily physically contiguous non-paged memory (locked
  in, present and won't get swapped out). You can use it by
  uncommenting this section, adding some #includes, and setting up the
  appropriate defines above:

      #define MORECORE osMoreCore
      #define MORECORE_CONTIGUOUS 0

  There is also a shutdown routine that should somehow be called for
  cleanup upon program exit.

  #define MAX_POOL_ENTRIES 100
  #define MINIMUM_MORECORE_SIZE  (64 * 1024)
  static int next_os_pool;
  void *our_os_pools[MAX_POOL_ENTRIES];

  void *osMoreCore(int size)
  {
    void *ptr = 0;
    static void *sbrk_top = 0;

    if (size > 0)
    {
      if (size < MINIMUM_MORECORE_SIZE)
         size = MINIMUM_MORECORE_SIZE;
      if (CurrentExecutionLevel() == kTaskLevel)
         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
      if (ptr == 0)
      {
        return (void *) MORECORE_FAILURE;
      }
      // save ptrs so they can be freed during cleanup
      our_os_pools[next_os_pool] = ptr;
      next_os_pool++;
      ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
      sbrk_top = (char *) ptr + size;
      return ptr;
    }
    else if (size < 0)
    {
      // we don't currently support shrink behavior
      return (void *) MORECORE_FAILURE;
    }
    else
    {
      return sbrk_top;
    }
  }

  // cleanup any allocated memory pools
  // called as last thing before shutting down driver

  void osCleanupMem(void)
  {
    void **ptr;

    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
      if (*ptr)
      {
        PoolDeallocate(*ptr);
        *ptr = 0;
      }
  }

*/
/* Helper code.  */

extern char **__libc_argv attribute_hidden;

/* Report a heap error: depending on the bits set in ACTION, print STR
   alone, print a message including the program name and PTR, and/or
   abort.  */
static void
malloc_printerr(int action, const char *str, void *ptr)
{
  if ((action & 5) == 5)
    __libc_message (action & 2, "%s\n", str);
  else if (action & 1)
    {
      char buf[2 * sizeof (uintptr_t) + 1];

      buf[sizeof (buf) - 1] = '\0';
      char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
      while (cp > buf)
	*--cp = '0';

      __libc_message (action & 2,
		      "*** glibc detected *** %s: %s: 0x%s ***\n",
		      __libc_argv[0] ?: "<unknown>", str, cp);
    }
  else if (action & 2)
    abort ();
}

#ifdef _LIBC
# include <sys/param.h>

/* We need a wrapper function for one of the additions of POSIX.  */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
					__const __malloc_ptr_t)) =
    __memalign_hook;

  /* Test whether the SIZE argument is valid.  It must be a power of
     two multiple of sizeof (void *).  */
  if (alignment % sizeof (void *) != 0
      || !powerof2 (alignment / sizeof (void *)) != 0
      || alignment == 0)
    return EINVAL;

  /* Call the hook here, so that caller is posix_memalign's caller
     and not posix_memalign itself.  */
  if (hook != NULL)
    mem = (*hook)(alignment, size, RETURN_ADDRESS (0));
  else
    mem = public_mEMALIGn (alignment, size);

  if (mem != NULL) {
    *memptr = mem;
    return 0;
  }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)

strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
strong_alias (__libc_memalign, __memalign)
weak_alias (__libc_memalign, memalign)
strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
strong_alias (__libc_mallinfo, __mallinfo)
weak_alias (__libc_mallinfo, mallinfo)
strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
weak_alias (__malloc_get_state, malloc_get_state)
weak_alias (__malloc_set_state, malloc_set_state)

#endif /* _LIBC */
/* ------------------------------------------------------------
History:

[see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]

*/
/*
 * Local variables:
 * c-basic-offset: 2
 * End:
 */