[glibc.git] / malloc / malloc.c
1 /* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
20
21 /*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29 * Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33 * Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44 * Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71 * Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 cfree(void* p);
88 malloc_trim(size_t pad);
89 malloc_usable_size(void* p);
90 malloc_stats();
91
92 * Vital statistics:
93
94 Supported pointer representation: 4 or 8 bytes
95 Supported size_t representation: 4 or 8 bytes
96 Note that size_t is allowed to be 4 bytes even if pointers are 8.
97 You can adjust this by defining INTERNAL_SIZE_T
98
99 Alignment: 2 * sizeof(size_t) (default)
100 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
101 nearly all current machines and C compilers. However, you can
102 define MALLOC_ALIGNMENT to be wider than this if necessary.
103
104 Minimum overhead per allocated chunk: 4 or 8 bytes
105 Each malloced chunk has a hidden word of overhead holding size
106 and status information.
107
108 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
109 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
110
111 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
112 ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
113 needed; 4 (8) for a trailing size field and 8 (16) bytes for
114 free list pointers. Thus, the minimum allocatable size is
115 16/24/32 bytes.
116
117 Even a request for zero bytes (i.e., malloc(0)) returns a
118 pointer to something of the minimum allocatable size.
119
120 The maximum overhead wastage (i.e., number of extra bytes
121 allocated than were requested in malloc) is less than or equal
122 to the minimum size, except for requests >= mmap_threshold that
123 are serviced via mmap(), where the worst case wastage is 2 *
124 sizeof(size_t) bytes plus the remainder from a system page (the
125 minimal mmap unit); typically 4096 or 8192 bytes.
126
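As a worked example (assuming 8-byte size_t and 16-byte alignment,
the usual 64-bit configuration): a request for 100 bytes gains an
8-byte size/status word (108 bytes) and is rounded up to a 112-byte
chunk, of which 104 bytes are usable because an in-use chunk may
also borrow the following chunk's prev_size word. A request for 0
bytes yields the 32-byte minimum chunk with 24 usable bytes.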
127 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
128 8-byte size_t: 2^64 minus about two pages
129
130 It is assumed that (possibly signed) size_t values suffice to
131 represent chunk sizes. `Possibly signed' is due to the fact
132 that `size_t' may be defined on a system as either a signed or
133 an unsigned type. The ISO C standard says that it must be
134 unsigned, but a few systems are known not to adhere to this.
135 Additionally, even when size_t is unsigned, sbrk (which is by
136 default used to obtain memory from system) accepts signed
137 arguments, and may not be able to handle size_t-wide arguments
138 with negative sign bit. Generally, values that would
139 appear as negative after accounting for overhead and alignment
140 are supported only via mmap(), which does not have this
141 limitation.
142
143 Requests for sizes outside the allowed range will perform an optional
144 failure action and then return null. (Requests may also
145 fail because a system is out of memory.)
146
147 Thread-safety: thread-safe
148
149 Compliance: I believe it is compliant with the 1997 Single Unix Specification
150 Also SVID/XPG, ANSI C, and probably others as well.
151
152 * Synopsis of compile-time options:
153
154 People have reported using previous versions of this malloc on all
155 versions of Unix, sometimes by tweaking some of the defines
156 below. It has been tested most extensively on Solaris and Linux.
157 People also report using it in stand-alone embedded systems.
158
159 The implementation is in straight, hand-tuned ANSI C. It is not
160 at all modular. (Sorry!) It uses a lot of macros. To be at all
161 usable, this code should be compiled using an optimizing compiler
162 (for example gcc -O3) that can simplify expressions and control
163 paths. (FAQ: some macros import variables as arguments rather than
164 declare locals because people reported that some debuggers
165 otherwise get confused.)
166
167 OPTION DEFAULT VALUE
168
169 Compilation Environment options:
170
171 HAVE_MREMAP 0
172
173 Changing default word sizes:
174
175 INTERNAL_SIZE_T size_t
176 MALLOC_ALIGNMENT MAX (2 * sizeof(INTERNAL_SIZE_T),
177 __alignof__ (long double))
178
179 Configuration and functionality options:
180
181 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
182 USE_MALLOC_LOCK NOT defined
183 MALLOC_DEBUG NOT defined
184 REALLOC_ZERO_BYTES_FREES 1
185 TRIM_FASTBINS 0
186
187 Options for customizing MORECORE:
188
189 MORECORE sbrk
190 MORECORE_FAILURE -1
191 MORECORE_CONTIGUOUS 1
192 MORECORE_CANNOT_TRIM NOT defined
193 MORECORE_CLEARS 1
194 MMAP_AS_MORECORE_SIZE (1024 * 1024)
195
196 Tuning options that are also dynamically changeable via mallopt:
197
198 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
199 DEFAULT_TRIM_THRESHOLD 128 * 1024
200 DEFAULT_TOP_PAD 0
201 DEFAULT_MMAP_THRESHOLD 128 * 1024
202 DEFAULT_MMAP_MAX 65536
203
204 There are several other #defined constants and macros that you
205 probably don't want to touch unless you are extending or adapting malloc. */
206
207 /*
208 void* is the pointer type that malloc should say it returns
209 */
210
211 #ifndef void
212 #define void void
213 #endif /*void*/
214
215 #include <stddef.h> /* for size_t */
216 #include <stdlib.h> /* for getenv(), abort() */
217 #include <unistd.h> /* for __libc_enable_secure */
218
219 #include <malloc-machine.h>
220 #include <malloc-sysdep.h>
221
222 #include <atomic.h>
223 #include <_itoa.h>
224 #include <bits/wordsize.h>
225 #include <sys/sysinfo.h>
226
227 #include <ldsodefs.h>
228
229 #include <unistd.h>
230 #include <stdio.h> /* needed for malloc_stats */
231 #include <errno.h>
232
233 #include <shlib-compat.h>
234
235 /* For uintptr_t. */
236 #include <stdint.h>
237
238 /* For va_arg, va_start, va_end. */
239 #include <stdarg.h>
240
241 /* For MIN, MAX, powerof2. */
242 #include <sys/param.h>
243
244
245 /*
246 Debugging:
247
248 Because freed chunks may be overwritten with bookkeeping fields, this
249 malloc will often die when freed memory is overwritten by user
250 programs. This can be very effective (albeit in an annoying way)
251 in helping track down dangling pointers.
252
253 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
254 enabled that will catch more memory errors. You probably won't be
255 able to make much sense of the actual assertion errors, but they
256 should help you locate incorrectly overwritten memory. The checking
257 is fairly extensive, and will slow down execution
258 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
259 will attempt to check every non-mmapped allocated and free chunk in
260 the course of computing the summaries. (By nature, mmapped regions
261 cannot be checked very much automatically.)
262
263 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
264 this code. The assertions in the check routines spell out in more
265 detail the assumptions and invariants underlying the algorithms.
266
267 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
268 checking that all accesses to malloced memory stay within their
269 bounds. However, there are several add-ons and adaptations of this
270 or other mallocs available that do this.
271 */
272
273 #ifndef MALLOC_DEBUG
274 #define MALLOC_DEBUG 0
275 #endif
276
277 #ifdef NDEBUG
278 # define assert(expr) ((void) 0)
279 #else
280 # define assert(expr) \
281 ((expr) \
282 ? ((void) 0) \
283 : __malloc_assert (__STRING (expr), __FILE__, __LINE__, __func__))
284
285 extern const char *__progname;
286
287 static void
288 __malloc_assert (const char *assertion, const char *file, unsigned int line,
289 const char *function)
290 {
291 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
292 __progname, __progname[0] ? ": " : "",
293 file, line,
294 function ? function : "", function ? ": " : "",
295 assertion);
296 fflush (stderr);
297 abort ();
298 }
299 #endif
300
301
302 /*
303 INTERNAL_SIZE_T is the word-size used for internal bookkeeping
304 of chunk sizes.
305
306 The default version is the same as size_t.
307
308 While not strictly necessary, it is best to define this as an
309 unsigned type, even if size_t is a signed type. This may avoid some
310 artificial size limitations on some systems.
311
312 On a 64-bit machine, you may be able to reduce malloc overhead by
313 defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
314 expense of not being able to handle more than 2^32 of malloced
315 space. If this limitation is acceptable, you are encouraged to set
316 this unless you are on a platform requiring 16-byte alignments. In
317 this case the alignment requirements turn out to negate any
318 potential advantages of decreasing size_t word size.
319
320 Implementors: Beware of the possible combinations of:
321 - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
322 and might be the same width as int or as long
323 - size_t might differ in width and signedness from INTERNAL_SIZE_T
324 - int and long might be 32 or 64 bits, and might be the same width
325 To deal with this, most comparisons and difference computations
326 among INTERNAL_SIZE_Ts should cast them to unsigned long, being
327 aware of the fact that casting an unsigned int to a wider long does
328 not sign-extend. (This also makes checking for negative numbers
329 awkward.) Some of these casts result in harmless compiler warnings
330 on some systems.
331 */
332
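/*
  For illustration, the cast idiom recommended above looks like this
  (a sketch only; `a' and `b' stand for arbitrary INTERNAL_SIZE_T
  values computed elsewhere):

    INTERNAL_SIZE_T a, b;
    unsigned long remainder = 0;

    if ((unsigned long) a >= (unsigned long) b)   // compare as unsigned long
      remainder = (unsigned long) a - (unsigned long) b;

  Casting both operands keeps the comparison unsigned even if
  INTERNAL_SIZE_T was configured as a signed or narrower type.
*/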
333 #ifndef INTERNAL_SIZE_T
334 #define INTERNAL_SIZE_T size_t
335 #endif
336
337 /* The corresponding word size */
338 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
339
340
341 /*
342 MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
343 It must be a power of two at least 2 * SIZE_SZ, even on machines
344 for which smaller alignments would suffice. It may be defined as
345 larger than this though. Note however that code and data structures
346 are optimized for the case of 8-byte alignment.
347 */
348
349
350 #ifndef MALLOC_ALIGNMENT
351 # if !SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_16)
352 /* This is the correct definition when there is no past ABI to constrain it.
353
354 Among configurations with a past ABI constraint, it differs from
355 2*SIZE_SZ only on powerpc32. For the time being, changing this is
356 causing more compatibility problems due to malloc_get_state and
357 malloc_set_state than will returning blocks not adequately aligned for
358 long double objects under -mlong-double-128. */
359
360 # define MALLOC_ALIGNMENT (2 * SIZE_SZ < __alignof__ (long double) \
361 ? __alignof__ (long double) : 2 * SIZE_SZ)
362 # else
363 # define MALLOC_ALIGNMENT (2 * SIZE_SZ)
364 # endif
365 #endif
366
367 /* The corresponding bit mask value */
368 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
369
370
371
372 /*
373 REALLOC_ZERO_BYTES_FREES should be set if a call to
374 realloc with zero bytes should be the same as a call to free.
375 This is required by the C standard. Otherwise, since this malloc
376 returns a unique pointer for malloc(0), so does realloc(p, 0).
377 */
378
379 #ifndef REALLOC_ZERO_BYTES_FREES
380 #define REALLOC_ZERO_BYTES_FREES 1
381 #endif
382
383 /*
384 TRIM_FASTBINS controls whether free() of a very small chunk can
385 immediately lead to trimming. Setting to true (1) can reduce memory
386 footprint, but will almost always slow down programs that use a lot
387 of small chunks.
388
389 Define this only if you are willing to give up some speed to more
390 aggressively reduce system-level memory footprint when releasing
391 memory in programs that use many small chunks. You can get
392 essentially the same effect by setting MXFAST to 0, but this can
393 lead to even greater slowdowns in programs using many small chunks.
394 TRIM_FASTBINS is an in-between compile-time option that prevents
395 only those chunks bordering topmost memory from being placed in
396 fastbins.
397 */
398
399 #ifndef TRIM_FASTBINS
400 #define TRIM_FASTBINS 0
401 #endif
402
403
404 /* Definition for getting more memory from the OS. */
405 #define MORECORE (*__morecore)
406 #define MORECORE_FAILURE 0
407 void * __default_morecore (ptrdiff_t);
408 void *(*__morecore)(ptrdiff_t) = __default_morecore;
409
410
411 #include <string.h>
412
413 /*
414 MORECORE-related declarations. By default, rely on sbrk
415 */
416
417
418 /*
419 MORECORE is the name of the routine to call to obtain more memory
420 from the system. See below for general guidance on writing
421 alternative MORECORE functions, as well as a version for WIN32 and a
422 sample version for pre-OSX macos.
423 */
424
425 #ifndef MORECORE
426 #define MORECORE sbrk
427 #endif
428
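/*
  For illustration only (not part of this file), a hand-crafted
  MORECORE following the sbrk contract might hand out space from a
  static region; the buffer name and size here are hypothetical:

    static char morecore_region[16 * 1024 * 1024]; // hypothetical backing store
    static char *morecore_top = morecore_region;

    void *
    static_morecore (ptrdiff_t increment)
    {
      char *old_top = morecore_top;
      if (increment < 0)
        return (void *) MORECORE_FAILURE;   // this sketch cannot trim
      if (increment > (morecore_region + sizeof morecore_region) - morecore_top)
        return (void *) MORECORE_FAILURE;   // reserved space exhausted
      morecore_top += increment;
      return old_top;                       // previous break, like sbrk
    }

  Such a replacement would be selected by defining MORECORE to this
  function, and MORECORE_CANNOT_TRIM to match its behavior, before
  compiling malloc.
*/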
429 /*
430 MORECORE_FAILURE is the value returned upon failure of MORECORE
431 as well as mmap. Since it cannot be an otherwise valid memory address,
432 and must reflect values of standard sys calls, you probably ought not
433 try to redefine it.
434 */
435
436 #ifndef MORECORE_FAILURE
437 #define MORECORE_FAILURE (-1)
438 #endif
439
440 /*
441 If MORECORE_CONTIGUOUS is true, take advantage of fact that
442 consecutive calls to MORECORE with positive arguments always return
443 contiguous increasing addresses. This is true of unix sbrk. Even
444 if not defined, when regions happen to be contiguous, malloc will
445 permit allocations spanning regions obtained from different
446 calls. But defining this when applicable enables some stronger
447 consistency checks and space efficiencies.
448 */
449
450 #ifndef MORECORE_CONTIGUOUS
451 #define MORECORE_CONTIGUOUS 1
452 #endif
453
454 /*
455 Define MORECORE_CANNOT_TRIM if your version of MORECORE
456 cannot release space back to the system when given negative
457 arguments. This is generally necessary only if you are using
458 a hand-crafted MORECORE function that cannot handle negative arguments.
459 */
460
461 /* #define MORECORE_CANNOT_TRIM */
462
463 /* MORECORE_CLEARS (default 1)
464 The degree to which the routine mapped to MORECORE zeroes out
465 memory: never (0), only for newly allocated space (1) or always
466 (2). The distinction between (1) and (2) is necessary because on
467 some systems, if the application first decrements and then
468 increments the break value, the contents of the reallocated space
469 are unspecified.
470 */
471
472 #ifndef MORECORE_CLEARS
473 # define MORECORE_CLEARS 1
474 #endif
475
476
477 /*
478 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
479 sbrk fails, and mmap is used as a backup. The value must be a
480 multiple of page size. This backup strategy generally applies only
481 when systems have "holes" in address space, so sbrk cannot perform
482 contiguous expansion, but there is still space available on the system.
483 On systems for which this is known to be useful (i.e. most linux
484 kernels), this occurs only when programs allocate huge amounts of
485 memory. Between this, and the fact that mmap regions tend to be
486 limited, the size should be large, to avoid too many mmap calls and
487 thus avoid running out of kernel resources. */
488
489 #ifndef MMAP_AS_MORECORE_SIZE
490 #define MMAP_AS_MORECORE_SIZE (1024 * 1024)
491 #endif
492
493 /*
494 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
495 large blocks.
496 */
497
498 #ifndef HAVE_MREMAP
499 #define HAVE_MREMAP 0
500 #endif
501
502
503 /*
504 This version of malloc supports the standard SVID/XPG mallinfo
505 routine that returns a struct containing usage properties and
506 statistics. It should work on any SVID/XPG compliant system that has
507 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
508 install such a thing yourself, cut out the preliminary declarations
509 as described above and below and save them in a malloc.h file. But
510 there's no compelling reason to bother to do this.)
511
512 The main declaration needed is the mallinfo struct that is returned
513 (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
514 bunch of fields that are not even meaningful in this version of
515 malloc. These fields are instead filled by mallinfo() with
516 other numbers that might be of interest.
517 */
518
519
520 /* ---------- description of public routines ------------ */
521
522 /*
523 malloc(size_t n)
524 Returns a pointer to a newly allocated chunk of at least n bytes, or null
525 if no space is available. Additionally, on failure, errno is
526 set to ENOMEM on ANSI C systems.
527
528 If n is zero, malloc returns a minimum-sized chunk. (The minimum
529 size is 16 bytes on most 32-bit systems, and 24 or 32 bytes on 64-bit
530 systems.) On most systems, size_t is an unsigned type, so calls
531 with negative arguments are interpreted as requests for huge amounts
532 of space, which will often fail. The maximum supported value of n
533 differs across systems, but is in all cases less than the maximum
534 representable value of a size_t.
535 */
536 void* __libc_malloc(size_t);
537 libc_hidden_proto (__libc_malloc)
538
539 /*
540 free(void* p)
541 Releases the chunk of memory pointed to by p, that had been previously
542 allocated using malloc or a related routine such as realloc.
543 It has no effect if p is null. It can have arbitrary (i.e., bad!)
544 effects if p has already been freed.
545
546 Unless disabled (using mallopt), freeing very large spaces will,
547 when possible, automatically trigger operations that give
548 back unused memory to the system, thus reducing program footprint.
549 */
550 void __libc_free(void*);
551 libc_hidden_proto (__libc_free)
552
553 /*
554 calloc(size_t n_elements, size_t element_size);
555 Returns a pointer to n_elements * element_size bytes, with all locations
556 set to zero.
557 */
558 void* __libc_calloc(size_t, size_t);
559
560 /*
561 realloc(void* p, size_t n)
562 Returns a pointer to a chunk of size n that contains the same data
563 as does chunk p up to the minimum of (n, p's size) bytes, or null
564 if no space is available.
565
566 The returned pointer may or may not be the same as p. The algorithm
567 prefers extending p when possible, otherwise it employs the
568 equivalent of a malloc-copy-free sequence.
569
570 If p is null, realloc is equivalent to malloc.
571
572 If space is not available, realloc returns null, errno is set (if on
573 ANSI) and p is NOT freed.
574
575 If n is for fewer bytes than already held by p, the newly unused
576 space is lopped off and freed if possible. Unless the #define
577 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
578 zero (re)allocates a minimum-sized chunk.
579
580 Large chunks that were internally obtained via mmap will always
581 be reallocated using malloc-copy-free sequences unless
582 the system supports MREMAP (currently only linux).
583
584 The old unix realloc convention of allowing the last-free'd chunk
585 to be used as an argument to realloc is not supported.
586 */
587 void* __libc_realloc(void*, size_t);
588 libc_hidden_proto (__libc_realloc)
589
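/*
  Because a failed realloc leaves p allocated and untouched, callers
  normally assign through a temporary instead of overwriting p; a
  minimal sketch (handle_out_of_memory is a hypothetical error path):

    void *tmp = realloc (p, new_size);
    if (tmp == NULL)
      handle_out_of_memory ();   // p still owns its old contents
    else
      p = tmp;
*/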
590 /*
591 memalign(size_t alignment, size_t n);
592 Returns a pointer to a newly allocated chunk of n bytes, aligned
593 in accord with the alignment argument.
594
595 The alignment argument should be a power of two. If the argument is
596 not a power of two, the nearest greater power is used.
597 8-byte alignment is guaranteed by normal malloc calls, so don't
598 bother calling memalign with an argument of 8 or less.
599
600 Overreliance on memalign is a sure way to fragment space.
601 */
602 void* __libc_memalign(size_t, size_t);
603 libc_hidden_proto (__libc_memalign)
604
605 /*
606 valloc(size_t n);
607 Equivalent to memalign(pagesize, n), where pagesize is the page
608 size of the system. If the pagesize is unknown, 4096 is used.
609 */
610 void* __libc_valloc(size_t);
611
612
613
614 /*
615 mallopt(int parameter_number, int parameter_value)
616 Sets tunable parameters. The format is to provide a
617 (parameter-number, parameter-value) pair. mallopt then sets the
618 corresponding parameter to the argument value if it can (i.e., so
619 long as the value is meaningful), and returns 1 if successful else
620 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
621 normally defined in malloc.h. Only one of these (M_MXFAST) is used
622 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
623 so setting them has no effect. But this malloc also supports four
624 other options in mallopt. See below for details. Briefly, supported
625 parameters are as follows (listed defaults are for "typical"
626 configurations).
627
628 Symbol param # default allowed param values
629 M_MXFAST 1 64 0-80 (0 disables fastbins)
630 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
631 M_TOP_PAD -2 0 any
632 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
633 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
634 */
635 int __libc_mallopt(int, int);
636 libc_hidden_proto (__libc_mallopt)
637
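/*
  A short usage sketch (the values are illustrative, not
  recommendations):

    #include <malloc.h>

    mallopt (M_TRIM_THRESHOLD, 256 * 1024);  // trim less eagerly
    mallopt (M_TOP_PAD, 64 * 1024);          // request extra slack from sbrk
    mallopt (M_MMAP_MAX, 0);                 // never service requests via mmap

  Each call returns 1 on success and 0 if the parameter or value is
  rejected; M_MXFAST can be tuned the same way where malloc.h exposes it.
*/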
638
639 /*
640 mallinfo()
641 Returns (by copy) a struct containing various summary statistics:
642
643 arena: current total non-mmapped bytes allocated from system
644 ordblks: the number of free chunks
645 smblks: the number of fastbin blocks (i.e., small chunks that
646 have been freed but not yet reused or consolidated)
647 hblks: current number of mmapped regions
648 hblkhd: total bytes held in mmapped regions
649 usmblks: the maximum total allocated space. This will be greater
650 than current total if trimming has occurred.
651 fsmblks: total bytes held in fastbin blocks
652 uordblks: current total allocated space (normal or mmapped)
653 fordblks: total free space
654 keepcost: the maximum number of bytes that could ideally be released
655 back to system via malloc_trim. ("ideally" means that
656 it ignores page restrictions etc.)
657
658 Because these fields are ints, but internal bookkeeping may
659 be kept as longs, the reported values may wrap around zero and
660 thus be inaccurate.
661 */
662 struct mallinfo __libc_mallinfo(void);
663
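/*
  A small reporting sketch using the fields described above:

    #include <malloc.h>
    #include <stdio.h>

    struct mallinfo mi = mallinfo ();
    printf ("non-mmapped bytes from system: %d\n", mi.arena);
    printf ("bytes in mmapped regions:      %d\n", mi.hblkhd);
    printf ("total allocated space:         %d\n", mi.uordblks);
    printf ("total free space:              %d\n", mi.fordblks);
    printf ("releasable via malloc_trim:    %d\n", mi.keepcost);

  Since the fields are plain ints, the printed values can wrap in
  programs whose totals exceed what an int can represent.
*/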
664
665 /*
666 pvalloc(size_t n);
667 Equivalent to valloc(minimum-page-that-holds(n)), that is,
668 round up n to nearest pagesize.
669 */
670 void* __libc_pvalloc(size_t);
671
672 /*
673 malloc_trim(size_t pad);
674
675 If possible, gives memory back to the system (via negative
676 arguments to sbrk) if there is unused memory at the `high' end of
677 the malloc pool. You can call this after freeing large blocks of
678 memory to potentially reduce the system-level memory requirements
679 of a program. However, it cannot guarantee to reduce memory. Under
680 some allocation patterns, some large free blocks of memory will be
681 locked between two used chunks, so they cannot be given back to
682 the system.
683
684 The `pad' argument to malloc_trim represents the amount of free
685 trailing space to leave untrimmed. If this argument is zero,
686 only the minimum amount of memory to maintain internal data
687 structures will be left (one page or less). Non-zero arguments
688 can be supplied to maintain enough trailing space to service
689 future expected allocations without having to re-obtain memory
690 from the system.
691
692 Malloc_trim returns 1 if it actually released any memory, else 0.
693 On systems that do not support "negative sbrks", it will always
694 return 0.
695 */
696 int __malloc_trim(size_t);
697
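/*
  For example, after releasing a large working set a program might call
  (the 64K pad is illustrative):

    int released = malloc_trim (64 * 1024); // keep 64K slack for future requests
    // released is 1 if any memory went back to the system, else 0
*/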
698 /*
699 malloc_usable_size(void* p);
700
701 Returns the number of bytes you can actually use in
702 an allocated chunk, which may be more than you requested (although
703 often not) due to alignment and minimum size constraints.
704 You can use this many bytes without worrying about
705 overwriting other allocated objects. This is not a particularly great
706 programming practice. malloc_usable_size can be more useful in
707 debugging and assertions, for example:
708
709 p = malloc(n);
710 assert(malloc_usable_size(p) >= 256);
711
712 */
713 size_t __malloc_usable_size(void*);
714
715 /*
716 malloc_stats();
717 Prints on stderr the amount of space obtained from the system (both
718 via sbrk and mmap), the maximum amount (which may be more than
719 current if malloc_trim and/or munmap got called), and the current
720 number of bytes allocated via malloc (or realloc, etc) but not yet
721 freed. Note that this is the number of bytes allocated, not the
722 number requested. It will be larger than the number requested
723 because of alignment and bookkeeping overhead. Because it includes
724 alignment wastage as being in use, this figure may be greater than
725 zero even when no user-level chunks are allocated.
726
727 The reported current and maximum system memory can be inaccurate if
728 a program makes other calls to system memory allocation functions
729 (normally sbrk) outside of malloc.
730
731 malloc_stats prints only the most commonly interesting statistics.
732 More information can be obtained by calling mallinfo.
733
734 */
735 void __malloc_stats(void);
736
737 /*
738 malloc_get_state(void);
739
740 Returns the state of all malloc variables in an opaque data
741 structure.
742 */
743 void* __malloc_get_state(void);
744
745 /*
746 malloc_set_state(void* state);
747
748 Restore the state of all malloc variables from data obtained with
749 malloc_get_state().
750 */
751 int __malloc_set_state(void*);
752
753 /*
754 posix_memalign(void **memptr, size_t alignment, size_t size);
755
756 POSIX wrapper like memalign(), checking for validity of size.
757 */
758 int __posix_memalign(void **, size_t, size_t);
759
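/*
  Unlike memalign, posix_memalign reports the result through memptr and
  returns an error number (0 on success); a minimal sketch:

    #include <stdlib.h>

    void *buf;
    int err = posix_memalign (&buf, 64, 4096); // 64-byte aligned, 4K block
    if (err != 0)   // EINVAL for a bad alignment, ENOMEM when out of memory
      buf = NULL;
*/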
760 /* mallopt tuning options */
761
762 /*
763 M_MXFAST is the maximum request size used for "fastbins", special bins
764 that hold returned chunks without consolidating their spaces. This
765 enables future requests for chunks of the same size to be handled
766 very quickly, but can increase fragmentation, and thus increase the
767 overall memory footprint of a program.
768
769 This malloc manages fastbins very conservatively yet still
770 efficiently, so fragmentation is rarely a problem for values less
771 than or equal to the default. The maximum supported value of MXFAST
772 is 80. You wouldn't want it any higher than this anyway. Fastbins
773 are designed especially for use with many small structs, objects or
774 strings -- the default handles structs/objects/arrays with sizes up
775 to eight 4-byte fields, or small strings representing words, tokens,
776 etc. Using fastbins for larger objects normally worsens
777 fragmentation without improving speed.
778
779 M_MXFAST is set in REQUEST size units. It is internally used in
780 chunksize units, which adds padding and alignment. You can reduce
781 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
782 algorithm to be a closer approximation of fifo-best-fit in all cases,
783 not just for larger requests, but will generally cause it to be
784 slower.
785 */
786
787
788 /* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
789 #ifndef M_MXFAST
790 #define M_MXFAST 1
791 #endif
792
793 #ifndef DEFAULT_MXFAST
794 #define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
795 #endif
796
797
798 /*
799 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
800 to keep before releasing via malloc_trim in free().
801
802 Automatic trimming is mainly useful in long-lived programs.
803 Because trimming via sbrk can be slow on some systems, and can
804 sometimes be wasteful (in cases where programs immediately
805 afterward allocate more large chunks) the value should be high
806 enough so that your overall system performance would improve by
807 releasing this much memory.
808
809 The trim threshold and the mmap control parameters (see below)
810 can be traded off with one another. Trimming and mmapping are
811 two different ways of releasing unused memory back to the
812 system. Between these two, it is often possible to keep
813 system-level demands of a long-lived program down to a bare
814 minimum. For example, in one test suite of sessions measuring
815 the XF86 X server on Linux, using a trim threshold of 128K and a
816 mmap threshold of 192K led to near-minimal long term resource
817 consumption.
818
819 If you are using this malloc in a long-lived program, it should
820 pay to experiment with these values. As a rough guide, you
821 might set to a value close to the average size of a process
822 (program) running on your system. Releasing this much memory
823 would allow such a process to run in memory. Generally, it's
824 worth it to tune for trimming rather than memory mapping when a
825 program undergoes phases where several large chunks are
826 allocated and released in ways that can reuse each other's
827 storage, perhaps mixed with phases where there are no such
828 chunks at all. And in well-behaved long-lived programs,
829 controlling release of large blocks via trimming versus mapping
830 is usually faster.
831
832 However, in most programs, these parameters serve mainly as
833 protection against the system-level effects of carrying around
834 massive amounts of unneeded memory. Since frequent calls to
835 sbrk, mmap, and munmap otherwise degrade performance, the default
836 parameters are set to relatively high values that serve only as
837 safeguards.
838
839 The trim value must be greater than page size to have any useful
840 effect. To disable trimming completely, you can set to
841 (unsigned long)(-1)
842
843 Trim settings interact with fastbin (MXFAST) settings: Unless
844 TRIM_FASTBINS is defined, automatic trimming never takes place upon
845 freeing a chunk with size less than or equal to MXFAST. Trimming is
846 instead delayed until subsequent freeing of larger chunks. However,
847 you can still force an attempted trim by calling malloc_trim.
848
849 Also, trimming is not generally possible in cases where
850 the main arena is obtained via mmap.
851
852 Note that the trick some people use of mallocing a huge space and
853 then freeing it at program startup, in an attempt to reserve system
854 memory, doesn't have the intended effect under automatic trimming,
855 since that memory will immediately be returned to the system.
856 */
857
858 #define M_TRIM_THRESHOLD -1
859
860 #ifndef DEFAULT_TRIM_THRESHOLD
861 #define DEFAULT_TRIM_THRESHOLD (128 * 1024)
862 #endif
863
864 /*
865 M_TOP_PAD is the amount of extra `padding' space to allocate or
866 retain whenever sbrk is called. It is used in two ways internally:
867
868 * When sbrk is called to extend the top of the arena to satisfy
869 a new malloc request, this much padding is added to the sbrk
870 request.
871
872 * When malloc_trim is called automatically from free(),
873 it is used as the `pad' argument.
874
875 In both cases, the actual amount of padding is rounded
876 so that the end of the arena is always a system page boundary.
877
878 The main reason for using padding is to avoid calling sbrk so
879 often. Having even a small pad greatly reduces the likelihood
880 that nearly every malloc request during program start-up (or
881 after trimming) will invoke sbrk, which needlessly wastes
882 time.
883
884 Automatic rounding-up to page-size units is normally sufficient
885 to avoid measurable overhead, so the default is 0. However, in
886 systems where sbrk is relatively slow, it can pay to increase
887 this value, at the expense of carrying around more memory than
888 the program needs.
889 */
890
891 #define M_TOP_PAD -2
892
893 #ifndef DEFAULT_TOP_PAD
894 #define DEFAULT_TOP_PAD (0)
895 #endif
896
897 /*
898 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
899 adjusted MMAP_THRESHOLD.
900 */
901
902 #ifndef DEFAULT_MMAP_THRESHOLD_MIN
903 #define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
904 #endif
905
906 #ifndef DEFAULT_MMAP_THRESHOLD_MAX
907 /* For 32-bit platforms we cannot increase the maximum mmap
908 threshold much because it is also the minimum value for the
909 maximum heap size and its alignment. Going above 512k (i.e., 1M
910 for new heaps) wastes too much address space. */
911 # if __WORDSIZE == 32
912 # define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
913 # else
914 # define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
915 # endif
916 #endif
917
918 /*
919 M_MMAP_THRESHOLD is the request size threshold for using mmap()
920 to service a request. Requests of at least this size that cannot
921 be allocated using already-existing space will be serviced via mmap.
922 (If enough normal freed space already exists it is used instead.)
923
924 Using mmap segregates relatively large chunks of memory so that
925 they can be individually obtained and released from the host
926 system. A request serviced through mmap is never reused by any
927 other request (at least not directly; the system may just so
928 happen to remap successive requests to the same locations).
929
930 Segregating space in this way has the benefits that:
931
932 1. Mmapped space can ALWAYS be individually released back
933 to the system, which helps keep the system level memory
934 demands of a long-lived program low.
935 2. Mapped memory can never become `locked' between
936 other chunks, as can happen with normally allocated chunks, which
937 means that even trimming via malloc_trim would not release them.
938 3. On some systems with "holes" in address spaces, mmap can obtain
939 memory that sbrk cannot.
940
941 However, it has the disadvantages that:
942
943 1. The space cannot be reclaimed, consolidated, and then
944 used to service later requests, as happens with normal chunks.
945 2. It can lead to more wastage because of mmap page alignment
946 requirements
947 3. It causes malloc performance to be more dependent on host
948 system memory management support routines which may vary in
949 implementation quality and may impose arbitrary
950 limitations. Generally, servicing a request via normal
951 malloc steps is faster than going through a system's mmap.
952
953 The advantages of mmap nearly always outweigh disadvantages for
954 "large" chunks, but the value of "large" varies across systems. The
955 default is an empirically derived value that works well in most
956 systems.
957
958
959 Update in 2006:
960 The above was written in 2001. Since then the world has changed a lot.
961 Memory got bigger. Applications got bigger. The virtual address space
962 layout in 32 bit linux changed.
963
964 In the new situation, brk() and mmap space is shared and there are no
965 artificial limits on brk size imposed by the kernel. What is more,
966 applications have started using transient allocations larger than the
967 128Kb imagined in 2001.
968
969 The price for mmap is also high now; each time glibc mmaps from the
970 kernel, the kernel is forced to zero out the memory it gives to the
971 application. Zeroing memory is expensive and eats a lot of cache and
972 memory bandwidth. This has nothing to do with the efficiency of the
973 virtual memory system; with mmap the kernel simply has no choice but
974 to zero.
975
976 In 2001, the kernel had a maximum size for brk() which was about 800
977 megabytes on 32 bit x86, at that point brk() would hit the first
978 mmapped shared libraries and couldn't expand anymore. With current 2.6
979 kernels, the VA space layout is different and brk() and mmap
980 both can span the entire heap at will.
981
982 Rather than using a static threshold for the brk/mmap tradeoff,
983 we are now using a simple dynamic one. The goal is still to avoid
984 fragmentation. The old goals we kept are
985 1) try to get the long lived large allocations to use mmap()
986 2) really large allocations should always use mmap()
987 and we're adding now:
988 3) transient allocations should use brk() to avoid forcing the kernel
989 to zero memory over and over again
990
991 The implementation works with a sliding threshold, which is by default
992 limited to go between 128Kb and 32Mb (64Mb for 64-bit machines) and starts
993 out at 128Kb as per the 2001 default.
994
995 This allows us to satisfy requirement 1) under the assumption that long
996 lived allocations are made early in the process' lifespan, before it has
997 started doing dynamic allocations of the same size (which will
998 increase the threshold).
999
1000 The upper bound on the threshold satisfies requirement 2)
1001
1002 The threshold goes up in value when the application frees memory that was
1003 allocated with the mmap allocator. The idea is that once the application
1004 starts freeing memory of a certain size, it's highly probable that this is
1005 a size the application uses for transient allocations. This estimator
1006 is there to satisfy the new third requirement.
1007
1008 */
1009
1010 #define M_MMAP_THRESHOLD -3
1011
1012 #ifndef DEFAULT_MMAP_THRESHOLD
1013 #define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
1014 #endif
1015
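/*
  A program with a known allocation profile can pin the threshold rather
  than rely on the sliding value (numbers are illustrative only):

    #include <malloc.h>

    mallopt (M_MMAP_THRESHOLD, 1024 * 1024);  // mmap only chunks of >= 1MB
    mallopt (M_MMAP_MAX, 1024);               // cap the number of live mappings
*/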
1016 /*
1017 M_MMAP_MAX is the maximum number of requests to simultaneously
1018 service using mmap. This parameter exists because
1019 some systems have a limited number of internal tables for
1020 use by mmap, and using more than a few of them may degrade
1021 performance.
1022
1023 The default is set to a value that serves only as a safeguard.
1024 Setting to 0 disables use of mmap for servicing large requests.
1025 */
1026
1027 #define M_MMAP_MAX -4
1028
1029 #ifndef DEFAULT_MMAP_MAX
1030 #define DEFAULT_MMAP_MAX (65536)
1031 #endif
1032
1033 #include <malloc.h>
1034
1035 #ifndef RETURN_ADDRESS
1036 #define RETURN_ADDRESS(X_) (NULL)
1037 #endif
1038
1039 /* On some platforms, internal (non-exported) functions can be compiled better.
1040 Let the environment provide a macro and define it to be empty if it
1041 is not available. */
1042 #ifndef internal_function
1043 # define internal_function
1044 #endif
1045
1046 /* Forward declarations. */
1047 struct malloc_chunk;
1048 typedef struct malloc_chunk* mchunkptr;
1049
1050 /* Internal routines. */
1051
1052 static void* _int_malloc(mstate, size_t);
1053 static void _int_free(mstate, mchunkptr, int);
1054 static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
1055 INTERNAL_SIZE_T);
1056 static void* _int_memalign(mstate, size_t, size_t);
1057 static void* _mid_memalign(size_t, size_t, void *);
1058
1059 static void malloc_printerr(int action, const char *str, void *ptr);
1060
1061 static void* internal_function mem2mem_check(void *p, size_t sz);
1062 static int internal_function top_check(void);
1063 static void internal_function munmap_chunk(mchunkptr p);
1064 #if HAVE_MREMAP
1065 static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1066 #endif
1067
1068 static void* malloc_check(size_t sz, const void *caller);
1069 static void free_check(void* mem, const void *caller);
1070 static void* realloc_check(void* oldmem, size_t bytes,
1071 const void *caller);
1072 static void* memalign_check(size_t alignment, size_t bytes,
1073 const void *caller);
1074 #ifndef NO_THREADS
1075 static void* malloc_atfork(size_t sz, const void *caller);
1076 static void free_atfork(void* mem, const void *caller);
1077 #endif
1078
1079 /* ------------------ MMAP support ------------------ */
1080
1081
1082 #include <fcntl.h>
1083 #include <sys/mman.h>
1084
1085 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1086 # define MAP_ANONYMOUS MAP_ANON
1087 #endif
1088
1089 #ifndef MAP_NORESERVE
1090 # define MAP_NORESERVE 0
1091 #endif
1092
1093 #define MMAP(addr, size, prot, flags) \
1094 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1095
1096
1097 /*
1098 ----------------------- Chunk representations -----------------------
1099 */
1100
1101
1102 /*
1103 This struct declaration is misleading (but accurate and necessary).
1104 It declares a "view" into memory allowing access to necessary
1105 fields at known offsets from a given base. See explanation below.
1106 */
1107
1108 struct malloc_chunk {
1109
1110 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1111 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1112
1113 struct malloc_chunk* fd; /* double links -- used only if free. */
1114 struct malloc_chunk* bk;
1115
1116 /* Only used for large blocks: pointer to next larger size. */
1117 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1118 struct malloc_chunk* bk_nextsize;
1119 };
1120
1121
1122 /*
1123 malloc_chunk details:
1124
1125 (The following includes lightly edited explanations by Colin Plumb.)
1126
1127 Chunks of memory are maintained using a `boundary tag' method as
1128 described in e.g., Knuth or Standish. (See the paper by Paul
1129 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1130 survey of such techniques.) Sizes of free chunks are stored both
1131 in the front of each chunk and at the end. This makes
1132 consolidating fragmented chunks into bigger chunks very fast. The
1133 size fields also hold bits representing whether chunks are free or
1134 in use.
1135
1136 An allocated chunk looks like this:
1137
1138
1139 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1140 | Size of previous chunk, if allocated | |
1141 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1142 | Size of chunk, in bytes |M|P|
1143 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1144 | User data starts here... .
1145 . .
1146 . (malloc_usable_size() bytes) .
1147 . |
1148 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1149 | Size of chunk |
1150 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1151
1152
1153 Where "chunk" is the front of the chunk for the purpose of most of
1154 the malloc code, but "mem" is the pointer that is returned to the
1155 user. "Nextchunk" is the beginning of the next contiguous chunk.
1156
1157 Chunks always begin on even word boundaries, so the mem portion
1158 (which is returned to the user) is also on an even word boundary, and
1159 thus at least double-word aligned.
1160
1161 Free chunks are stored in circular doubly-linked lists, and look like this:
1162
1163 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1164 | Size of previous chunk |
1165 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1166 `head:' | Size of chunk, in bytes |P|
1167 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1168 | Forward pointer to next chunk in list |
1169 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1170 | Back pointer to previous chunk in list |
1171 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1172 | Unused space (may be 0 bytes long) .
1173 . .
1174 . |
1175 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1176 `foot:' | Size of chunk, in bytes |
1177 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1178
1179 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1180 chunk size (which is always a multiple of two words), is an in-use
1181 bit for the *previous* chunk. If that bit is *clear*, then the
1182 word before the current chunk size contains the previous chunk
1183 size, and can be used to find the front of the previous chunk.
1184 The very first chunk allocated always has this bit set,
1185 preventing access to non-existent (or non-owned) memory. If
1186 prev_inuse is set for any given chunk, then you CANNOT determine
1187 the size of the previous chunk, and might even get a memory
1188 addressing fault when trying to do so.
1189
1190 Note that the `foot' of the current chunk is actually represented
1191 as the prev_size of the NEXT chunk. This makes it easier to
1192 deal with alignments etc but can be very confusing when trying
1193 to extend or adapt this code.
1194
1195 The two exceptions to all this are
1196
1197 1. The special chunk `top' doesn't bother using the
1198 trailing size field since there is no next contiguous chunk
1199 that would have to index off it. After initialization, `top'
1200 is forced to always exist. If it would become less than
1201 MINSIZE bytes long, it is replenished.
1202
1203 2. Chunks allocated via mmap, which have the second-lowest-order
1204 bit M (IS_MMAPPED) set in their size fields. Because they are
1205 allocated one-by-one, each must contain its own trailing size field.
1206
1207 */
1208
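/*
  As a reading aid, the mem/chunk relationship described above can be
  written out directly; this mirrors the chunk2mem/mem2chunk and
  chunksize macros defined below (sketch only):

    // Given a pointer returned to the user, step back over the two
    // header words to reach the chunk, then split size from flag bits.
    struct malloc_chunk *c =
      (struct malloc_chunk *) ((char *) mem - 2 * SIZE_SZ);
    INTERNAL_SIZE_T chunk_bytes = c->size & ~(INTERNAL_SIZE_T) 0x7;
    int prev_in_use = c->size & 0x1;   // P bit
    int is_mmapped  = c->size & 0x2;   // M bit
*/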
1209 /*
1210 ---------- Size and alignment checks and conversions ----------
1211 */
1212
1213 /* conversion from malloc headers to user pointers, and back */
1214
1215 #define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1216 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1217
1218 /* The smallest possible chunk */
1219 #define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1220
1221 /* The smallest size we can malloc is an aligned minimal chunk */
1222
1223 #define MINSIZE \
1224 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1225
1226 /* Check if m has acceptable alignment */
1227
1228 #define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1229
1230 #define misaligned_chunk(p) \
1231 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1232 & MALLOC_ALIGN_MASK)
1233
1234
1235 /*
1236 Check if a request is so large that it would wrap around zero when
1237 padded and aligned. To simplify some other code, the bound is made
1238 low enough so that adding MINSIZE will also not wrap around zero.
1239 */
1240
1241 #define REQUEST_OUT_OF_RANGE(req) \
1242 ((unsigned long) (req) >= \
1243 (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
1244
1245 /* pad request bytes into a usable size -- internal version */
1246
1247 #define request2size(req) \
1248 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1249 MINSIZE : \
1250 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1251
1252 /* Same, except also perform argument check */
1253
1254 #define checked_request2size(req, sz) \
1255 if (REQUEST_OUT_OF_RANGE (req)) { \
1256 __set_errno (ENOMEM); \
1257 return 0; \
1258 } \
1259 (sz) = request2size (req);
1260
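/*
  Worked example (assuming 8-byte SIZE_SZ and 16-byte alignment):
  request2size (100) is (100 + 8 + 15) & ~15 == 112, while
  request2size (0) falls below MINSIZE and is bumped up to MINSIZE.
*/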
1261 /*
1262 --------------- Physical chunk operations ---------------
1263 */
1264
1265
1266 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1267 #define PREV_INUSE 0x1
1268
1269 /* extract inuse bit of previous chunk */
1270 #define prev_inuse(p) ((p)->size & PREV_INUSE)
1271
1272
1273 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1274 #define IS_MMAPPED 0x2
1275
1276 /* check for mmap()'ed chunk */
1277 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
1278
1279
1280 /* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1281 from a non-main arena. This is only set immediately before handing
1282 the chunk to the user, if necessary. */
1283 #define NON_MAIN_ARENA 0x4
1284
1285 /* check for chunk from non-main arena */
1286 #define chunk_non_main_arena(p) ((p)->size & NON_MAIN_ARENA)
1287
1288
1289 /*
1290 Bits to mask off when extracting size
1291
1292 Note: IS_MMAPPED is intentionally not masked off from size field in
1293 macros for which mmapped chunks should never be seen. This should
1294 cause helpful core dumps to occur if it is tried by accident by
1295 people extending or adapting this malloc.
1296 */
1297 #define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1298
1299 /* Get size, ignoring use bits */
1300 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
1301
1302
1303 /* Ptr to next physical malloc_chunk. */
1304 #define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))
1305
1306 /* Ptr to previous physical malloc_chunk */
1307 #define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_size)))
1308
1309 /* Treat space at ptr + offset as a chunk */
1310 #define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1311
1312 /* extract p's inuse bit */
1313 #define inuse(p) \
1314 ((((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size) & PREV_INUSE)
1315
1316 /* set/clear chunk as being inuse without otherwise disturbing */
1317 #define set_inuse(p) \
1318 ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size |= PREV_INUSE
1319
1320 #define clear_inuse(p) \
1321 ((mchunkptr) (((char *) (p)) + ((p)->size & ~SIZE_BITS)))->size &= ~(PREV_INUSE)
1322
1323
1324 /* check/set/clear inuse bits in known places */
1325 #define inuse_bit_at_offset(p, s) \
1326 (((mchunkptr) (((char *) (p)) + (s)))->size & PREV_INUSE)
1327
1328 #define set_inuse_bit_at_offset(p, s) \
1329 (((mchunkptr) (((char *) (p)) + (s)))->size |= PREV_INUSE)
1330
1331 #define clear_inuse_bit_at_offset(p, s) \
1332 (((mchunkptr) (((char *) (p)) + (s)))->size &= ~(PREV_INUSE))
1333
1334
1335 /* Set size at head, without disturbing its use bit */
1336 #define set_head_size(p, s) ((p)->size = (((p)->size & SIZE_BITS) | (s)))
1337
1338 /* Set size/use field */
1339 #define set_head(p, s) ((p)->size = (s))
1340
1341 /* Set size at footer (only when chunk is not in use) */
1342 #define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_size = (s))
1343
1344
1345 /*
1346 -------------------- Internal data structures --------------------
1347
1348 All internal state is held in an instance of malloc_state defined
1349 below. There are no other static variables, except in two optional
1350 cases:
1351 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1352 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1353 for mmap.
1354
1355 Beware of lots of tricks that minimize the total bookkeeping space
1356 requirements. The result is a little over 1K bytes (for 4-byte
1357 pointers and size_t.)
1358 */
1359
1360 /*
1361 Bins
1362
1363 An array of bin headers for free chunks. Each bin is doubly
1364 linked. The bins are approximately proportionally (log) spaced.
1365 There are a lot of these bins (128). This may look excessive, but
1366 works very well in practice. Most bins hold sizes that are
1367 unusual as malloc request sizes, but are more usual for fragments
1368 and consolidated sets of chunks, which is what these bins hold, so
1369 they can be found quickly. All procedures maintain the invariant
1370 that no consolidated chunk physically borders another one, so each
1371 chunk in a list is known to be preceded and followed by either
1372 inuse chunks or the ends of memory.
1373
1374 Chunks in bins are kept in size order, with ties going to the
1375 approximately least recently used chunk. Ordering isn't needed
1376 for the small bins, which all contain the same-sized chunks, but
1377 facilitates best-fit allocation for larger chunks. These lists
1378 are just sequential. Keeping them in order almost never requires
1379 enough traversal to warrant using fancier ordered data
1380 structures.
1381
1382 Chunks of the same size are linked with the most
1383 recently freed at the front, and allocations are taken from the
1384 back. This results in LRU (FIFO) allocation order, which tends
1385 to give each chunk an equal opportunity to be consolidated with
1386 adjacent freed chunks, resulting in larger free chunks and less
1387 fragmentation.
1388
1389 To simplify use in double-linked lists, each bin header acts
1390 as a malloc_chunk. This avoids special-casing for headers.
1391 But to conserve space and improve locality, we allocate
1392 only the fd/bk pointers of bins, and then use repositioning tricks
1393 to treat these as the fields of a malloc_chunk*.
1394 */
1395
1396 typedef struct malloc_chunk *mbinptr;
1397
1398 /* addressing -- note that bin_at(0) does not exist */
1399 #define bin_at(m, i) \
1400 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1401 - offsetof (struct malloc_chunk, fd))
1402
1403 /* analog of ++bin */
1404 #define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1405
1406 /* Reminders about list directionality within bins */
1407 #define first(b) ((b)->fd)
1408 #define last(b) ((b)->bk)
1409
1410 /* Take a chunk off a bin list */
1411 #define unlink(P, BK, FD) { \
1412 FD = P->fd; \
1413 BK = P->bk; \
1414 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1415 malloc_printerr (check_action, "corrupted double-linked list", P); \
1416 else { \
1417 FD->bk = BK; \
1418 BK->fd = FD; \
1419 if (!in_smallbin_range (P->size) \
1420 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1421 assert (P->fd_nextsize->bk_nextsize == P); \
1422 assert (P->bk_nextsize->fd_nextsize == P); \
1423 if (FD->fd_nextsize == NULL) { \
1424 if (P->fd_nextsize == P) \
1425 FD->fd_nextsize = FD->bk_nextsize = FD; \
1426 else { \
1427 FD->fd_nextsize = P->fd_nextsize; \
1428 FD->bk_nextsize = P->bk_nextsize; \
1429 P->fd_nextsize->bk_nextsize = FD; \
1430 P->bk_nextsize->fd_nextsize = FD; \
1431 } \
1432 } else { \
1433 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1434 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1435 } \
1436 } \
1437 } \
1438 }
1439
1440 /*
1441 Indexing
1442
1443 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1444 8 bytes apart. Larger bins are approximately logarithmically spaced:
1445
1446 64 bins of size 8
1447 32 bins of size 64
1448 16 bins of size 512
1449 8 bins of size 4096
1450 4 bins of size 32768
1451 2 bins of size 262144
1452 1 bin of size what's left
1453
1454 There is actually a little bit of slop in the numbers in bin_index
1455 for the sake of speed. This makes no difference elsewhere.
1456
1457 The bins top out around 1MB because we expect to service large
1458 requests via mmap.
1459
1460 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1461 a valid chunk size the small bins are bumped up one.
1462 */
1463
1464 #define NBINS 128
1465 #define NSMALLBINS 64
1466 #define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1467 #define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1468 #define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1469
1470 #define in_smallbin_range(sz) \
1471 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1472
1473 #define smallbin_index(sz) \
1474 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1475 + SMALLBIN_CORRECTION)
1476
1477 #define largebin_index_32(sz) \
1478 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1479 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1480 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1481 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1482 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1483 126)
1484
1485 #define largebin_index_32_big(sz) \
1486 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1487 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1488 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1489 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1490 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1491 126)
1492
1493 // XXX It remains to be seen whether it is good to keep the widths of
1494 // XXX the buckets the same or whether it should be scaled by a factor
1495 // XXX of two as well.
1496 #define largebin_index_64(sz) \
1497 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1498 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1499 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1500 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1501 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1502 126)
1503
1504 #define largebin_index(sz) \
1505 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1506 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1507 : largebin_index_32 (sz))
1508
1509 #define bin_index(sz) \
1510 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
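
/* Illustrative sketch (not compiled into the allocator; assumes the common
   64-bit configuration with SIZE_SZ == 8 and MALLOC_ALIGNMENT == 16, so
   MIN_LARGE_SIZE == 1024): bin selection for two chunk sizes.  */
#if 0
static void
bin_index_example (void)
{
  /* 192 is below MIN_LARGE_SIZE, so it lands in the small bin holding
     exactly 192-byte chunks: 192 >> 4 == 12.  */
  assert (in_smallbin_range (192));
  assert (bin_index (192) == 12);

  /* 2048 is a large size; largebin_index_64 groups it into a 64-byte-wide
     bucket: 48 + (2048 >> 6) == 80.  */
  assert (!in_smallbin_range (2048));
  assert (bin_index (2048) == 80);
}
#endif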
1511
1512
1513 /*
1514 Unsorted chunks
1515
1516 All remainders from chunk splits, as well as all returned chunks,
1517 are first placed in the "unsorted" bin. They are then placed
1518 in regular bins after malloc gives them ONE chance to be used before
1519 binning. So, basically, the unsorted_chunks list acts as a queue,
1520 with chunks being placed on it in free (and malloc_consolidate),
1521 and taken off (to be either used or placed in bins) in malloc.
1522
1523 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1524 does not have to be taken into account in size comparisons.
1525 */
1526
1527 /* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1528 #define unsorted_chunks(M) (bin_at (M, 1))
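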
1529
1530 /*
1531 Top
1532
1533 The top-most available chunk (i.e., the one bordering the end of
1534 available memory) is treated specially. It is never included in
1535 any bin, is used only if no other chunk is available, and is
1536 released back to the system if it is very large (see
1537 M_TRIM_THRESHOLD). Because top initially
1538 points to its own bin with initial zero size, thus forcing
1539 extension on the first malloc request, we avoid having any special
1540 code in malloc to check whether it even exists yet. But we still
1541 need to do so when getting memory from system, so we make
1542 initial_top treat the bin as a legal but unusable chunk during the
1543 interval between initialization and the first call to
1544 sysmalloc. (This is somewhat delicate, since it relies on
1545 the 2 preceding words to be zero during this interval as well.)
1546 */
1547
1548 /* Conveniently, the unsorted bin can be used as dummy top on first call */
1549 #define initial_top(M) (unsorted_chunks (M))
1550
1551 /*
1552 Binmap
1553
1554 To help compensate for the large number of bins, a one-level index
1555 structure is used for bin-by-bin searching. `binmap' is a
1556 bitvector recording whether bins are definitely empty so they can
1557 be skipped over during traversals. The bits are NOT always
1558 cleared as soon as bins are empty, but instead only
1559 when they are noticed to be empty during traversal in malloc.
1560 */
1561
1562 /* Conservatively use 32 bits per map word, even if on 64bit system */
1563 #define BINMAPSHIFT 5
1564 #define BITSPERMAP (1U << BINMAPSHIFT)
1565 #define BINMAPSIZE (NBINS / BITSPERMAP)
1566
1567 #define idx2block(i) ((i) >> BINMAPSHIFT)
1568 #define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1569
1570 #define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1571 #define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1572 #define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
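
/* Illustrative sketch (not compiled into the allocator): with
   BINMAPSHIFT == 5 each binmap word covers 32 bins, so e.g. bin 70 is
   tracked in word 70 >> 5 == 2, at bit 70 & 31 == 6.  */
#if 0
static void
binmap_example (void)
{
  assert (idx2block (70) == 2);
  assert (idx2bit (70) == (1U << 6));
}
#endif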
1573
1574 /*
1575 Fastbins
1576
1577 An array of lists holding recently freed small chunks. Fastbins
1578 are not doubly linked. It is faster to single-link them, and
1579 since chunks are never removed from the middles of these lists,
1580 double linking is not necessary. Also, unlike regular bins, they
1581 are not even processed in FIFO order (they use faster LIFO) since
1582 ordering doesn't much matter in the transient contexts in which
1583 fastbins are normally used.
1584
1585 Chunks in fastbins keep their inuse bit set, so they cannot
1586 be consolidated with other free chunks. malloc_consolidate
1587 releases all chunks in fastbins and consolidates them with
1588 other free chunks.
1589 */
1590
1591 typedef struct malloc_chunk *mfastbinptr;
1592 #define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1593
1594 /* offset 2 to use otherwise unindexable first 2 bins */
1595 #define fastbin_index(sz) \
1596 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1597
1598
1599 /* The maximum fastbin request size we support */
1600 #define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1601
1602 #define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
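
/* Illustrative sketch (not compiled into the allocator; assumes
   SIZE_SZ == 8, so chunk sizes are multiples of 16 starting at 32):
   fastbin_index maps these sizes to consecutive slots.  */
#if 0
static void
fastbin_index_example (void)
{
  assert (fastbin_index (32) == 0);   /* smallest possible chunk */
  assert (fastbin_index (48) == 1);
  assert (fastbin_index (64) == 2);
}
#endif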
1603
1604 /*
1605 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1606 that triggers automatic consolidation of possibly-surrounding
1607 fastbin chunks. This is a heuristic, so the exact value should not
1608 matter too much. It is defined at half the default trim threshold as a
1609 compromise heuristic to only attempt consolidation if it is likely
1610 to lead to trimming. However, it is not dynamically tunable, since
1611 consolidation reduces fragmentation surrounding large chunks even
1612 if trimming is not used.
1613 */
1614
1615 #define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1616
1617 /*
1618 Since the lowest 2 bits in max_fast don't matter in size comparisons,
1619 they are used as flags.
1620 */
1621
1622 /*
1623 FASTCHUNKS_BIT held in max_fast indicates that there are probably
1624 some fastbin chunks. It is set true on entering a chunk into any
1625 fastbin, and cleared only in malloc_consolidate.
1626
1627 The truth value is inverted so that have_fastchunks will be true
1628 upon startup (since statics are zero-filled), simplifying
1629 initialization checks.
1630 */
1631
1632 #define FASTCHUNKS_BIT (1U)
1633
1634 #define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
1635 #define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1636 #define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
1637
1638 /*
1639 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1640 regions. Otherwise, contiguity is exploited in merging together,
1641 when possible, results from consecutive MORECORE calls.
1642
1643 The initial value comes from MORECORE_CONTIGUOUS, but is
1644 changed dynamically if mmap is ever used as an sbrk substitute.
1645 */
1646
1647 #define NONCONTIGUOUS_BIT (2U)
1648
1649 #define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1650 #define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1651 #define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1652 #define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1653
1654 /*
1655 Set value of max_fast.
1656 Use impossibly small value if 0.
1657 Precondition: there are no existing fastbin chunks.
1658 Setting the value clears fastchunk bit but preserves noncontiguous bit.
1659 */
1660
1661 #define set_max_fast(s) \
1662 global_max_fast = (((s) == 0) \
1663 ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1664 #define get_max_fast() global_max_fast
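
/* A worked example (assuming SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15, and a
   default limit DEFAULT_MXFAST of 64 * SIZE_SZ / 4 == 128 defined elsewhere
   in this file): set_max_fast (128) stores (128 + 8) & ~15 == 128, so chunks
   of up to 128 bytes including overhead are eligible for fastbins.  */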
1665
1666
1667 /*
1668 ----------- Internal state representation and initialization -----------
1669 */
1670
1671 struct malloc_state
1672 {
1673 /* Serialize access. */
1674 mutex_t mutex;
1675
1676 /* Flags (formerly in max_fast). */
1677 int flags;
1678
1679 /* Fastbins */
1680 mfastbinptr fastbinsY[NFASTBINS];
1681
1682 /* Base of the topmost chunk -- not otherwise kept in a bin */
1683 mchunkptr top;
1684
1685 /* The remainder from the most recent split of a small request */
1686 mchunkptr last_remainder;
1687
1688 /* Normal bins packed as described above */
1689 mchunkptr bins[NBINS * 2 - 2];
1690
1691 /* Bitmap of bins */
1692 unsigned int binmap[BINMAPSIZE];
1693
1694 /* Linked list */
1695 struct malloc_state *next;
1696
1697 /* Linked list for free arenas. */
1698 struct malloc_state *next_free;
1699
1700 /* Memory allocated from the system in this arena. */
1701 INTERNAL_SIZE_T system_mem;
1702 INTERNAL_SIZE_T max_system_mem;
1703 };
1704
1705 struct malloc_par
1706 {
1707 /* Tunable parameters */
1708 unsigned long trim_threshold;
1709 INTERNAL_SIZE_T top_pad;
1710 INTERNAL_SIZE_T mmap_threshold;
1711 INTERNAL_SIZE_T arena_test;
1712 INTERNAL_SIZE_T arena_max;
1713
1714 /* Memory map support */
1715 int n_mmaps;
1716 int n_mmaps_max;
1717 int max_n_mmaps;
1718 /* the mmap_threshold is dynamic, until the user sets
1719 it manually, at which point we need to disable any
1720 dynamic behavior. */
1721 int no_dyn_threshold;
1722
1723 /* Statistics */
1724 INTERNAL_SIZE_T mmapped_mem;
1725 /*INTERNAL_SIZE_T sbrked_mem;*/
1726 /*INTERNAL_SIZE_T max_sbrked_mem;*/
1727 INTERNAL_SIZE_T max_mmapped_mem;
1728 INTERNAL_SIZE_T max_total_mem; /* only kept for NO_THREADS */
1729
1730 /* First address handed out by MORECORE/sbrk. */
1731 char *sbrk_base;
1732 };
1733
1734 /* There are several instances of this struct ("arenas") in this
1735 malloc. If you are adapting this malloc in a way that does NOT use
1736 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1737 before using. This malloc relies on the property that malloc_state
1738 is initialized to all zeroes (as is true of C statics). */
1739
1740 static struct malloc_state main_arena =
1741 {
1742 .mutex = MUTEX_INITIALIZER,
1743 .next = &main_arena
1744 };
1745
1746 /* There is only one instance of the malloc parameters. */
1747
1748 static struct malloc_par mp_ =
1749 {
1750 .top_pad = DEFAULT_TOP_PAD,
1751 .n_mmaps_max = DEFAULT_MMAP_MAX,
1752 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1753 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1754 #define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1755 .arena_test = NARENAS_FROM_NCORES (1)
1756 };
1757
1758
1759 /* Non public mallopt parameters. */
1760 #define M_ARENA_TEST -7
1761 #define M_ARENA_MAX -8
1762
1763
1764 /* Maximum size of memory handled in fastbins. */
1765 static INTERNAL_SIZE_T global_max_fast;
1766
1767 /*
1768 Initialize a malloc_state struct.
1769
1770 This is called only from within malloc_consolidate, which needs to
1771 be called in the same contexts anyway. It is never called directly
1772 outside of malloc_consolidate because some optimizing compilers try
1773 to inline it at all call points, which turns out not to be an
1774 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1775 */
1776
1777 static void
1778 malloc_init_state (mstate av)
1779 {
1780 int i;
1781 mbinptr bin;
1782
1783 /* Establish circular links for normal bins */
1784 for (i = 1; i < NBINS; ++i)
1785 {
1786 bin = bin_at (av, i);
1787 bin->fd = bin->bk = bin;
1788 }
1789
1790 #if MORECORE_CONTIGUOUS
1791 if (av != &main_arena)
1792 #endif
1793 set_noncontiguous (av);
1794 if (av == &main_arena)
1795 set_max_fast (DEFAULT_MXFAST);
1796 av->flags |= FASTCHUNKS_BIT;
1797
1798 av->top = initial_top (av);
1799 }
1800
1801 /*
1802 Other internal utilities operating on mstates
1803 */
1804
1805 static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1806 static int systrim (size_t, mstate);
1807 static void malloc_consolidate (mstate);
1808
1809
1810 /* -------------- Early definitions for debugging hooks ---------------- */
1811
1812 /* Define and initialize the hook variables. These weak definitions must
1813 appear before any use of the variables in a function (arena.c uses one). */
1814 #ifndef weak_variable
1815 /* In GNU libc we want the hook variables to be weak definitions to
1816 avoid a problem with Emacs. */
1817 # define weak_variable weak_function
1818 #endif
1819
1820 /* Forward declarations. */
1821 static void *malloc_hook_ini (size_t sz,
1822 const void *caller) __THROW;
1823 static void *realloc_hook_ini (void *ptr, size_t sz,
1824 const void *caller) __THROW;
1825 static void *memalign_hook_ini (size_t alignment, size_t sz,
1826 const void *caller) __THROW;
1827
1828 void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1829 void weak_variable (*__free_hook) (void *__ptr,
1830 const void *) = NULL;
1831 void *weak_variable (*__malloc_hook)
1832 (size_t __size, const void *) = malloc_hook_ini;
1833 void *weak_variable (*__realloc_hook)
1834 (void *__ptr, size_t __size, const void *)
1835 = realloc_hook_ini;
1836 void *weak_variable (*__memalign_hook)
1837 (size_t __alignment, size_t __size, const void *)
1838 = memalign_hook_ini;
1839 void weak_variable (*__after_morecore_hook) (void) = NULL;
1840
1841
1842 /* ---------------- Error behavior ------------------------------------ */
1843
1844 #ifndef DEFAULT_CHECK_ACTION
1845 # define DEFAULT_CHECK_ACTION 3
1846 #endif
1847
1848 static int check_action = DEFAULT_CHECK_ACTION;
1849
1850
1851 /* ------------------ Testing support ----------------------------------*/
1852
1853 static int perturb_byte;
1854
1855 static inline void
1856 alloc_perturb (char *p, size_t n)
1857 {
1858 if (__glibc_unlikely (perturb_byte))
1859 memset (p, perturb_byte ^ 0xff, n);
1860 }
1861
1862 static inline void
1863 free_perturb (char *p, size_t n)
1864 {
1865 if (__glibc_unlikely (perturb_byte))
1866 memset (p, perturb_byte, n);
1867 }
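
/* perturb_byte is normally configured via the MALLOC_PERTURB_ environment
   variable or mallopt (M_PERTURB, ...).  For example, with a value of 0xAA,
   newly allocated memory is filled with 0x55 (0xAA ^ 0xff) and freed memory
   with 0xAA, which makes reads of uninitialized or stale memory easier to
   spot.  */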
1868
1869
1870
1871 #include <stap-probe.h>
1872
1873 /* ------------------- Support for multiple arenas -------------------- */
1874 #include "arena.c"
1875
1876 /*
1877 Debugging support
1878
1879 These routines make a number of assertions about the states
1880 of data structures that should be true at all times. If any
1881 are not true, it's very likely that a user program has somehow
1882 trashed memory. (It's also possible that there is a coding error
1883 in malloc. In which case, please report it!)
1884 */
1885
1886 #if !MALLOC_DEBUG
1887
1888 # define check_chunk(A, P)
1889 # define check_free_chunk(A, P)
1890 # define check_inuse_chunk(A, P)
1891 # define check_remalloced_chunk(A, P, N)
1892 # define check_malloced_chunk(A, P, N)
1893 # define check_malloc_state(A)
1894
1895 #else
1896
1897 # define check_chunk(A, P) do_check_chunk (A, P)
1898 # define check_free_chunk(A, P) do_check_free_chunk (A, P)
1899 # define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1900 # define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1901 # define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1902 # define check_malloc_state(A) do_check_malloc_state (A)
1903
1904 /*
1905 Properties of all chunks
1906 */
1907
1908 static void
1909 do_check_chunk (mstate av, mchunkptr p)
1910 {
1911 unsigned long sz = chunksize (p);
1912 /* min and max possible addresses assuming contiguous allocation */
1913 char *max_address = (char *) (av->top) + chunksize (av->top);
1914 char *min_address = max_address - av->system_mem;
1915
1916 if (!chunk_is_mmapped (p))
1917 {
1918 /* Has legal address ... */
1919 if (p != av->top)
1920 {
1921 if (contiguous (av))
1922 {
1923 assert (((char *) p) >= min_address);
1924 assert (((char *) p + sz) <= ((char *) (av->top)));
1925 }
1926 }
1927 else
1928 {
1929 /* top size is always at least MINSIZE */
1930 assert ((unsigned long) (sz) >= MINSIZE);
1931 /* top predecessor always marked inuse */
1932 assert (prev_inuse (p));
1933 }
1934 }
1935 else
1936 {
1937 /* address is outside main heap */
1938 if (contiguous (av) && av->top != initial_top (av))
1939 {
1940 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1941 }
1942 /* chunk is page-aligned */
1943 assert (((p->prev_size + sz) & (GLRO (dl_pagesize) - 1)) == 0);
1944 /* mem is aligned */
1945 assert (aligned_OK (chunk2mem (p)));
1946 }
1947 }
1948
1949 /*
1950 Properties of free chunks
1951 */
1952
1953 static void
1954 do_check_free_chunk (mstate av, mchunkptr p)
1955 {
1956 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
1957 mchunkptr next = chunk_at_offset (p, sz);
1958
1959 do_check_chunk (av, p);
1960
1961 /* Chunk must claim to be free ... */
1962 assert (!inuse (p));
1963 assert (!chunk_is_mmapped (p));
1964
1965 /* Unless a special marker, must have OK fields */
1966 if ((unsigned long) (sz) >= MINSIZE)
1967 {
1968 assert ((sz & MALLOC_ALIGN_MASK) == 0);
1969 assert (aligned_OK (chunk2mem (p)));
1970 /* ... matching footer field */
1971 assert (next->prev_size == sz);
1972 /* ... and is fully consolidated */
1973 assert (prev_inuse (p));
1974 assert (next == av->top || inuse (next));
1975
1976 /* ... and has minimally sane links */
1977 assert (p->fd->bk == p);
1978 assert (p->bk->fd == p);
1979 }
1980 else /* markers are always of size SIZE_SZ */
1981 assert (sz == SIZE_SZ);
1982 }
1983
1984 /*
1985 Properties of inuse chunks
1986 */
1987
1988 static void
1989 do_check_inuse_chunk (mstate av, mchunkptr p)
1990 {
1991 mchunkptr next;
1992
1993 do_check_chunk (av, p);
1994
1995 if (chunk_is_mmapped (p))
1996 return; /* mmapped chunks have no next/prev */
1997
1998 /* Check whether it claims to be in use ... */
1999 assert (inuse (p));
2000
2001 next = next_chunk (p);
2002
2003 /* ... and is surrounded by OK chunks.
2004 Since more things can be checked with free chunks than inuse ones,
2005 if an inuse chunk borders them and debug is on, it's worth doing them.
2006 */
2007 if (!prev_inuse (p))
2008 {
2009 /* Note that we cannot even look at prev unless it is not inuse */
2010 mchunkptr prv = prev_chunk (p);
2011 assert (next_chunk (prv) == p);
2012 do_check_free_chunk (av, prv);
2013 }
2014
2015 if (next == av->top)
2016 {
2017 assert (prev_inuse (next));
2018 assert (chunksize (next) >= MINSIZE);
2019 }
2020 else if (!inuse (next))
2021 do_check_free_chunk (av, next);
2022 }
2023
2024 /*
2025 Properties of chunks recycled from fastbins
2026 */
2027
2028 static void
2029 do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2030 {
2031 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
2032
2033 if (!chunk_is_mmapped (p))
2034 {
2035 assert (av == arena_for_chunk (p));
2036 if (chunk_non_main_arena (p))
2037 assert (av != &main_arena);
2038 else
2039 assert (av == &main_arena);
2040 }
2041
2042 do_check_inuse_chunk (av, p);
2043
2044 /* Legal size ... */
2045 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2046 assert ((unsigned long) (sz) >= MINSIZE);
2047 /* ... and alignment */
2048 assert (aligned_OK (chunk2mem (p)));
2049 /* chunk is less than MINSIZE more than request */
2050 assert ((long) (sz) - (long) (s) >= 0);
2051 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2052 }
2053
2054 /*
2055 Properties of nonrecycled chunks at the point they are malloced
2056 */
2057
2058 static void
2059 do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2060 {
2061 /* same as recycled case ... */
2062 do_check_remalloced_chunk (av, p, s);
2063
2064 /*
2065 ... plus, must obey implementation invariant that prev_inuse is
2066 always true of any allocated chunk; i.e., that each allocated
2067 chunk borders either a previously allocated and still in-use
2068 chunk, or the base of its memory arena. This is ensured
2069 by making all allocations from the `lowest' part of any found
2070 chunk. This does not necessarily hold however for chunks
2071 recycled via fastbins.
2072 */
2073
2074 assert (prev_inuse (p));
2075 }
2076
2077
2078 /*
2079 Properties of malloc_state.
2080
2081 This may be useful for debugging malloc, as well as detecting user
2082 programmer errors that somehow write into malloc_state.
2083
2084 If you are extending or experimenting with this malloc, you can
2085 probably figure out how to hack this routine to print out or
2086 display chunk addresses, sizes, bins, and other instrumentation.
2087 */
2088
2089 static void
2090 do_check_malloc_state (mstate av)
2091 {
2092 int i;
2093 mchunkptr p;
2094 mchunkptr q;
2095 mbinptr b;
2096 unsigned int idx;
2097 INTERNAL_SIZE_T size;
2098 unsigned long total = 0;
2099 int max_fast_bin;
2100
2101 /* internal size_t must be no wider than pointer type */
2102 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2103
2104 /* alignment is a power of 2 */
2105 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2106
2107 /* cannot run remaining checks until fully initialized */
2108 if (av->top == 0 || av->top == initial_top (av))
2109 return;
2110
2111 /* pagesize is a power of 2 */
2112 assert ((GLRO (dl_pagesize) & (GLRO (dl_pagesize) - 1)) == 0);
2113
2114 /* A contiguous main_arena is consistent with sbrk_base. */
2115 if (av == &main_arena && contiguous (av))
2116 assert ((char *) mp_.sbrk_base + av->system_mem ==
2117 (char *) av->top + chunksize (av->top));
2118
2119 /* properties of fastbins */
2120
2121 /* max_fast is in allowed range */
2122 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2123
2124 max_fast_bin = fastbin_index (get_max_fast ());
2125
2126 for (i = 0; i < NFASTBINS; ++i)
2127 {
2128 p = fastbin (av, i);
2129
2130 /* The following test can only be performed for the main arena.
2131 While mallopt calls malloc_consolidate to get rid of all fast
2132 bins (especially those larger than the new maximum), this only
2133 happens for the main arena. Trying to do this for any
2134 other arena would mean those arenas have to be locked and
2135 malloc_consolidate called for them. This is excessive. And
2136 even if this is acceptable to somebody it still cannot solve
2137 the problem completely since if the arena is locked a
2138 concurrent malloc call might create a new arena which then
2139 could use the newly invalid fast bins. */
2140
2141 /* all bins past max_fast are empty */
2142 if (av == &main_arena && i > max_fast_bin)
2143 assert (p == 0);
2144
2145 while (p != 0)
2146 {
2147 /* each chunk claims to be inuse */
2148 do_check_inuse_chunk (av, p);
2149 total += chunksize (p);
2150 /* chunk belongs in this bin */
2151 assert (fastbin_index (chunksize (p)) == i);
2152 p = p->fd;
2153 }
2154 }
2155
2156 if (total != 0)
2157 assert (have_fastchunks (av));
2158 else if (!have_fastchunks (av))
2159 assert (total == 0);
2160
2161 /* check normal bins */
2162 for (i = 1; i < NBINS; ++i)
2163 {
2164 b = bin_at (av, i);
2165
2166 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2167 if (i >= 2)
2168 {
2169 unsigned int binbit = get_binmap (av, i);
2170 int empty = last (b) == b;
2171 if (!binbit)
2172 assert (empty);
2173 else if (!empty)
2174 assert (binbit);
2175 }
2176
2177 for (p = last (b); p != b; p = p->bk)
2178 {
2179 /* each chunk claims to be free */
2180 do_check_free_chunk (av, p);
2181 size = chunksize (p);
2182 total += size;
2183 if (i >= 2)
2184 {
2185 /* chunk belongs in bin */
2186 idx = bin_index (size);
2187 assert (idx == i);
2188 /* lists are sorted */
2189 assert (p->bk == b ||
2190 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2191
2192 if (!in_smallbin_range (size))
2193 {
2194 if (p->fd_nextsize != NULL)
2195 {
2196 if (p->fd_nextsize == p)
2197 assert (p->bk_nextsize == p);
2198 else
2199 {
2200 if (p->fd_nextsize == first (b))
2201 assert (chunksize (p) < chunksize (p->fd_nextsize));
2202 else
2203 assert (chunksize (p) > chunksize (p->fd_nextsize));
2204
2205 if (p == first (b))
2206 assert (chunksize (p) > chunksize (p->bk_nextsize));
2207 else
2208 assert (chunksize (p) < chunksize (p->bk_nextsize));
2209 }
2210 }
2211 else
2212 assert (p->bk_nextsize == NULL);
2213 }
2214 }
2215 else if (!in_smallbin_range (size))
2216 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2217 /* chunk is followed by a legal chain of inuse chunks */
2218 for (q = next_chunk (p);
2219 (q != av->top && inuse (q) &&
2220 (unsigned long) (chunksize (q)) >= MINSIZE);
2221 q = next_chunk (q))
2222 do_check_inuse_chunk (av, q);
2223 }
2224 }
2225
2226 /* top chunk is OK */
2227 check_chunk (av, av->top);
2228 }
2229 #endif
2230
2231
2232 /* ----------------- Support for debugging hooks -------------------- */
2233 #include "hooks.c"
2234
2235
2236 /* ----------- Routines dealing with system allocation -------------- */
2237
2238 /*
2239 sysmalloc handles malloc cases requiring more memory from the system.
2240 On entry, it is assumed that av->top does not have enough
2241 space to service request for nb bytes, thus requiring that av->top
2242 be extended or replaced.
2243 */
2244
2245 static void *
2246 sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2247 {
2248 mchunkptr old_top; /* incoming value of av->top */
2249 INTERNAL_SIZE_T old_size; /* its size */
2250 char *old_end; /* its end address */
2251
2252 long size; /* arg to first MORECORE or mmap call */
2253 char *brk; /* return value from MORECORE */
2254
2255 long correction; /* arg to 2nd MORECORE call */
2256 char *snd_brk; /* 2nd return val */
2257
2258 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2259 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2260 char *aligned_brk; /* aligned offset into brk */
2261
2262 mchunkptr p; /* the allocated/returned chunk */
2263 mchunkptr remainder; /* remainder from allocation */
2264 unsigned long remainder_size; /* its size */
2265
2266
2267 size_t pagemask = GLRO (dl_pagesize) - 1;
2268 bool tried_mmap = false;
2269
2270
2271 /*
2272 If have mmap, and the request size meets the mmap threshold, and
2273 the system supports mmap, and there are few enough currently
2274 allocated mmapped regions, try to directly map this request
2275 rather than expanding top.
2276 */
2277
2278 if ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold) &&
2279 (mp_.n_mmaps < mp_.n_mmaps_max))
2280 {
2281 char *mm; /* return value from mmap call */
2282
2283 try_mmap:
2284 /*
2285 Round up size to nearest page. For mmapped chunks, the overhead
2286 is one SIZE_SZ unit larger than for normal chunks, because there
2287 is no following chunk whose prev_size field could be used.
2288
2289 See the front_misalign handling below; for glibc there is no
2290 need for further alignment unless we have high alignment.
2291 */
2292 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2293 size = (nb + SIZE_SZ + pagemask) & ~pagemask;
2294 else
2295 size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
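      /* A worked example: with 4096-byte pages, SIZE_SZ == 8 and
         MALLOC_ALIGNMENT == 2 * SIZE_SZ, an nb of 5000 rounds to
         (5000 + 8 + 4095) & ~4095 == 8192, i.e. two pages.  */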
2296 tried_mmap = true;
2297
2298 /* Don't try if size wraps around 0 */
2299 if ((unsigned long) (size) > (unsigned long) (nb))
2300 {
2301 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2302
2303 if (mm != MAP_FAILED)
2304 {
2305 /*
2306 The offset to the start of the mmapped region is stored
2307 in the prev_size field of the chunk. This allows us to adjust
2308 returned start address to meet alignment requirements here
2309 and in memalign(), and still be able to compute proper
2310 address argument for later munmap in free() and realloc().
2311 */
2312
2313 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2314 {
2315 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2316 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2317 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2318 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2319 front_misalign = 0;
2320 }
2321 else
2322 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2323 if (front_misalign > 0)
2324 {
2325 correction = MALLOC_ALIGNMENT - front_misalign;
2326 p = (mchunkptr) (mm + correction);
2327 p->prev_size = correction;
2328 set_head (p, (size - correction) | IS_MMAPPED);
2329 }
2330 else
2331 {
2332 p = (mchunkptr) mm;
2333 set_head (p, size | IS_MMAPPED);
2334 }
2335
2336 /* update statistics */
2337
2338 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2339 atomic_max (&mp_.max_n_mmaps, new);
2340
2341 unsigned long sum;
2342 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2343 atomic_max (&mp_.max_mmapped_mem, sum);
2344
2345 check_chunk (av, p);
2346
2347 return chunk2mem (p);
2348 }
2349 }
2350 }
2351
2352 /* Record incoming configuration of top */
2353
2354 old_top = av->top;
2355 old_size = chunksize (old_top);
2356 old_end = (char *) (chunk_at_offset (old_top, old_size));
2357
2358 brk = snd_brk = (char *) (MORECORE_FAILURE);
2359
2360 /*
2361 If not the first time through, we require old_size to be
2362 at least MINSIZE and to have prev_inuse set.
2363 */
2364
2365 assert ((old_top == initial_top (av) && old_size == 0) ||
2366 ((unsigned long) (old_size) >= MINSIZE &&
2367 prev_inuse (old_top) &&
2368 ((unsigned long) old_end & pagemask) == 0));
2369
2370 /* Precondition: not enough current space to satisfy nb request */
2371 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2372
2373
2374 if (av != &main_arena)
2375 {
2376 heap_info *old_heap, *heap;
2377 size_t old_heap_size;
2378
2379 /* First try to extend the current heap. */
2380 old_heap = heap_for_ptr (old_top);
2381 old_heap_size = old_heap->size;
2382 if ((long) (MINSIZE + nb - old_size) > 0
2383 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2384 {
2385 av->system_mem += old_heap->size - old_heap_size;
2386 arena_mem += old_heap->size - old_heap_size;
2387 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2388 | PREV_INUSE);
2389 }
2390 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2391 {
2392 /* Use a newly allocated heap. */
2393 heap->ar_ptr = av;
2394 heap->prev = old_heap;
2395 av->system_mem += heap->size;
2396 arena_mem += heap->size;
2397 /* Set up the new top. */
2398 top (av) = chunk_at_offset (heap, sizeof (*heap));
2399 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2400
2401 /* Setup fencepost and free the old top chunk with a multiple of
2402 MALLOC_ALIGNMENT in size. */
2403 /* The fencepost takes at least MINSIZE bytes, because it might
2404 become the top chunk again later. Note that a footer is set
2405 up, too, although the chunk is marked in use. */
2406 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2407 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2408 if (old_size >= MINSIZE)
2409 {
2410 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2411 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2412 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2413 _int_free (av, old_top, 1);
2414 }
2415 else
2416 {
2417 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2418 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2419 }
2420 }
2421 else if (!tried_mmap)
2422 /* We can at least try to use mmap to get memory. */
2423 goto try_mmap;
2424 }
2425 else /* av == main_arena */
2426
2427
2428 { /* Request enough space for nb + pad + overhead */
2429 size = nb + mp_.top_pad + MINSIZE;
2430
2431 /*
2432 If contiguous, we can subtract out existing space that we hope to
2433 combine with new space. We add it back later only if
2434 we don't actually get contiguous space.
2435 */
2436
2437 if (contiguous (av))
2438 size -= old_size;
2439
2440 /*
2441 Round to a multiple of page size.
2442 If MORECORE is not contiguous, this ensures that we only call it
2443 with whole-page arguments. And if MORECORE is contiguous and
2444 this is not first time through, this preserves page-alignment of
2445 previous calls. Otherwise, we correct to page-align below.
2446 */
2447
2448 size = (size + pagemask) & ~pagemask;
2449
2450 /*
2451 Don't try to call MORECORE if argument is so big as to appear
2452 negative. Note that since mmap takes size_t arg, it may succeed
2453 below even if we cannot call MORECORE.
2454 */
2455
2456 if (size > 0)
2457 {
2458 brk = (char *) (MORECORE (size));
2459 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2460 }
2461
2462 if (brk != (char *) (MORECORE_FAILURE))
2463 {
2464 /* Call the `morecore' hook if necessary. */
2465 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2466 if (__builtin_expect (hook != NULL, 0))
2467 (*hook)();
2468 }
2469 else
2470 {
2471 /*
2472 If have mmap, try using it as a backup when MORECORE fails or
2473 cannot be used. This is worth doing on systems that have "holes" in
2474 address space, so sbrk cannot extend to give contiguous space, but
2475 space is available elsewhere. Note that we ignore mmap max count
2476 and threshold limits, since the space will not be used as a
2477 segregated mmap region.
2478 */
2479
2480 /* Cannot merge with old top, so add its size back in */
2481 if (contiguous (av))
2482 size = (size + old_size + pagemask) & ~pagemask;
2483
2484 /* If we are relying on mmap as backup, then use larger units */
2485 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2486 size = MMAP_AS_MORECORE_SIZE;
2487
2488 /* Don't try if size wraps around 0 */
2489 if ((unsigned long) (size) > (unsigned long) (nb))
2490 {
2491 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2492
2493 if (mbrk != MAP_FAILED)
2494 {
2495 /* We do not need, and cannot use, another sbrk call to find end */
2496 brk = mbrk;
2497 snd_brk = brk + size;
2498
2499 /*
2500 Record that we no longer have a contiguous sbrk region.
2501 After the first time mmap is used as backup, we do not
2502 ever rely on contiguous space since this could incorrectly
2503 bridge regions.
2504 */
2505 set_noncontiguous (av);
2506 }
2507 }
2508 }
2509
2510 if (brk != (char *) (MORECORE_FAILURE))
2511 {
2512 if (mp_.sbrk_base == 0)
2513 mp_.sbrk_base = brk;
2514 av->system_mem += size;
2515
2516 /*
2517 If MORECORE extends previous space, we can likewise extend top size.
2518 */
2519
2520 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2521 set_head (old_top, (size + old_size) | PREV_INUSE);
2522
2523 else if (contiguous (av) && old_size && brk < old_end)
2524 {
2525 /* Oops! Someone else killed our space.. Can't touch anything. */
2526 malloc_printerr (3, "break adjusted to free malloc space", brk);
2527 }
2528
2529 /*
2530 Otherwise, make adjustments:
2531
2532 * If the first time through or noncontiguous, we need to call sbrk
2533 just to find out where the end of memory lies.
2534
2535 * We need to ensure that all returned chunks from malloc will meet
2536 MALLOC_ALIGNMENT
2537
2538 * If there was an intervening foreign sbrk, we need to adjust sbrk
2539 request size to account for the fact that we will not be able to
2540 combine new space with existing space in old_top.
2541
2542 * Almost all systems internally allocate whole pages at a time, in
2543 which case we might as well use the whole last page of request.
2544 So we allocate enough more memory to hit a page boundary now,
2545 which in turn causes future contiguous calls to page-align.
2546 */
2547
2548 else
2549 {
2550 front_misalign = 0;
2551 end_misalign = 0;
2552 correction = 0;
2553 aligned_brk = brk;
2554
2555 /* handle contiguous cases */
2556 if (contiguous (av))
2557 {
2558 /* Count foreign sbrk as system_mem. */
2559 if (old_size)
2560 av->system_mem += brk - old_end;
2561
2562 /* Guarantee alignment of first new chunk made from this space */
2563
2564 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2565 if (front_misalign > 0)
2566 {
2567 /*
2568 Skip over some bytes to arrive at an aligned position.
2569 We don't need to specially mark these wasted front bytes.
2570 They will never be accessed anyway because
2571 prev_inuse of av->top (and any chunk created from its start)
2572 is always true after initialization.
2573 */
2574
2575 correction = MALLOC_ALIGNMENT - front_misalign;
2576 aligned_brk += correction;
2577 }
2578
2579 /*
2580 If this isn't adjacent to existing space, then we will not
2581 be able to merge with old_top space, so must add to 2nd request.
2582 */
2583
2584 correction += old_size;
2585
2586 /* Extend the end address to hit a page boundary */
2587 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2588 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
2589
2590 assert (correction >= 0);
2591 snd_brk = (char *) (MORECORE (correction));
2592
2593 /*
2594 If can't allocate correction, try to at least find out current
2595 brk. It might be enough to proceed without failing.
2596
2597 Note that if second sbrk did NOT fail, we assume that space
2598 is contiguous with first sbrk. This is a safe assumption unless
2599 program is multithreaded but doesn't use locks and a foreign sbrk
2600 occurred between our first and second calls.
2601 */
2602
2603 if (snd_brk == (char *) (MORECORE_FAILURE))
2604 {
2605 correction = 0;
2606 snd_brk = (char *) (MORECORE (0));
2607 }
2608 else
2609 {
2610 /* Call the `morecore' hook if necessary. */
2611 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2612 if (__builtin_expect (hook != NULL, 0))
2613 (*hook)();
2614 }
2615 }
2616
2617 /* handle non-contiguous cases */
2618 else
2619 {
2620 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2621 /* MORECORE/mmap must correctly align */
2622 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2623 else
2624 {
2625 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2626 if (front_misalign > 0)
2627 {
2628 /*
2629 Skip over some bytes to arrive at an aligned position.
2630 We don't need to specially mark these wasted front bytes.
2631 They will never be accessed anyway because
2632 prev_inuse of av->top (and any chunk created from its start)
2633 is always true after initialization.
2634 */
2635
2636 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2637 }
2638 }
2639
2640 /* Find out current end of memory */
2641 if (snd_brk == (char *) (MORECORE_FAILURE))
2642 {
2643 snd_brk = (char *) (MORECORE (0));
2644 }
2645 }
2646
2647 /* Adjust top based on results of second sbrk */
2648 if (snd_brk != (char *) (MORECORE_FAILURE))
2649 {
2650 av->top = (mchunkptr) aligned_brk;
2651 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2652 av->system_mem += correction;
2653
2654 /*
2655 If not the first time through, we either have a
2656 gap due to foreign sbrk or a non-contiguous region. Insert a
2657 double fencepost at old_top to prevent consolidation with space
2658 we don't own. These fenceposts are artificial chunks that are
2659 marked as inuse and are in any case too small to use. We need
2660 two to make sizes and alignments work out.
2661 */
2662
2663 if (old_size != 0)
2664 {
2665 /*
2666 Shrink old_top to insert fenceposts, keeping size a
2667 multiple of MALLOC_ALIGNMENT. We know there is at least
2668 enough space in old_top to do this.
2669 */
2670 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2671 set_head (old_top, old_size | PREV_INUSE);
2672
2673 /*
2674 Note that the following assignments completely overwrite
2675 old_top when old_size was previously MINSIZE. This is
2676 intentional. We need the fencepost, even if old_top otherwise gets
2677 lost.
2678 */
2679 chunk_at_offset (old_top, old_size)->size =
2680 (2 * SIZE_SZ) | PREV_INUSE;
2681
2682 chunk_at_offset (old_top, old_size + 2 * SIZE_SZ)->size =
2683 (2 * SIZE_SZ) | PREV_INUSE;
2684
2685 /* If possible, release the rest. */
2686 if (old_size >= MINSIZE)
2687 {
2688 _int_free (av, old_top, 1);
2689 }
2690 }
2691 }
2692 }
2693 }
2694 } /* if (av != &main_arena) */
2695
2696 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2697 av->max_system_mem = av->system_mem;
2698 check_malloc_state (av);
2699
2700 /* finally, do the allocation */
2701 p = av->top;
2702 size = chunksize (p);
2703
2704 /* check that one of the above allocation paths succeeded */
2705 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2706 {
2707 remainder_size = size - nb;
2708 remainder = chunk_at_offset (p, nb);
2709 av->top = remainder;
2710 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2711 set_head (remainder, remainder_size | PREV_INUSE);
2712 check_malloced_chunk (av, p, nb);
2713 return chunk2mem (p);
2714 }
2715
2716 /* catch all failure paths */
2717 __set_errno (ENOMEM);
2718 return 0;
2719 }
2720
2721
2722 /*
2723 systrim is an inverse of sorts to sysmalloc. It gives memory back
2724 to the system (via negative arguments to sbrk) if there is unused
2725 memory at the `high' end of the malloc pool. It is called
2726 automatically by free() when top space exceeds the trim
2727 threshold. It is also called by the public malloc_trim routine. It
2728 returns 1 if it actually released any memory, else 0.
2729 */
2730
2731 static int
2732 systrim (size_t pad, mstate av)
2733 {
2734 long top_size; /* Amount of top-most memory */
2735 long extra; /* Amount to release */
2736 long released; /* Amount actually released */
2737 char *current_brk; /* address returned by pre-check sbrk call */
2738 char *new_brk; /* address returned by post-check sbrk call */
2739 size_t pagesz;
2740 long top_area;
2741
2742 pagesz = GLRO (dl_pagesize);
2743 top_size = chunksize (av->top);
2744
2745 top_area = top_size - MINSIZE - 1;
2746 if (top_area <= pad)
2747 return 0;
2748
2749 /* Release in pagesize units, keeping at least one page */
2750 extra = (top_area - pad) & ~(pagesz - 1);
2751
2752 /*
2753 Only proceed if end of memory is where we last set it.
2754 This avoids problems if there were foreign sbrk calls.
2755 */
2756 current_brk = (char *) (MORECORE (0));
2757 if (current_brk == (char *) (av->top) + top_size)
2758 {
2759 /*
2760 Attempt to release memory. We ignore MORECORE return value,
2761 and instead call again to find out where new end of memory is.
2762 This avoids problems if the first call releases less than we asked,
2763 or if failure somehow altered the brk value. (We could still
2764 encounter problems if it altered brk in some very bad way,
2765 but the only thing we can do is adjust anyway, which will cause
2766 some downstream failure.)
2767 */
2768
2769 MORECORE (-extra);
2770 /* Call the `morecore' hook if necessary. */
2771 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2772 if (__builtin_expect (hook != NULL, 0))
2773 (*hook)();
2774 new_brk = (char *) (MORECORE (0));
2775
2776 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2777
2778 if (new_brk != (char *) MORECORE_FAILURE)
2779 {
2780 released = (long) (current_brk - new_brk);
2781
2782 if (released != 0)
2783 {
2784 /* Success. Adjust top. */
2785 av->system_mem -= released;
2786 set_head (av->top, (top_size - released) | PREV_INUSE);
2787 check_malloc_state (av);
2788 return 1;
2789 }
2790 }
2791 }
2792 return 0;
2793 }
2794
2795 static void
2796 internal_function
2797 munmap_chunk (mchunkptr p)
2798 {
2799 INTERNAL_SIZE_T size = chunksize (p);
2800
2801 assert (chunk_is_mmapped (p));
2802
2803 uintptr_t block = (uintptr_t) p - p->prev_size;
2804 size_t total_size = p->prev_size + size;
2805 /* Unfortunately we have to do the compiler's job by hand here. Normally
2806 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2807 page size. But gcc does not recognize the optimization possibility
2808 (at the moment, at least), so we combine the two values into one before
2809 the bit test. */
2810 if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
2811 {
2812 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2813 chunk2mem (p));
2814 return;
2815 }
2816
2817 atomic_decrement (&mp_.n_mmaps);
2818 atomic_add (&mp_.mmapped_mem, -total_size);
2819
2820 /* If munmap failed the process virtual memory address space is in a
2821 bad shape. Just leave the block hanging around, the process will
2822 terminate shortly anyway since not much can be done. */
2823 __munmap ((char *) block, total_size);
2824 }
2825
2826 #if HAVE_MREMAP
2827
2828 static mchunkptr
2829 internal_function
2830 mremap_chunk (mchunkptr p, size_t new_size)
2831 {
2832 size_t page_mask = GLRO (dl_pagesize) - 1;
2833 INTERNAL_SIZE_T offset = p->prev_size;
2834 INTERNAL_SIZE_T size = chunksize (p);
2835 char *cp;
2836
2837 assert (chunk_is_mmapped (p));
2838 assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
2839
2840 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2841 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
2842
2843 /* No need to remap if the number of pages does not change. */
2844 if (size + offset == new_size)
2845 return p;
2846
2847 cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
2848 MREMAP_MAYMOVE);
2849
2850 if (cp == MAP_FAILED)
2851 return 0;
2852
2853 p = (mchunkptr) (cp + offset);
2854
2855 assert (aligned_OK (chunk2mem (p)));
2856
2857 assert ((p->prev_size == offset));
2858 set_head (p, (new_size - offset) | IS_MMAPPED);
2859
2860 INTERNAL_SIZE_T new;
2861 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
2862 + new_size - size - offset;
2863 atomic_max (&mp_.max_mmapped_mem, new);
2864 return p;
2865 }
2866 #endif /* HAVE_MREMAP */
2867
2868 /*------------------------ Public wrappers. --------------------------------*/
2869
2870 void *
2871 __libc_malloc (size_t bytes)
2872 {
2873 mstate ar_ptr;
2874 void *victim;
2875
2876 void *(*hook) (size_t, const void *)
2877 = atomic_forced_read (__malloc_hook);
2878 if (__builtin_expect (hook != NULL, 0))
2879 return (*hook)(bytes, RETURN_ADDRESS (0));
2880
2881 arena_lookup (ar_ptr);
2882
2883 arena_lock (ar_ptr, bytes);
2884 if (!ar_ptr)
2885 return 0;
2886
2887 victim = _int_malloc (ar_ptr, bytes);
2888 if (!victim)
2889 {
2890 LIBC_PROBE (memory_malloc_retry, 1, bytes);
2891 ar_ptr = arena_get_retry (ar_ptr, bytes);
2892 if (__builtin_expect (ar_ptr != NULL, 1))
2893 {
2894 victim = _int_malloc (ar_ptr, bytes);
2895 (void) mutex_unlock (&ar_ptr->mutex);
2896 }
2897 }
2898 else
2899 (void) mutex_unlock (&ar_ptr->mutex);
2900 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
2901 ar_ptr == arena_for_chunk (mem2chunk (victim)));
2902 return victim;
2903 }
2904 libc_hidden_def (__libc_malloc)
2905
2906 void
2907 __libc_free (void *mem)
2908 {
2909 mstate ar_ptr;
2910 mchunkptr p; /* chunk corresponding to mem */
2911
2912 void (*hook) (void *, const void *)
2913 = atomic_forced_read (__free_hook);
2914 if (__builtin_expect (hook != NULL, 0))
2915 {
2916 (*hook)(mem, RETURN_ADDRESS (0));
2917 return;
2918 }
2919
2920 if (mem == 0) /* free(0) has no effect */
2921 return;
2922
2923 p = mem2chunk (mem);
2924
2925 if (chunk_is_mmapped (p)) /* release mmapped memory. */
2926 {
2927 /* see if the dynamic brk/mmap threshold needs adjusting */
2928 if (!mp_.no_dyn_threshold
2929 && p->size > mp_.mmap_threshold
2930 && p->size <= DEFAULT_MMAP_THRESHOLD_MAX)
2931 {
2932 mp_.mmap_threshold = chunksize (p);
2933 mp_.trim_threshold = 2 * mp_.mmap_threshold;
2934 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
2935 mp_.mmap_threshold, mp_.trim_threshold);
2936 }
2937 munmap_chunk (p);
2938 return;
2939 }
2940
2941 ar_ptr = arena_for_chunk (p);
2942 _int_free (ar_ptr, p, 0);
2943 }
2944 libc_hidden_def (__libc_free)
2945
2946 void *
2947 __libc_realloc (void *oldmem, size_t bytes)
2948 {
2949 mstate ar_ptr;
2950 INTERNAL_SIZE_T nb; /* padded request size */
2951
2952 void *newp; /* chunk to return */
2953
2954 void *(*hook) (void *, size_t, const void *) =
2955 atomic_forced_read (__realloc_hook);
2956 if (__builtin_expect (hook != NULL, 0))
2957 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
2958
2959 #if REALLOC_ZERO_BYTES_FREES
2960 if (bytes == 0 && oldmem != NULL)
2961 {
2962 __libc_free (oldmem); return 0;
2963 }
2964 #endif
2965
2966 /* realloc of null is supposed to be same as malloc */
2967 if (oldmem == 0)
2968 return __libc_malloc (bytes);
2969
2970 /* chunk corresponding to oldmem */
2971 const mchunkptr oldp = mem2chunk (oldmem);
2972 /* its size */
2973 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
2974
2975 /* Little security check which won't hurt performance: the
2976 allocator never wraps around at the end of the address space.
2977 Therefore we can exclude some size values which might appear
2978 here by accident or by "design" from some intruder. */
2979 if (__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
2980 || __builtin_expect (misaligned_chunk (oldp), 0))
2981 {
2982 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
2983 return NULL;
2984 }
2985
2986 checked_request2size (bytes, nb);
2987
2988 if (chunk_is_mmapped (oldp))
2989 {
2990 void *newmem;
2991
2992 #if HAVE_MREMAP
2993 newp = mremap_chunk (oldp, nb);
2994 if (newp)
2995 return chunk2mem (newp);
2996 #endif
2997 /* Note the extra SIZE_SZ overhead. */
2998 if (oldsize - SIZE_SZ >= nb)
2999 return oldmem; /* do nothing */
3000
3001 /* Must alloc, copy, free. */
3002 newmem = __libc_malloc (bytes);
3003 if (newmem == 0)
3004 return 0; /* propagate failure */
3005
3006 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3007 munmap_chunk (oldp);
3008 return newmem;
3009 }
3010
3011 ar_ptr = arena_for_chunk (oldp);
3012 (void) mutex_lock (&ar_ptr->mutex);
3013
3014
3015 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3016
3017 (void) mutex_unlock (&ar_ptr->mutex);
3018 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3019 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3020
3021 if (newp == NULL)
3022 {
3023 /* Try harder to allocate memory in other arenas. */
3024 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3025 newp = __libc_malloc (bytes);
3026 if (newp != NULL)
3027 {
3028 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3029 _int_free (ar_ptr, oldp, 0);
3030 }
3031 }
3032
3033 return newp;
3034 }
3035 libc_hidden_def (__libc_realloc)
3036
3037 void *
3038 __libc_memalign (size_t alignment, size_t bytes)
3039 {
3040 void *address = RETURN_ADDRESS (0);
3041 return _mid_memalign (alignment, bytes, address);
3042 }
3043
3044 static void *
3045 _mid_memalign (size_t alignment, size_t bytes, void *address)
3046 {
3047 mstate ar_ptr;
3048 void *p;
3049
3050 void *(*hook) (size_t, size_t, const void *) =
3051 atomic_forced_read (__memalign_hook);
3052 if (__builtin_expect (hook != NULL, 0))
3053 return (*hook)(alignment, bytes, address);
3054
3055 /* If we need less alignment than we give anyway, just relay to malloc. */
3056 if (alignment <= MALLOC_ALIGNMENT)
3057 return __libc_malloc (bytes);
3058
3059 /* Otherwise, ensure that it is at least a minimum chunk size */
3060 if (alignment < MINSIZE)
3061 alignment = MINSIZE;
3062
3063 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3064 power of 2 and will cause overflow in the check below. */
3065 if (alignment > SIZE_MAX / 2 + 1)
3066 {
3067 __set_errno (EINVAL);
3068 return 0;
3069 }
3070
3071 /* Check for overflow. */
3072 if (bytes > SIZE_MAX - alignment - MINSIZE)
3073 {
3074 __set_errno (ENOMEM);
3075 return 0;
3076 }
3077
3078
3079 /* Make sure alignment is power of 2. */
3080 if (!powerof2 (alignment))
3081 {
3082 size_t a = MALLOC_ALIGNMENT * 2;
3083 while (a < alignment)
3084 a <<= 1;
3085 alignment = a;
3086 }
3087
3088 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3089 if (!ar_ptr)
3090 return 0;
3091
3092 p = _int_memalign (ar_ptr, alignment, bytes);
3093 if (!p)
3094 {
3095 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3096 ar_ptr = arena_get_retry (ar_ptr, bytes);
3097 if (__builtin_expect (ar_ptr != NULL, 1))
3098 {
3099 p = _int_memalign (ar_ptr, alignment, bytes);
3100 (void) mutex_unlock (&ar_ptr->mutex);
3101 }
3102 }
3103 else
3104 (void) mutex_unlock (&ar_ptr->mutex);
3105 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3106 ar_ptr == arena_for_chunk (mem2chunk (p)));
3107 return p;
3108 }
3109 /* For ISO C11. */
3110 weak_alias (__libc_memalign, aligned_alloc)
3111 libc_hidden_def (__libc_memalign)
3112
3113 void *
3114 __libc_valloc (size_t bytes)
3115 {
3116 if (__malloc_initialized < 0)
3117 ptmalloc_init ();
3118
3119 void *address = RETURN_ADDRESS (0);
3120 size_t pagesz = GLRO (dl_pagesize);
3121 return _mid_memalign (pagesz, bytes, address);
3122 }
3123
3124 void *
3125 __libc_pvalloc (size_t bytes)
3126 {
3127 if (__malloc_initialized < 0)
3128 ptmalloc_init ();
3129
3130 void *address = RETURN_ADDRESS (0);
3131 size_t pagesz = GLRO (dl_pagesize);
3132 size_t page_mask = GLRO (dl_pagesize) - 1;
3133 size_t rounded_bytes = (bytes + page_mask) & ~(page_mask);
3134
3135 /* Check for overflow. */
3136 if (bytes > SIZE_MAX - 2 * pagesz - MINSIZE)
3137 {
3138 __set_errno (ENOMEM);
3139 return 0;
3140 }
3141
3142 return _mid_memalign (pagesz, rounded_bytes, address);
3143 }
3144
3145 void *
3146 __libc_calloc (size_t n, size_t elem_size)
3147 {
3148 mstate av;
3149 mchunkptr oldtop, p;
3150 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3151 void *mem;
3152 unsigned long clearsize;
3153 unsigned long nclears;
3154 INTERNAL_SIZE_T *d;
3155
3156 /* size_t is unsigned so the behavior on overflow is defined. */
3157 bytes = n * elem_size;
3158 #define HALF_INTERNAL_SIZE_T \
3159 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
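  /* If both n and elem_size are below HALF_INTERNAL_SIZE_T, their product
     cannot exceed the range of INTERNAL_SIZE_T, so the cheap (n | elem_size)
     test below lets the common case skip the division entirely.  */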
3160 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3161 {
3162 if (elem_size != 0 && bytes / elem_size != n)
3163 {
3164 __set_errno (ENOMEM);
3165 return 0;
3166 }
3167 }
3168
3169 void *(*hook) (size_t, const void *) =
3170 atomic_forced_read (__malloc_hook);
3171 if (__builtin_expect (hook != NULL, 0))
3172 {
3173 sz = bytes;
3174 mem = (*hook)(sz, RETURN_ADDRESS (0));
3175 if (mem == 0)
3176 return 0;
3177
3178 return memset (mem, 0, sz);
3179 }
3180
3181 sz = bytes;
3182
3183 arena_get (av, sz);
3184 if (!av)
3185 return 0;
3186
3187 /* Check if we hand out the top chunk, in which case there may be no
3188 need to clear. */
3189 #if MORECORE_CLEARS
3190 oldtop = top (av);
3191 oldtopsize = chunksize (top (av));
3192 # if MORECORE_CLEARS < 2
3193 /* Only newly allocated memory is guaranteed to be cleared. */
3194 if (av == &main_arena &&
3195 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3196 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3197 # endif
3198 if (av != &main_arena)
3199 {
3200 heap_info *heap = heap_for_ptr (oldtop);
3201 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3202 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3203 }
3204 #endif
3205 mem = _int_malloc (av, sz);
3206
3207
3208 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3209 av == arena_for_chunk (mem2chunk (mem)));
3210
3211 if (mem == 0)
3212 {
3213 LIBC_PROBE (memory_calloc_retry, 1, sz);
3214 av = arena_get_retry (av, sz);
3215 if (__builtin_expect (av != NULL, 1))
3216 {
3217 mem = _int_malloc (av, sz);
3218 (void) mutex_unlock (&av->mutex);
3219 }
3220 if (mem == 0)
3221 return 0;
3222 }
3223 else
3224 (void) mutex_unlock (&av->mutex);
3225 p = mem2chunk (mem);
3226
3227 /* Two optional cases in which clearing is not necessary */
3228 if (chunk_is_mmapped (p))
3229 {
3230 if (__builtin_expect (perturb_byte, 0))
3231 return memset (mem, 0, sz);
3232
3233 return mem;
3234 }
3235
3236 csz = chunksize (p);
3237
3238 #if MORECORE_CLEARS
3239 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3240 {
3241 /* clear only the bytes from non-freshly-sbrked memory */
3242 csz = oldtopsize;
3243 }
3244 #endif
3245
3246 /* Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that
3247 contents have an odd number of INTERNAL_SIZE_T-sized words;
3248 minimally 3. */
3249 d = (INTERNAL_SIZE_T *) mem;
3250 clearsize = csz - SIZE_SZ;
3251 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3252 assert (nclears >= 3);
3253
3254 if (nclears > 9)
3255 return memset (d, 0, clearsize);
3256
3257 else
3258 {
3259 *(d + 0) = 0;
3260 *(d + 1) = 0;
3261 *(d + 2) = 0;
3262 if (nclears > 4)
3263 {
3264 *(d + 3) = 0;
3265 *(d + 4) = 0;
3266 if (nclears > 6)
3267 {
3268 *(d + 5) = 0;
3269 *(d + 6) = 0;
3270 if (nclears > 8)
3271 {
3272 *(d + 7) = 0;
3273 *(d + 8) = 0;
3274 }
3275 }
3276 }
3277 }
3278
3279 return mem;
3280 }
3281
3282 /*
3283 ------------------------------ malloc ------------------------------
3284 */
3285
3286 static void *
3287 _int_malloc (mstate av, size_t bytes)
3288 {
3289 INTERNAL_SIZE_T nb; /* normalized request size */
3290 unsigned int idx; /* associated bin index */
3291 mbinptr bin; /* associated bin */
3292
3293 mchunkptr victim; /* inspected/selected chunk */
3294 INTERNAL_SIZE_T size; /* its size */
3295 int victim_index; /* its bin index */
3296
3297 mchunkptr remainder; /* remainder from a split */
3298 unsigned long remainder_size; /* its size */
3299
3300 unsigned int block; /* bit map traverser */
3301 unsigned int bit; /* bit map traverser */
3302 unsigned int map; /* current word of binmap */
3303
3304 mchunkptr fwd; /* misc temp for linking */
3305 mchunkptr bck; /* misc temp for linking */
3306
3307 const char *errstr = NULL;
3308
3309 /*
3310 Convert request size to internal form by adding SIZE_SZ bytes
3311 overhead plus possibly more to obtain necessary alignment and/or
3312 to obtain a size of at least MINSIZE, the smallest allocatable
3313 size. Also, checked_request2size traps (returning 0) request sizes
3314 that are so large that they wrap around zero when padded and
3315 aligned.
3316 */
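  /* As a worked example (assuming the common 64-bit layout with SIZE_SZ == 8,
     MALLOC_ALIGNMENT == 16 and MINSIZE == 32; these constants are platform
     dependent): a 24-byte request becomes 24 + 8 = 32, already aligned, so
     nb = 32; a 1-byte request pads to less than MINSIZE and is bumped up to
     nb = 32; a 40-byte request becomes 48 after adding the overhead and
     rounding up to the alignment.  */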
3317
3318 checked_request2size (bytes, nb);
3319
3320 /*
3321 If the size qualifies as a fastbin, first check corresponding bin.
3322 This code is safe to execute even if av is not yet initialized, so we
3323 can try it without checking, which saves some time on this fast path.
3324 */
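  /* The removal below is the classic lock-free LIFO ("Treiber stack") pop.
     A stand-alone sketch using C11 <stdatomic.h> rather than the internal
     catomic_* helpers; lifo_pop and struct node are illustrative names only,
     and the sketch glosses over the ABA and validity checks a production
     allocator has to care about.

       #include <stdatomic.h>
       #include <stddef.h>

       struct node { struct node *next; };

       // Pop the head of a lock-free LIFO list, or return NULL if empty.
       static struct node *
       lifo_pop (struct node *_Atomic *head)
       {
         struct node *old = atomic_load (head);
         while (old != NULL
                && !atomic_compare_exchange_weak (head, &old, old->next))
           ;   // on failure, old now holds the current head; retry
         return old;
       }
   */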
3325
3326 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3327 {
3328 idx = fastbin_index (nb);
3329 mfastbinptr *fb = &fastbin (av, idx);
3330 mchunkptr pp = *fb;
3331 do
3332 {
3333 victim = pp;
3334 if (victim == NULL)
3335 break;
3336 }
3337 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3338 != victim);
3339 if (victim != 0)
3340 {
3341 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3342 {
3343 errstr = "malloc(): memory corruption (fast)";
3344 errout:
3345 malloc_printerr (check_action, errstr, chunk2mem (victim));
3346 return NULL;
3347 }
3348 check_remalloced_chunk (av, victim, nb);
3349 void *p = chunk2mem (victim);
3350 alloc_perturb (p, bytes);
3351 return p;
3352 }
3353 }
3354
3355 /*
3356 If a small request, check regular bin. Since these "smallbins"
3357 hold one size each, no searching within bins is necessary.
3358 (For a large request, we need to wait until unsorted chunks are
3359 processed to find best fit. But for small ones, fits are exact
3360 anyway, so we can check now, which is faster.)
3361 */
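  /* As a worked illustration (assuming the common 64-bit configuration with
     16-byte MALLOC_ALIGNMENT; the exact constants are platform dependent):
     consecutive small bins hold chunks of exactly 32, 48, 64, ... bytes, so
     a normalized request of 48 bytes maps straight to the single bin holding
     48-byte chunks, and the chunk at the tail of that bin (FIFO order) can
     be returned without any search.  */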
3362
3363 if (in_smallbin_range (nb))
3364 {
3365 idx = smallbin_index (nb);
3366 bin = bin_at (av, idx);
3367
3368 if ((victim = last (bin)) != bin)
3369 {
3370 if (victim == 0) /* initialization check */
3371 malloc_consolidate (av);
3372 else
3373 {
3374 bck = victim->bk;
3375 if (__glibc_unlikely (bck->fd != victim))
3376 {
3377 errstr = "malloc(): smallbin double linked list corrupted";
3378 goto errout;
3379 }
3380 set_inuse_bit_at_offset (victim, nb);
3381 bin->bk = bck;
3382 bck->fd = bin;
3383
3384 if (av != &main_arena)
3385 victim->size |= NON_MAIN_ARENA;
3386 check_malloced_chunk (av, victim, nb);
3387 void *p = chunk2mem (victim);
3388 alloc_perturb (p, bytes);
3389 return p;
3390 }
3391 }
3392 }
3393
3394 /*
3395 If this is a large request, consolidate fastbins before continuing.
3396 While it might look excessive to kill all fastbins before
3397 even seeing if there is space available, this avoids
3398 fragmentation problems normally associated with fastbins.
3399 Also, in practice, programs tend to have runs of either small or
3400 large requests, but less often mixtures, so consolidation is not
3401     invoked all that often in most programs.  And the programs that
3402     do call it frequently tend to fragment anyway.
3403 */
3404
3405 else
3406 {
3407 idx = largebin_index (nb);
3408 if (have_fastchunks (av))
3409 malloc_consolidate (av);
3410 }
3411
3412 /*
3413 Process recently freed or remaindered chunks, taking one only if
3414     it is an exact fit, or, if this is a small request, the chunk is the remainder from
3415 the most recent non-exact fit. Place other traversed chunks in
3416 bins. Note that this step is the only place in any routine where
3417 chunks are placed in bins.
3418
3419 The outer loop here is needed because we might not realize until
3420 near the end of malloc that we should have consolidated, so must
3421 do so and retry. This happens at most once, and only when we would
3422 otherwise need to expand memory to service a "small" request.
3423 */
3424
3425 for (;; )
3426 {
3427 int iters = 0;
3428 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3429 {
3430 bck = victim->bk;
3431 if (__builtin_expect (victim->size <= 2 * SIZE_SZ, 0)
3432 || __builtin_expect (victim->size > av->system_mem, 0))
3433 malloc_printerr (check_action, "malloc(): memory corruption",
3434 chunk2mem (victim));
3435 size = chunksize (victim);
3436
3437 /*
3438 If a small request, try to use last remainder if it is the
3439 only chunk in unsorted bin. This helps promote locality for
3440 runs of consecutive small requests. This is the only
3441 exception to best-fit, and applies only when there is
3442 no exact fit for a small chunk.
3443 */
3444
3445 if (in_smallbin_range (nb) &&
3446 bck == unsorted_chunks (av) &&
3447 victim == av->last_remainder &&
3448 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3449 {
3450 /* split and reattach remainder */
3451 remainder_size = size - nb;
3452 remainder = chunk_at_offset (victim, nb);
3453 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3454 av->last_remainder = remainder;
3455 remainder->bk = remainder->fd = unsorted_chunks (av);
3456 if (!in_smallbin_range (remainder_size))
3457 {
3458 remainder->fd_nextsize = NULL;
3459 remainder->bk_nextsize = NULL;
3460 }
3461
3462 set_head (victim, nb | PREV_INUSE |
3463 (av != &main_arena ? NON_MAIN_ARENA : 0));
3464 set_head (remainder, remainder_size | PREV_INUSE);
3465 set_foot (remainder, remainder_size);
3466
3467 check_malloced_chunk (av, victim, nb);
3468 void *p = chunk2mem (victim);
3469 alloc_perturb (p, bytes);
3470 return p;
3471 }
3472
3473 /* remove from unsorted list */
3474 unsorted_chunks (av)->bk = bck;
3475 bck->fd = unsorted_chunks (av);
3476
3477 /* Take now instead of binning if exact fit */
3478
3479 if (size == nb)
3480 {
3481 set_inuse_bit_at_offset (victim, size);
3482 if (av != &main_arena)
3483 victim->size |= NON_MAIN_ARENA;
3484 check_malloced_chunk (av, victim, nb);
3485 void *p = chunk2mem (victim);
3486 alloc_perturb (p, bytes);
3487 return p;
3488 }
3489
3490 /* place chunk in bin */
3491
3492 if (in_smallbin_range (size))
3493 {
3494 victim_index = smallbin_index (size);
3495 bck = bin_at (av, victim_index);
3496 fwd = bck->fd;
3497 }
3498 else
3499 {
3500 victim_index = largebin_index (size);
3501 bck = bin_at (av, victim_index);
3502 fwd = bck->fd;
3503
3504 /* maintain large bins in sorted order */
3505 if (fwd != bck)
3506 {
3507 /* Or with inuse bit to speed comparisons */
3508 size |= PREV_INUSE;
3509 /* if smaller than smallest, bypass loop below */
3510 assert ((bck->bk->size & NON_MAIN_ARENA) == 0);
3511 if ((unsigned long) (size) < (unsigned long) (bck->bk->size))
3512 {
3513 fwd = bck;
3514 bck = bck->bk;
3515
3516 victim->fd_nextsize = fwd->fd;
3517 victim->bk_nextsize = fwd->fd->bk_nextsize;
3518 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3519 }
3520 else
3521 {
3522 assert ((fwd->size & NON_MAIN_ARENA) == 0);
3523 while ((unsigned long) size < fwd->size)
3524 {
3525 fwd = fwd->fd_nextsize;
3526 assert ((fwd->size & NON_MAIN_ARENA) == 0);
3527 }
3528
3529 if ((unsigned long) size == (unsigned long) fwd->size)
3530 /* Always insert in the second position. */
3531 fwd = fwd->fd;
3532 else
3533 {
3534 victim->fd_nextsize = fwd;
3535 victim->bk_nextsize = fwd->bk_nextsize;
3536 fwd->bk_nextsize = victim;
3537 victim->bk_nextsize->fd_nextsize = victim;
3538 }
3539 bck = fwd->bk;
3540 }
3541 }
3542 else
3543 victim->fd_nextsize = victim->bk_nextsize = victim;
3544 }
3545
3546 mark_bin (av, victim_index);
3547 victim->bk = bck;
3548 victim->fd = fwd;
3549 fwd->bk = victim;
3550 bck->fd = victim;
3551
3552 #define MAX_ITERS 10000
3553 if (++iters >= MAX_ITERS)
3554 break;
3555 }
3556
3557 /*
3558 If a large request, scan through the chunks of current bin in
3559 sorted order to find smallest that fits. Use the skip list for this.
3560 */
3561
3562 if (!in_smallbin_range (nb))
3563 {
3564 bin = bin_at (av, idx);
3565
3566 /* skip scan if empty or largest chunk is too small */
3567 if ((victim = first (bin)) != bin &&
3568 (unsigned long) (victim->size) >= (unsigned long) (nb))
3569 {
3570 victim = victim->bk_nextsize;
3571 while (((unsigned long) (size = chunksize (victim)) <
3572 (unsigned long) (nb)))
3573 victim = victim->bk_nextsize;
3574
3575 /* Avoid removing the first entry for a size so that the skip
3576 list does not have to be rerouted. */
3577 if (victim != last (bin) && victim->size == victim->fd->size)
3578 victim = victim->fd;
3579
3580 remainder_size = size - nb;
3581 unlink (victim, bck, fwd);
3582
3583 /* Exhaust */
3584 if (remainder_size < MINSIZE)
3585 {
3586 set_inuse_bit_at_offset (victim, size);
3587 if (av != &main_arena)
3588 victim->size |= NON_MAIN_ARENA;
3589 }
3590 /* Split */
3591 else
3592 {
3593 remainder = chunk_at_offset (victim, nb);
3594 /* We cannot assume the unsorted list is empty and therefore
3595 have to perform a complete insert here. */
3596 bck = unsorted_chunks (av);
3597 fwd = bck->fd;
3598 if (__glibc_unlikely (fwd->bk != bck))
3599 {
3600 errstr = "malloc(): corrupted unsorted chunks";
3601 goto errout;
3602 }
3603 remainder->bk = bck;
3604 remainder->fd = fwd;
3605 bck->fd = remainder;
3606 fwd->bk = remainder;
3607 if (!in_smallbin_range (remainder_size))
3608 {
3609 remainder->fd_nextsize = NULL;
3610 remainder->bk_nextsize = NULL;
3611 }
3612 set_head (victim, nb | PREV_INUSE |
3613 (av != &main_arena ? NON_MAIN_ARENA : 0));
3614 set_head (remainder, remainder_size | PREV_INUSE);
3615 set_foot (remainder, remainder_size);
3616 }
3617 check_malloced_chunk (av, victim, nb);
3618 void *p = chunk2mem (victim);
3619 alloc_perturb (p, bytes);
3620 return p;
3621 }
3622 }
3623
3624 /*
3625 Search for a chunk by scanning bins, starting with next largest
3626 bin. This search is strictly by best-fit; i.e., the smallest
3627 (with ties going to approximately the least recently used) chunk
3628 that fits is selected.
3629
3630 The bitmap avoids needing to check that most blocks are nonempty.
3631 The particular case of skipping all bins during warm-up phases
3632 when no chunks have been returned yet is faster than it might look.
3633 */
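  /* The binmap is a bitmap with one bit per bin, packed into words so that
     whole blocks of empty bins can be skipped at once.  A simplified
     stand-alone sketch of that scan; find_next_nonempty, BITS_PER_WORD and
     the flat array layout are illustrative assumptions, not the internal
     idx2block/idx2bit machinery (it also assumes only bits below nbins are
     ever set).

       #define BITS_PER_WORD (8 * sizeof (unsigned int))

       // Return the index of the first marked bin at or after idx,
       // or nbins if every remaining bin is empty.
       static unsigned int
       find_next_nonempty (const unsigned int *binmap, unsigned int idx,
                           unsigned int nbins)
       {
         while (idx < nbins)
           {
             unsigned int block = idx / BITS_PER_WORD;
             unsigned int word = binmap[block] >> (idx % BITS_PER_WORD);
             if (word != 0)
               {
                 // Walk to the lowest set bit at or after idx in this word.
                 while ((word & 1) == 0)
                   {
                     word >>= 1;
                     ++idx;
                   }
                 return idx;
               }
             // The rest of this word is all zeros: jump to the next word.
             idx = (block + 1) * BITS_PER_WORD;
           }
         return nbins;
       }
   */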
3634
3635 ++idx;
3636 bin = bin_at (av, idx);
3637 block = idx2block (idx);
3638 map = av->binmap[block];
3639 bit = idx2bit (idx);
3640
3641 for (;; )
3642 {
3643 /* Skip rest of block if there are no more set bits in this block. */
3644 if (bit > map || bit == 0)
3645 {
3646 do
3647 {
3648 if (++block >= BINMAPSIZE) /* out of bins */
3649 goto use_top;
3650 }
3651 while ((map = av->binmap[block]) == 0);
3652
3653 bin = bin_at (av, (block << BINMAPSHIFT));
3654 bit = 1;
3655 }
3656
3657 /* Advance to bin with set bit. There must be one. */
3658 while ((bit & map) == 0)
3659 {
3660 bin = next_bin (bin);
3661 bit <<= 1;
3662 assert (bit != 0);
3663 }
3664
3665 /* Inspect the bin. It is likely to be non-empty */
3666 victim = last (bin);
3667
3668 /* If a false alarm (empty bin), clear the bit. */
3669 if (victim == bin)
3670 {
3671 av->binmap[block] = map &= ~bit; /* Write through */
3672 bin = next_bin (bin);
3673 bit <<= 1;
3674 }
3675
3676 else
3677 {
3678 size = chunksize (victim);
3679
3680 /* We know the first chunk in this bin is big enough to use. */
3681 assert ((unsigned long) (size) >= (unsigned long) (nb));
3682
3683 remainder_size = size - nb;
3684
3685 /* unlink */
3686 unlink (victim, bck, fwd);
3687
3688 /* Exhaust */
3689 if (remainder_size < MINSIZE)
3690 {
3691 set_inuse_bit_at_offset (victim, size);
3692 if (av != &main_arena)
3693 victim->size |= NON_MAIN_ARENA;
3694 }
3695
3696 /* Split */
3697 else
3698 {
3699 remainder = chunk_at_offset (victim, nb);
3700
3701 /* We cannot assume the unsorted list is empty and therefore
3702 have to perform a complete insert here. */
3703 bck = unsorted_chunks (av);
3704 fwd = bck->fd;
3705 if (__glibc_unlikely (fwd->bk != bck))
3706 {
3707 errstr = "malloc(): corrupted unsorted chunks 2";
3708 goto errout;
3709 }
3710 remainder->bk = bck;
3711 remainder->fd = fwd;
3712 bck->fd = remainder;
3713 fwd->bk = remainder;
3714
3715 /* advertise as last remainder */
3716 if (in_smallbin_range (nb))
3717 av->last_remainder = remainder;
3718 if (!in_smallbin_range (remainder_size))
3719 {
3720 remainder->fd_nextsize = NULL;
3721 remainder->bk_nextsize = NULL;
3722 }
3723 set_head (victim, nb | PREV_INUSE |
3724 (av != &main_arena ? NON_MAIN_ARENA : 0));
3725 set_head (remainder, remainder_size | PREV_INUSE);
3726 set_foot (remainder, remainder_size);
3727 }
3728 check_malloced_chunk (av, victim, nb);
3729 void *p = chunk2mem (victim);
3730 alloc_perturb (p, bytes);
3731 return p;
3732 }
3733 }
3734
3735 use_top:
3736 /*
3737 If large enough, split off the chunk bordering the end of memory
3738 (held in av->top). Note that this is in accord with the best-fit
3739 search rule. In effect, av->top is treated as larger (and thus
3740 less well fitting) than any other available chunk since it can
3741 be extended to be as large as necessary (up to system
3742 limitations).
3743
3744 We require that av->top always exists (i.e., has size >=
3745 MINSIZE) after initialization, so if it would otherwise be
3746 exhausted by current request, it is replenished. (The main
3747 reason for ensuring it exists is that we may need MINSIZE space
3748 to put in fenceposts in sysmalloc.)
3749 */
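  /* As a quick illustration: with nb = 48 and a 4096-byte top chunk, the
     first 48 bytes are carved off for the caller and the remaining 4048
     bytes become the new av->top; if top held fewer than nb + MINSIZE
     bytes, the request instead falls through to the consolidation and
     sysmalloc paths below.  */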
3750
3751 victim = av->top;
3752 size = chunksize (victim);
3753
3754 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
3755 {
3756 remainder_size = size - nb;
3757 remainder = chunk_at_offset (victim, nb);
3758 av->top = remainder;
3759 set_head (victim, nb | PREV_INUSE |
3760 (av != &main_arena ? NON_MAIN_ARENA : 0));
3761 set_head (remainder, remainder_size | PREV_INUSE);
3762
3763 check_malloced_chunk (av, victim, nb);
3764 void *p = chunk2mem (victim);
3765 alloc_perturb (p, bytes);
3766 return p;
3767 }
3768
3769 /* When we are using atomic ops to free fast chunks we can get
3770 here for all block sizes. */
3771 else if (have_fastchunks (av))
3772 {
3773 malloc_consolidate (av);
3774 /* restore original bin index */
3775 if (in_smallbin_range (nb))
3776 idx = smallbin_index (nb);
3777 else
3778 idx = largebin_index (nb);
3779 }
3780
3781 /*
3782 Otherwise, relay to handle system-dependent cases
3783 */
3784 else
3785 {
3786 void *p = sysmalloc (nb, av);
3787 if (p != NULL)
3788 alloc_perturb (p, bytes);
3789 return p;
3790 }
3791 }
3792 }
3793
3794 /*
3795 ------------------------------ free ------------------------------
3796 */
3797
3798 static void
3799 _int_free (mstate av, mchunkptr p, int have_lock)
3800 {
3801 INTERNAL_SIZE_T size; /* its size */
3802 mfastbinptr *fb; /* associated fastbin */
3803 mchunkptr nextchunk; /* next contiguous chunk */
3804 INTERNAL_SIZE_T nextsize; /* its size */
3805 int nextinuse; /* true if nextchunk is used */
3806 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3807 mchunkptr bck; /* misc temp for linking */
3808 mchunkptr fwd; /* misc temp for linking */
3809
3810 const char *errstr = NULL;
3811 int locked = 0;
3812
3813 size = chunksize (p);
3814
3815 /* Little security check which won't hurt performance: the
3816     allocator never wraps around at the end of the address space.
3817 Therefore we can exclude some size values which might appear
3818 here by accident or by "design" from some intruder. */
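  /* As an illustration (assuming a 32-bit address space purely for brevity):
     with size = 0x1000, (uintptr_t) -size is 0xfffff000, so any chunk
     pointer larger than that would make p + size wrap past the end of the
     address space, something a legitimate chunk can never do.  */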
3819 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
3820 || __builtin_expect (misaligned_chunk (p), 0))
3821 {
3822 errstr = "free(): invalid pointer";
3823 errout:
3824 if (!have_lock && locked)
3825 (void) mutex_unlock (&av->mutex);
3826 malloc_printerr (check_action, errstr, chunk2mem (p));
3827 return;
3828 }
3829   /* We know that each chunk is at least MINSIZE bytes in size and a
3830      multiple of MALLOC_ALIGNMENT.  */
3831 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
3832 {
3833 errstr = "free(): invalid size";
3834 goto errout;
3835 }
3836
3837 check_inuse_chunk(av, p);
3838
3839 /*
3840 If eligible, place chunk on a fastbin so it can be found
3841 and used quickly in malloc.
3842 */
3843
3844 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
3845
3846 #if TRIM_FASTBINS
3847 /*
3848 If TRIM_FASTBINS set, don't place chunks
3849 bordering top into fastbins
3850 */
3851 && (chunk_at_offset(p, size) != av->top)
3852 #endif
3853 ) {
3854
3855 if (__builtin_expect (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ, 0)
3856 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3857 >= av->system_mem, 0))
3858 {
3859 /* We might not have a lock at this point and concurrent modifications
3860        of system_mem might have led to a false positive.  Redo the test
3861 after getting the lock. */
3862 if (have_lock
3863 || ({ assert (locked == 0);
3864 mutex_lock(&av->mutex);
3865 locked = 1;
3866 chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
3867 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3868 }))
3869 {
3870 errstr = "free(): invalid next size (fast)";
3871 goto errout;
3872 }
3873 if (! have_lock)
3874 {
3875 (void)mutex_unlock(&av->mutex);
3876 locked = 0;
3877 }
3878 }
3879
3880 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3881
3882 set_fastchunks(av);
3883 unsigned int idx = fastbin_index(size);
3884 fb = &fastbin (av, idx);
3885
3886 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
3887 mchunkptr old = *fb, old2;
3888 unsigned int old_idx = ~0u;
3889 do
3890 {
3891 /* Check that the top of the bin is not the record we are going to add
3892 (i.e., double free). */
3893 if (__builtin_expect (old == p, 0))
3894 {
3895 errstr = "double free or corruption (fasttop)";
3896 goto errout;
3897 }
3898 /* Check that size of fastbin chunk at the top is the same as
3899 size of the chunk that we are adding. We can dereference OLD
3900 only if we have the lock, otherwise it might have already been
3901 deallocated. See use of OLD_IDX below for the actual check. */
3902 if (have_lock && old != NULL)
3903 old_idx = fastbin_index(chunksize(old));
3904 p->fd = old2 = old;
3905 }
3906 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
3907
3908 if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
3909 {
3910 errstr = "invalid fastbin entry (free)";
3911 goto errout;
3912 }
3913 }
3914
3915 /*
3916 Consolidate other non-mmapped chunks as they arrive.
3917 */
3918
3919 else if (!chunk_is_mmapped(p)) {
3920 if (! have_lock) {
3921 (void)mutex_lock(&av->mutex);
3922 locked = 1;
3923 }
3924
3925 nextchunk = chunk_at_offset(p, size);
3926
3927 /* Lightweight tests: check whether the block is already the
3928 top block. */
3929 if (__glibc_unlikely (p == av->top))
3930 {
3931 errstr = "double free or corruption (top)";
3932 goto errout;
3933 }
3934 /* Or whether the next chunk is beyond the boundaries of the arena. */
3935 if (__builtin_expect (contiguous (av)
3936 && (char *) nextchunk
3937 >= ((char *) av->top + chunksize(av->top)), 0))
3938 {
3939 errstr = "double free or corruption (out)";
3940 goto errout;
3941 }
3942 /* Or whether the block is actually not marked used. */
3943 if (__glibc_unlikely (!prev_inuse(nextchunk)))
3944 {
3945 errstr = "double free or corruption (!prev)";
3946 goto errout;
3947 }
3948
3949 nextsize = chunksize(nextchunk);
3950 if (__builtin_expect (nextchunk->size <= 2 * SIZE_SZ, 0)
3951 || __builtin_expect (nextsize >= av->system_mem, 0))
3952 {
3953 errstr = "free(): invalid next size (normal)";
3954 goto errout;
3955 }
3956
3957 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3958
3959 /* consolidate backward */
3960 if (!prev_inuse(p)) {
3961 prevsize = p->prev_size;
3962 size += prevsize;
3963 p = chunk_at_offset(p, -((long) prevsize));
3964 unlink(p, bck, fwd);
3965 }
3966
3967 if (nextchunk != av->top) {
3968 /* get and clear inuse bit */
3969 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3970
3971 /* consolidate forward */
3972 if (!nextinuse) {
3973 unlink(nextchunk, bck, fwd);
3974 size += nextsize;
3975 } else
3976 clear_inuse_bit_at_offset(nextchunk, 0);
3977
3978 /*
3979 Place the chunk in unsorted chunk list. Chunks are
3980 not placed into regular bins until after they have
3981 been given one chance to be used in malloc.
3982 */
3983
3984 bck = unsorted_chunks(av);
3985 fwd = bck->fd;
3986 if (__glibc_unlikely (fwd->bk != bck))
3987 {
3988 errstr = "free(): corrupted unsorted chunks";
3989 goto errout;
3990 }
3991 p->fd = fwd;
3992 p->bk = bck;
3993 if (!in_smallbin_range(size))
3994 {
3995 p->fd_nextsize = NULL;
3996 p->bk_nextsize = NULL;
3997 }
3998 bck->fd = p;
3999 fwd->bk = p;
4000
4001 set_head(p, size | PREV_INUSE);
4002 set_foot(p, size);
4003
4004 check_free_chunk(av, p);
4005 }
4006
4007 /*
4008 If the chunk borders the current high end of memory,
4009 consolidate into top
4010 */
4011
4012 else {
4013 size += nextsize;
4014 set_head(p, size | PREV_INUSE);
4015 av->top = p;
4016 check_chunk(av, p);
4017 }
4018
4019 /*
4020 If freeing a large space, consolidate possibly-surrounding
4021 chunks. Then, if the total unused topmost memory exceeds trim
4022 threshold, ask malloc_trim to reduce top.
4023
4024 Unless max_fast is 0, we don't know if there are fastbins
4025 bordering top, so we cannot tell for sure whether threshold
4026 has been reached unless fastbins are consolidated. But we
4027 don't want to consolidate on each free. As a compromise,
4028 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4029 is reached.
4030 */
4031
4032 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4033 if (have_fastchunks(av))
4034 malloc_consolidate(av);
4035
4036 if (av == &main_arena) {
4037 #ifndef MORECORE_CANNOT_TRIM
4038 if ((unsigned long)(chunksize(av->top)) >=
4039 (unsigned long)(mp_.trim_threshold))
4040 systrim(mp_.top_pad, av);
4041 #endif
4042 } else {
4043 /* Always try heap_trim(), even if the top chunk is not
4044 large, because the corresponding heap might go away. */
4045 heap_info *heap = heap_for_ptr(top(av));
4046
4047 assert(heap->ar_ptr == av);
4048 heap_trim(heap, mp_.top_pad);
4049 }
4050 }
4051
4052 if (! have_lock) {
4053 assert (locked);
4054 (void)mutex_unlock(&av->mutex);
4055 }
4056 }
4057 /*
4058 If the chunk was allocated via mmap, release via munmap().
4059 */
4060
4061 else {
4062 munmap_chunk (p);
4063 }
4064 }
4065
4066 /*
4067 ------------------------- malloc_consolidate -------------------------
4068
4069 malloc_consolidate is a specialized version of free() that tears
4070 down chunks held in fastbins. Free itself cannot be used for this
4071 purpose since, among other things, it might place chunks back onto
4072 fastbins. So, instead, we need to use a minor variant of the same
4073 code.
4074
4075 Also, because this routine needs to be called the first time through
4076 malloc anyway, it turns out to be the perfect place to trigger
4077 initialization code.
4078 */
4079
4080 static void malloc_consolidate(mstate av)
4081 {
4082 mfastbinptr* fb; /* current fastbin being consolidated */
4083 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4084 mchunkptr p; /* current chunk being consolidated */
4085 mchunkptr nextp; /* next chunk to consolidate */
4086 mchunkptr unsorted_bin; /* bin header */
4087 mchunkptr first_unsorted; /* chunk to link to */
4088
4089 /* These have same use as in free() */
4090 mchunkptr nextchunk;
4091 INTERNAL_SIZE_T size;
4092 INTERNAL_SIZE_T nextsize;
4093 INTERNAL_SIZE_T prevsize;
4094 int nextinuse;
4095 mchunkptr bck;
4096 mchunkptr fwd;
4097
4098 /*
4099 If max_fast is 0, we know that av hasn't
4100 yet been initialized, in which case do so below
4101 */
4102
4103 if (get_max_fast () != 0) {
4104 clear_fastchunks(av);
4105
4106 unsorted_bin = unsorted_chunks(av);
4107
4108 /*
4109 Remove each chunk from fast bin and consolidate it, placing it
4110 then in unsorted bin. Among other reasons for doing this,
4111 placing in unsorted bin avoids needing to calculate actual bins
4112 until malloc is sure that chunks aren't immediately going to be
4113 reused anyway.
4114 */
4115
4116 maxfb = &fastbin (av, NFASTBINS - 1);
4117 fb = &fastbin (av, 0);
4118 do {
4119 p = atomic_exchange_acq (fb, 0);
4120 if (p != 0) {
4121 do {
4122 check_inuse_chunk(av, p);
4123 nextp = p->fd;
4124
4125 /* Slightly streamlined version of consolidation code in free() */
4126 size = p->size & ~(PREV_INUSE|NON_MAIN_ARENA);
4127 nextchunk = chunk_at_offset(p, size);
4128 nextsize = chunksize(nextchunk);
4129
4130 if (!prev_inuse(p)) {
4131 prevsize = p->prev_size;
4132 size += prevsize;
4133 p = chunk_at_offset(p, -((long) prevsize));
4134 unlink(p, bck, fwd);
4135 }
4136
4137 if (nextchunk != av->top) {
4138 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4139
4140 if (!nextinuse) {
4141 size += nextsize;
4142 unlink(nextchunk, bck, fwd);
4143 } else
4144 clear_inuse_bit_at_offset(nextchunk, 0);
4145
4146 first_unsorted = unsorted_bin->fd;
4147 unsorted_bin->fd = p;
4148 first_unsorted->bk = p;
4149
4150 if (!in_smallbin_range (size)) {
4151 p->fd_nextsize = NULL;
4152 p->bk_nextsize = NULL;
4153 }
4154
4155 set_head(p, size | PREV_INUSE);
4156 p->bk = unsorted_bin;
4157 p->fd = first_unsorted;
4158 set_foot(p, size);
4159 }
4160
4161 else {
4162 size += nextsize;
4163 set_head(p, size | PREV_INUSE);
4164 av->top = p;
4165 }
4166
4167 } while ( (p = nextp) != 0);
4168
4169 }
4170 } while (fb++ != maxfb);
4171 }
4172 else {
4173 malloc_init_state(av);
4174 check_malloc_state(av);
4175 }
4176 }
4177
4178 /*
4179 ------------------------------ realloc ------------------------------
4180 */
4181
4182 void*
4183 _int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4184 INTERNAL_SIZE_T nb)
4185 {
4186 mchunkptr newp; /* chunk to return */
4187 INTERNAL_SIZE_T newsize; /* its size */
4188 void* newmem; /* corresponding user mem */
4189
4190 mchunkptr next; /* next contiguous chunk after oldp */
4191
4192 mchunkptr remainder; /* extra space at end of newp */
4193 unsigned long remainder_size; /* its size */
4194
4195 mchunkptr bck; /* misc temp for linking */
4196 mchunkptr fwd; /* misc temp for linking */
4197
4198 unsigned long copysize; /* bytes to copy */
4199 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4200 INTERNAL_SIZE_T* s; /* copy source */
4201 INTERNAL_SIZE_T* d; /* copy destination */
4202
4203 const char *errstr = NULL;
4204
4205 /* oldmem size */
4206 if (__builtin_expect (oldp->size <= 2 * SIZE_SZ, 0)
4207 || __builtin_expect (oldsize >= av->system_mem, 0))
4208 {
4209 errstr = "realloc(): invalid old size";
4210 errout:
4211 malloc_printerr (check_action, errstr, chunk2mem (oldp));
4212 return NULL;
4213 }
4214
4215 check_inuse_chunk (av, oldp);
4216
4217 /* All callers already filter out mmap'ed chunks. */
4218 assert (!chunk_is_mmapped (oldp));
4219
4220 next = chunk_at_offset (oldp, oldsize);
4221 INTERNAL_SIZE_T nextsize = chunksize (next);
4222 if (__builtin_expect (next->size <= 2 * SIZE_SZ, 0)
4223 || __builtin_expect (nextsize >= av->system_mem, 0))
4224 {
4225 errstr = "realloc(): invalid next size";
4226 goto errout;
4227 }
4228
4229 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4230 {
4231 /* already big enough; split below */
4232 newp = oldp;
4233 newsize = oldsize;
4234 }
4235
4236 else
4237 {
4238 /* Try to expand forward into top */
4239 if (next == av->top &&
4240 (unsigned long) (newsize = oldsize + nextsize) >=
4241 (unsigned long) (nb + MINSIZE))
4242 {
4243 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4244 av->top = chunk_at_offset (oldp, nb);
4245 set_head (av->top, (newsize - nb) | PREV_INUSE);
4246 check_inuse_chunk (av, oldp);
4247 return chunk2mem (oldp);
4248 }
4249
4250 /* Try to expand forward into next chunk; split off remainder below */
4251 else if (next != av->top &&
4252 !inuse (next) &&
4253 (unsigned long) (newsize = oldsize + nextsize) >=
4254 (unsigned long) (nb))
4255 {
4256 newp = oldp;
4257 unlink (next, bck, fwd);
4258 }
4259
4260 /* allocate, copy, free */
4261 else
4262 {
4263 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4264 if (newmem == 0)
4265 return 0; /* propagate failure */
4266
4267 newp = mem2chunk (newmem);
4268 newsize = chunksize (newp);
4269
4270 /*
4271 Avoid copy if newp is next chunk after oldp.
4272 */
4273 if (newp == next)
4274 {
4275 newsize += oldsize;
4276 newp = oldp;
4277 }
4278 else
4279 {
4280 /*
4281             Unroll copy of <= 36 bytes (72 if 8-byte sizes)
4282 We know that contents have an odd number of
4283 INTERNAL_SIZE_T-sized words; minimally 3.
4284 */
4285
4286 copysize = oldsize - SIZE_SZ;
4287 s = (INTERNAL_SIZE_T *) (chunk2mem (oldp));
4288 d = (INTERNAL_SIZE_T *) (newmem);
4289 ncopies = copysize / sizeof (INTERNAL_SIZE_T);
4290 assert (ncopies >= 3);
4291
4292 if (ncopies > 9)
4293 memcpy (d, s, copysize);
4294
4295 else
4296 {
4297 *(d + 0) = *(s + 0);
4298 *(d + 1) = *(s + 1);
4299 *(d + 2) = *(s + 2);
4300 if (ncopies > 4)
4301 {
4302 *(d + 3) = *(s + 3);
4303 *(d + 4) = *(s + 4);
4304 if (ncopies > 6)
4305 {
4306 *(d + 5) = *(s + 5);
4307 *(d + 6) = *(s + 6);
4308 if (ncopies > 8)
4309 {
4310 *(d + 7) = *(s + 7);
4311 *(d + 8) = *(s + 8);
4312 }
4313 }
4314 }
4315 }
4316
4317 _int_free (av, oldp, 1);
4318 check_inuse_chunk (av, newp);
4319 return chunk2mem (newp);
4320 }
4321 }
4322 }
4323
4324 /* If possible, free extra space in old or extended chunk */
4325
4326 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4327
4328 remainder_size = newsize - nb;
4329
4330 if (remainder_size < MINSIZE) /* not enough extra to split off */
4331 {
4332 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4333 set_inuse_bit_at_offset (newp, newsize);
4334 }
4335 else /* split remainder */
4336 {
4337 remainder = chunk_at_offset (newp, nb);
4338 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4339 set_head (remainder, remainder_size | PREV_INUSE |
4340 (av != &main_arena ? NON_MAIN_ARENA : 0));
4341 /* Mark remainder as inuse so free() won't complain */
4342 set_inuse_bit_at_offset (remainder, remainder_size);
4343 _int_free (av, remainder, 1);
4344 }
4345
4346 check_inuse_chunk (av, newp);
4347 return chunk2mem (newp);
4348 }
4349
4350 /*
4351 ------------------------------ memalign ------------------------------
4352 */
4353
4354 static void *
4355 _int_memalign (mstate av, size_t alignment, size_t bytes)
4356 {
4357 INTERNAL_SIZE_T nb; /* padded request size */
4358 char *m; /* memory returned by malloc call */
4359 mchunkptr p; /* corresponding chunk */
4360 char *brk; /* alignment point within p */
4361 mchunkptr newp; /* chunk to return */
4362 INTERNAL_SIZE_T newsize; /* its size */
4363 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4364 mchunkptr remainder; /* spare room at end to split off */
4365 unsigned long remainder_size; /* its size */
4366 INTERNAL_SIZE_T size;
4367
4368
4369
4370 checked_request2size (bytes, nb);
4371
4372 /*
4373 Strategy: find a spot within that chunk that meets the alignment
4374 request, and then possibly free the leading and trailing space.
4375 */
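  /* The alignment arithmetic used below, in compact form; align_up is a
     hypothetical helper shown only as an illustration and assumes that
     alignment is a power of two.

       #include <stdint.h>

       // Round addr up to the next multiple of alignment (a power of two).
       static uintptr_t
       align_up (uintptr_t addr, uintptr_t alignment)
       {
         return (addr + alignment - 1) & ~(alignment - 1);
       }

     With alignment = 64, an address of 0x1004 rounds up to 0x1040 while
     0x1000 is left unchanged.  The code below uses the equivalent mask
     form & -alignment.  */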
4376
4377
4378 /* Call malloc with worst case padding to hit alignment. */
4379
4380 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4381
4382 if (m == 0)
4383 return 0; /* propagate failure */
4384
4385 p = mem2chunk (m);
4386
4387 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4388
4389 { /*
4390 Find an aligned spot inside chunk. Since we need to give back
4391 leading space in a chunk of at least MINSIZE, if the first
4392 calculation places us at a spot with less than MINSIZE leader,
4393 we can move to the next aligned spot -- we've allocated enough
4394 total room so that this is always possible.
4395 */
4396 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4397 - ((signed long) alignment));
4398 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4399 brk += alignment;
4400
4401 newp = (mchunkptr) brk;
4402 leadsize = brk - (char *) (p);
4403 newsize = chunksize (p) - leadsize;
4404
4405 /* For mmapped chunks, just adjust offset */
4406 if (chunk_is_mmapped (p))
4407 {
4408 newp->prev_size = p->prev_size + leadsize;
4409 set_head (newp, newsize | IS_MMAPPED);
4410 return chunk2mem (newp);
4411 }
4412
4413 /* Otherwise, give back leader, use the rest */
4414 set_head (newp, newsize | PREV_INUSE |
4415 (av != &main_arena ? NON_MAIN_ARENA : 0));
4416 set_inuse_bit_at_offset (newp, newsize);
4417 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4418 _int_free (av, p, 1);
4419 p = newp;
4420
4421 assert (newsize >= nb &&
4422 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
4423 }
4424
4425 /* Also give back spare room at the end */
4426 if (!chunk_is_mmapped (p))
4427 {
4428 size = chunksize (p);
4429 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4430 {
4431 remainder_size = size - nb;
4432 remainder = chunk_at_offset (p, nb);
4433 set_head (remainder, remainder_size | PREV_INUSE |
4434 (av != &main_arena ? NON_MAIN_ARENA : 0));
4435 set_head_size (p, nb);
4436 _int_free (av, remainder, 1);
4437 }
4438 }
4439
4440 check_inuse_chunk (av, p);
4441 return chunk2mem (p);
4442 }
4443
4444
4445 /*
4446 ------------------------------ malloc_trim ------------------------------
4447 */
4448
4449 static int
4450 mtrim (mstate av, size_t pad)
4451 {
4452 /* Ensure initialization/consolidation */
4453 malloc_consolidate (av);
4454
4455 const size_t ps = GLRO (dl_pagesize);
4456 int psindex = bin_index (ps);
4457 const size_t psm1 = ps - 1;
4458
4459 int result = 0;
4460 for (int i = 1; i < NBINS; ++i)
4461 if (i == 1 || i >= psindex)
4462 {
4463 mbinptr bin = bin_at (av, i);
4464
4465 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4466 {
4467 INTERNAL_SIZE_T size = chunksize (p);
4468
4469 if (size > psm1 + sizeof (struct malloc_chunk))
4470 {
4471 /* See whether the chunk contains at least one unused page. */
4472 char *paligned_mem = (char *) (((uintptr_t) p
4473 + sizeof (struct malloc_chunk)
4474 + psm1) & ~psm1);
4475
4476 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4477 assert ((char *) p + size > paligned_mem);
4478
4479 /* This is the size we could potentially free. */
4480 size -= paligned_mem - (char *) p;
4481
4482 if (size > psm1)
4483 {
4484 #if MALLOC_DEBUG
4485 /* When debugging we simulate destroying the memory
4486 content. */
4487 memset (paligned_mem, 0x89, size & ~psm1);
4488 #endif
4489 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4490
4491 result = 1;
4492 }
4493 }
4494 }
4495 }
4496
4497 #ifndef MORECORE_CANNOT_TRIM
4498 return result | (av == &main_arena ? systrim (pad, av) : 0);
4499
4500 #else
4501 return result;
4502 #endif
4503 }
4504
4505
4506 int
4507 __malloc_trim (size_t s)
4508 {
4509 int result = 0;
4510
4511 if (__malloc_initialized < 0)
4512 ptmalloc_init ();
4513
4514 mstate ar_ptr = &main_arena;
4515 do
4516 {
4517 (void) mutex_lock (&ar_ptr->mutex);
4518 result |= mtrim (ar_ptr, s);
4519 (void) mutex_unlock (&ar_ptr->mutex);
4520
4521 ar_ptr = ar_ptr->next;
4522 }
4523 while (ar_ptr != &main_arena);
4524
4525 return result;
4526 }
4527
4528
4529 /*
4530 ------------------------- malloc_usable_size -------------------------
4531 */
4532
4533 static size_t
4534 musable (void *mem)
4535 {
4536 mchunkptr p;
4537 if (mem != 0)
4538 {
4539 p = mem2chunk (mem);
4540
4541 if (__builtin_expect (using_malloc_checking == 1, 0))
4542 return malloc_check_get_size (p);
4543
4544 if (chunk_is_mmapped (p))
4545 return chunksize (p) - 2 * SIZE_SZ;
4546 else if (inuse (p))
4547 return chunksize (p) - SIZE_SZ;
4548 }
4549 return 0;
4550 }
4551
4552
4553 size_t
4554 __malloc_usable_size (void *m)
4555 {
4556 size_t result;
4557
4558 result = musable (m);
4559 return result;
4560 }
4561
4562 /*
4563 ------------------------------ mallinfo ------------------------------
4564 Accumulate malloc statistics for arena AV into M.
4565 */
4566
4567 static void
4568 int_mallinfo (mstate av, struct mallinfo *m)
4569 {
4570 size_t i;
4571 mbinptr b;
4572 mchunkptr p;
4573 INTERNAL_SIZE_T avail;
4574 INTERNAL_SIZE_T fastavail;
4575 int nblocks;
4576 int nfastblocks;
4577
4578 /* Ensure initialization */
4579 if (av->top == 0)
4580 malloc_consolidate (av);
4581
4582 check_malloc_state (av);
4583
4584 /* Account for top */
4585 avail = chunksize (av->top);
4586 nblocks = 1; /* top always exists */
4587
4588 /* traverse fastbins */
4589 nfastblocks = 0;
4590 fastavail = 0;
4591
4592 for (i = 0; i < NFASTBINS; ++i)
4593 {
4594 for (p = fastbin (av, i); p != 0; p = p->fd)
4595 {
4596 ++nfastblocks;
4597 fastavail += chunksize (p);
4598 }
4599 }
4600
4601 avail += fastavail;
4602
4603 /* traverse regular bins */
4604 for (i = 1; i < NBINS; ++i)
4605 {
4606 b = bin_at (av, i);
4607 for (p = last (b); p != b; p = p->bk)
4608 {
4609 ++nblocks;
4610 avail += chunksize (p);
4611 }
4612 }
4613
4614 m->smblks += nfastblocks;
4615 m->ordblks += nblocks;
4616 m->fordblks += avail;
4617 m->uordblks += av->system_mem - avail;
4618 m->arena += av->system_mem;
4619 m->fsmblks += fastavail;
4620 if (av == &main_arena)
4621 {
4622 m->hblks = mp_.n_mmaps;
4623 m->hblkhd = mp_.mmapped_mem;
4624 m->usmblks = mp_.max_total_mem;
4625 m->keepcost = chunksize (av->top);
4626 }
4627 }
4628
4629
4630 struct mallinfo
4631 __libc_mallinfo ()
4632 {
4633 struct mallinfo m;
4634 mstate ar_ptr;
4635
4636 if (__malloc_initialized < 0)
4637 ptmalloc_init ();
4638
4639 memset (&m, 0, sizeof (m));
4640 ar_ptr = &main_arena;
4641 do
4642 {
4643 (void) mutex_lock (&ar_ptr->mutex);
4644 int_mallinfo (ar_ptr, &m);
4645 (void) mutex_unlock (&ar_ptr->mutex);
4646
4647 ar_ptr = ar_ptr->next;
4648 }
4649 while (ar_ptr != &main_arena);
4650
4651 return m;
4652 }
4653
4654 /*
4655 ------------------------------ malloc_stats ------------------------------
4656 */
4657
4658 void
4659 __malloc_stats (void)
4660 {
4661 int i;
4662 mstate ar_ptr;
4663 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4664
4665 if (__malloc_initialized < 0)
4666 ptmalloc_init ();
4667 _IO_flockfile (stderr);
4668 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4669 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4670 for (i = 0, ar_ptr = &main_arena;; i++)
4671 {
4672 struct mallinfo mi;
4673
4674 memset (&mi, 0, sizeof (mi));
4675 (void) mutex_lock (&ar_ptr->mutex);
4676 int_mallinfo (ar_ptr, &mi);
4677 fprintf (stderr, "Arena %d:\n", i);
4678 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
4679 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
4680 #if MALLOC_DEBUG > 1
4681 if (i > 0)
4682 dump_heap (heap_for_ptr (top (ar_ptr)));
4683 #endif
4684 system_b += mi.arena;
4685 in_use_b += mi.uordblks;
4686 (void) mutex_unlock (&ar_ptr->mutex);
4687 ar_ptr = ar_ptr->next;
4688 if (ar_ptr == &main_arena)
4689 break;
4690 }
4691 fprintf (stderr, "Total (incl. mmap):\n");
4692 fprintf (stderr, "system bytes = %10u\n", system_b);
4693 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
4694 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
4695 fprintf (stderr, "max mmap bytes = %10lu\n",
4696 (unsigned long) mp_.max_mmapped_mem);
4697 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4698 _IO_funlockfile (stderr);
4699 }
4700
4701
4702 /*
4703 ------------------------------ mallopt ------------------------------
4704 */
4705
4706 int
4707 __libc_mallopt (int param_number, int value)
4708 {
4709 mstate av = &main_arena;
4710 int res = 1;
4711
4712 if (__malloc_initialized < 0)
4713 ptmalloc_init ();
4714 (void) mutex_lock (&av->mutex);
4715 /* Ensure initialization/consolidation */
4716 malloc_consolidate (av);
4717
4718 LIBC_PROBE (memory_mallopt, 2, param_number, value);
4719
4720 switch (param_number)
4721 {
4722 case M_MXFAST:
4723 if (value >= 0 && value <= MAX_FAST_SIZE)
4724 {
4725 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
4726 set_max_fast (value);
4727 }
4728 else
4729 res = 0;
4730 break;
4731
4732 case M_TRIM_THRESHOLD:
4733 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value,
4734 mp_.trim_threshold, mp_.no_dyn_threshold);
4735 mp_.trim_threshold = value;
4736 mp_.no_dyn_threshold = 1;
4737 break;
4738
4739 case M_TOP_PAD:
4740 LIBC_PROBE (memory_mallopt_top_pad, 3, value,
4741 mp_.top_pad, mp_.no_dyn_threshold);
4742 mp_.top_pad = value;
4743 mp_.no_dyn_threshold = 1;
4744 break;
4745
4746 case M_MMAP_THRESHOLD:
4747 /* Forbid setting the threshold too high. */
4748 if ((unsigned long) value > HEAP_MAX_SIZE / 2)
4749 res = 0;
4750 else
4751 {
4752 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value,
4753 mp_.mmap_threshold, mp_.no_dyn_threshold);
4754 mp_.mmap_threshold = value;
4755 mp_.no_dyn_threshold = 1;
4756 }
4757 break;
4758
4759 case M_MMAP_MAX:
4760 LIBC_PROBE (memory_mallopt_mmap_max, 3, value,
4761 mp_.n_mmaps_max, mp_.no_dyn_threshold);
4762 mp_.n_mmaps_max = value;
4763 mp_.no_dyn_threshold = 1;
4764 break;
4765
4766 case M_CHECK_ACTION:
4767 LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
4768 check_action = value;
4769 break;
4770
4771 case M_PERTURB:
4772 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
4773 perturb_byte = value;
4774 break;
4775
4776 case M_ARENA_TEST:
4777 if (value > 0)
4778 {
4779 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
4780 mp_.arena_test = value;
4781 }
4782 break;
4783
4784 case M_ARENA_MAX:
4785 if (value > 0)
4786 {
4787 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
4788 mp_.arena_max = value;
4789 }
4790 break;
4791 }
4792 (void) mutex_unlock (&av->mutex);
4793 return res;
4794 }
4795 libc_hidden_def (__libc_mallopt)
4796
4797
4798 /*
4799 -------------------- Alternative MORECORE functions --------------------
4800 */
4801
4802
4803 /*
4804 General Requirements for MORECORE.
4805
4806 The MORECORE function must have the following properties:
4807
4808 If MORECORE_CONTIGUOUS is false:
4809
4810 * MORECORE must allocate in multiples of pagesize. It will
4811 only be called with arguments that are multiples of pagesize.
4812
4813 * MORECORE(0) must return an address that is at least
4814 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4815
4816 else (i.e. If MORECORE_CONTIGUOUS is true):
4817
4818 * Consecutive calls to MORECORE with positive arguments
4819 return increasing addresses, indicating that space has been
4820 contiguously extended.
4821
4822 * MORECORE need not allocate in multiples of pagesize.
4823 Calls to MORECORE need not have args of multiples of pagesize.
4824
4825 * MORECORE need not page-align.
4826
4827 In either case:
4828
4829 * MORECORE may allocate more memory than requested. (Or even less,
4830 but this will generally result in a malloc failure.)
4831
4832 * MORECORE must not allocate memory when given argument zero, but
4833 instead return one past the end address of memory from previous
4834 nonzero call. This malloc does NOT call MORECORE(0)
4835 until at least one call with positive arguments is made, so
4836 the initial value returned is not important.
4837
4838 * Even though consecutive calls to MORECORE need not return contiguous
4839 addresses, it must be OK for malloc'ed chunks to span multiple
4840 regions in those cases where they do happen to be contiguous.
4841
4842 * MORECORE need not handle negative arguments -- it may instead
4843 just return MORECORE_FAILURE when given negative arguments.
4844 Negative arguments are always multiples of pagesize. MORECORE
4845 must not misinterpret negative args as large positive unsigned
4846 args. You can suppress all such calls from even occurring by defining
4847   MORECORE_CANNOT_TRIM.
4848
4849 There is some variation across systems about the type of the
4850 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4851 actually be size_t, because sbrk supports negative args, so it is
4852 normally the signed type of the same width as size_t (sometimes
4853 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4854 matter though. Internally, we use "long" as arguments, which should
4855 work across all reasonable possibilities.
4856
4857 Additionally, if MORECORE ever returns failure for a positive
4858 request, then mmap is used as a noncontiguous system allocator. This
4859 is a useful backup strategy for systems with holes in address spaces
4860 -- in this case sbrk cannot contiguously expand the heap, but mmap
4861 may be able to map noncontiguous space.
4862
4863 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4864 a function that always returns MORECORE_FAILURE.
4865
4866 If you are using this malloc with something other than sbrk (or its
4867 emulation) to supply memory regions, you probably want to set
4868 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4869 allocator kindly contributed for pre-OSX macOS. It uses virtually
4870 but not necessarily physically contiguous non-paged memory (locked
4871 in, present and won't get swapped out). You can use it by
4872 uncommenting this section, adding some #includes, and setting up the
4873 appropriate defines above:
4874
4875 *#define MORECORE osMoreCore
4876 *#define MORECORE_CONTIGUOUS 0
4877
4878 There is also a shutdown routine that should somehow be called for
4879 cleanup upon program exit.
4880
4881 *#define MAX_POOL_ENTRIES 100
4882 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
4883 static int next_os_pool;
4884 void *our_os_pools[MAX_POOL_ENTRIES];
4885
4886 void *osMoreCore(int size)
4887 {
4888 void *ptr = 0;
4889 static void *sbrk_top = 0;
4890
4891 if (size > 0)
4892 {
4893 if (size < MINIMUM_MORECORE_SIZE)
4894 size = MINIMUM_MORECORE_SIZE;
4895 if (CurrentExecutionLevel() == kTaskLevel)
4896 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4897 if (ptr == 0)
4898 {
4899 return (void *) MORECORE_FAILURE;
4900 }
4901 // save ptrs so they can be freed during cleanup
4902 our_os_pools[next_os_pool] = ptr;
4903 next_os_pool++;
4904 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4905 sbrk_top = (char *) ptr + size;
4906 return ptr;
4907 }
4908 else if (size < 0)
4909 {
4910 // we don't currently support shrink behavior
4911 return (void *) MORECORE_FAILURE;
4912 }
4913 else
4914 {
4915 return sbrk_top;
4916 }
4917 }
4918
4919 // cleanup any allocated memory pools
4920 // called as last thing before shutting down driver
4921
4922 void osCleanupMem(void)
4923 {
4924 void **ptr;
4925
4926 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4927 if (*ptr)
4928 {
4929 PoolDeallocate(*ptr);
4930 * ptr = 0;
4931 }
4932 }
4933
4934 */
4935
4936
4937 /* Helper code. */
4938
4939 extern char **__libc_argv attribute_hidden;
4940
4941 static void
4942 malloc_printerr (int action, const char *str, void *ptr)
4943 {
4944 if ((action & 5) == 5)
4945 __libc_message (action & 2, "%s\n", str);
4946 else if (action & 1)
4947 {
4948 char buf[2 * sizeof (uintptr_t) + 1];
4949
4950 buf[sizeof (buf) - 1] = '\0';
4951 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
4952 while (cp > buf)
4953 *--cp = '0';
4954
4955 __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
4956 __libc_argv[0] ? : "<unknown>", str, cp);
4957 }
4958 else if (action & 2)
4959 abort ();
4960 }
4961
4962 /* We need a wrapper function for one of the additions of POSIX. */
4963 int
4964 __posix_memalign (void **memptr, size_t alignment, size_t size)
4965 {
4966 void *mem;
4967
4968   /* Test whether the ALIGNMENT argument is valid.  It must be a power of
4969 two multiple of sizeof (void *). */
4970 if (alignment % sizeof (void *) != 0
4971       || !powerof2 (alignment / sizeof (void *))
4972 || alignment == 0)
4973 return EINVAL;
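  /* For example (assuming 8-byte pointers): an alignment of 32 passes the
     test, since 32 % 8 == 0 and 32 / 8 == 4 is a power of two; an alignment
     of 24 is rejected because 24 / 8 == 3 is not a power of two; and an
     alignment of 4 is rejected because it is not a multiple of
     sizeof (void *).  */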
4974
4975
4976 void *address = RETURN_ADDRESS (0);
4977 mem = _mid_memalign (alignment, size, address);
4978
4979 if (mem != NULL)
4980 {
4981 *memptr = mem;
4982 return 0;
4983 }
4984
4985 return ENOMEM;
4986 }
4987 weak_alias (__posix_memalign, posix_memalign)
4988
4989
4990 int
4991 malloc_info (int options, FILE *fp)
4992 {
4993 /* For now, at least. */
4994 if (options != 0)
4995 return EINVAL;
4996
4997 int n = 0;
4998 size_t total_nblocks = 0;
4999 size_t total_nfastblocks = 0;
5000 size_t total_avail = 0;
5001 size_t total_fastavail = 0;
5002 size_t total_system = 0;
5003 size_t total_max_system = 0;
5004 size_t total_aspace = 0;
5005 size_t total_aspace_mprotect = 0;
5006
5007 void
5008 mi_arena (mstate ar_ptr)
5009 {
5010 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5011
5012 size_t nblocks = 0;
5013 size_t nfastblocks = 0;
5014 size_t avail = 0;
5015 size_t fastavail = 0;
5016 struct
5017 {
5018 size_t from;
5019 size_t to;
5020 size_t total;
5021 size_t count;
5022 } sizes[NFASTBINS + NBINS - 1];
5023 #define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5024
5025 mutex_lock (&ar_ptr->mutex);
5026
5027 for (size_t i = 0; i < NFASTBINS; ++i)
5028 {
5029 mchunkptr p = fastbin (ar_ptr, i);
5030 if (p != NULL)
5031 {
5032 size_t nthissize = 0;
5033 size_t thissize = chunksize (p);
5034
5035 while (p != NULL)
5036 {
5037 ++nthissize;
5038 p = p->fd;
5039 }
5040
5041 fastavail += nthissize * thissize;
5042 nfastblocks += nthissize;
5043 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5044 sizes[i].to = thissize;
5045 sizes[i].count = nthissize;
5046 }
5047 else
5048 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5049
5050 sizes[i].total = sizes[i].count * sizes[i].to;
5051 }
5052
5053
5054 mbinptr bin;
5055 struct malloc_chunk *r;
5056
5057 for (size_t i = 1; i < NBINS; ++i)
5058 {
5059 bin = bin_at (ar_ptr, i);
5060 r = bin->fd;
5061 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5062 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5063 = sizes[NFASTBINS - 1 + i].count = 0;
5064
5065 if (r != NULL)
5066 while (r != bin)
5067 {
5068 ++sizes[NFASTBINS - 1 + i].count;
5069 sizes[NFASTBINS - 1 + i].total += r->size;
5070 sizes[NFASTBINS - 1 + i].from
5071 = MIN (sizes[NFASTBINS - 1 + i].from, r->size);
5072 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5073 r->size);
5074
5075 r = r->fd;
5076 }
5077
5078 if (sizes[NFASTBINS - 1 + i].count == 0)
5079 sizes[NFASTBINS - 1 + i].from = 0;
5080 nblocks += sizes[NFASTBINS - 1 + i].count;
5081 avail += sizes[NFASTBINS - 1 + i].total;
5082 }
5083
5084 mutex_unlock (&ar_ptr->mutex);
5085
5086 total_nfastblocks += nfastblocks;
5087 total_fastavail += fastavail;
5088
5089 total_nblocks += nblocks;
5090 total_avail += avail;
5091
5092 for (size_t i = 0; i < nsizes; ++i)
5093 if (sizes[i].count != 0 && i != NFASTBINS)
5094 fprintf (fp, " \
5095 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5096 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5097
5098 if (sizes[NFASTBINS].count != 0)
5099 fprintf (fp, "\
5100 <unsorted from=\"%zu\" to=\"