fix prerequisites due to libc changes
buildroot.git: package/qtopia4/qtopia-4.3.1-add-avr32-arch.patch
blob a885a4f03f280bf41f31c6aef5889235605b2bb1
1 diff -Nupr a/include/Qt/qatomic_avr32.h b/include/Qt/qatomic_avr32.h
2 --- a/include/Qt/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
3 +++ b/include/Qt/qatomic_avr32.h 2006-07-27 07:55:09.000000000 +0200
4 @@ -0,0 +1 @@
5 +#include "../../src/corelib/arch/qatomic_avr32.h"
6 diff -Nupr a/include/QtCore/qatomic_avr32.h b/include/QtCore/qatomic_avr32.h
7 --- a/include/QtCore/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
8 +++ b/include/QtCore/qatomic_avr32.h 2006-07-27 07:55:28.000000000 +0200
9 @@ -0,0 +1 @@
10 +#include "../../src/corelib/arch/qatomic_avr32.h"
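The two stub headers above simply forward to src/corelib/arch/qatomic_avr32.h, which this patch expects to supply the AVR32 atomic primitives. As a rough sketch only (not the header shipped by the patch), an xchg-based atomic swap along the lines of the helper used further down in malloc.c could look like this; it compiles only for AVR32 targets:

/* Sketch: AVR32 atomic swap built on the xchg instruction, mirroring the
 * q_atomic_swp helper that appears later in this patch. Illustrative only. */
static __inline__ int q_atomic_swp(volatile unsigned int *ptr,
                                   unsigned int newval)
{
    register int ret;
    asm volatile("xchg %0,%1,%2"
                 : "=&r"(ret)
                 : "r"(ptr), "r"(newval)
                 : "memory", "cc");
    return ret;
}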
11 diff -Nupr a/src/corelib/arch/arch.pri b/src/corelib/arch/arch.pri
12 --- a/src/corelib/arch/arch.pri 2006-06-30 09:49:44.000000000 +0200
13 +++ b/src/corelib/arch/arch.pri 2006-07-26 11:03:43.000000000 +0200
14 @@ -13,6 +13,7 @@ mac:HEADERS += arch/qatomic_macosx.h \
15 arch/qatomic_generic.h \
16 arch/qatomic_powerpc.h \
17 arch/qatomic_arm.h \
18 + arch/qatomic_avr32.h \
19 arch/qatomic_i386.h \
20 arch/qatomic_mips.h \
21 arch/qatomic_s390.h \
22 diff -Nupr a/src/corelib/arch/avr32/arch.pri b/src/corelib/arch/avr32/arch.pri
23 --- a/src/corelib/arch/avr32/arch.pri 1970-01-01 01:00:00.000000000 +0100
24 +++ b/src/corelib/arch/avr32/arch.pri 2006-07-26 11:02:16.000000000 +0200
25 @@ -0,0 +1,5 @@
27 +# AVR32 architecture
29 +SOURCES += $$QT_ARCH_CPP/qatomic.cpp \
30 + $$QT_ARCH_CPP/malloc.c
31 diff -Nupr a/src/corelib/arch/avr32/malloc.c b/src/corelib/arch/avr32/malloc.c
32 --- a/src/corelib/arch/avr32/malloc.c 1970-01-01 01:00:00.000000000 +0100
33 +++ b/src/corelib/arch/avr32/malloc.c 2006-07-28 10:29:44.000000000 +0200
34 @@ -0,0 +1,5819 @@
35 +/****************************************************************************
36 +**
37 +** This file is part of the QtCore module of the Qt Toolkit.
38 +**
39 +** This file contains third party code which is not governed by the Qt
40 +** Commercial License Agreement. Please read the license headers below
41 +** for more information.
42 +**
43 +** Further information about Qt licensing is available at:
44 +** http://www.trolltech.com/products/qt/licensing.html or by
45 +** contacting info@trolltech.com.
46 +**
47 +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
48 +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
49 +**
50 +****************************************************************************/
52 +/* ---- config.h */
53 +#define KDE_MALLOC
54 +#define KDE_MALLOC_FULL
55 +#define KDE_MALLOC_AVR32
56 +/* ---- */
58 +#ifdef KDE_MALLOC
60 +#ifdef KDE_MALLOC_DEBUG
61 +#define DEBUG
62 +#endif
64 +#define USE_MALLOC_LOCK
65 +#define INLINE __inline__
66 +/*#define INLINE*/
67 +#define USE_MEMCPY 0
68 +#define MMAP_CLEARS 1
70 +/*
71 + This is a version (aka dlmalloc) of malloc/free/realloc written by
72 + Doug Lea and released to the public domain. Use, modify, and
73 + redistribute this code without permission or acknowledgment in any
74 + way you wish. Send questions, comments, complaints, performance
75 + data, etc to dl@cs.oswego.edu
77 +* VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
79 + Note: There may be an updated version of this malloc obtainable at
80 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
81 + Check before installing!
83 +* Quickstart
85 + This library is all in one file to simplify the most common usage:
86 + ftp it, compile it (-O), and link it into another program. All
87 + of the compile-time options default to reasonable values for use on
88 + most unix platforms. Compile -DWIN32 for reasonable defaults on windows.
89 + You might later want to step through various compile-time and dynamic
90 + tuning options.
92 + For convenience, an include file for code using this malloc is at:
93 + ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.0.h
94 + You don't really need this .h file unless you call functions not
95 + defined in your system include files. The .h file contains only the
96 + excerpts from this file needed for using this malloc on ANSI C/C++
97 + systems, so long as you haven't changed compile-time options about
98 + naming and tuning parameters. If you do, then you can create your
99 + own malloc.h that does include all settings by cutting at the point
100 + indicated below.
102 +* Why use this malloc?
104 + This is not the fastest, most space-conserving, most portable, or
105 + most tunable malloc ever written. However it is among the fastest
106 + while also being among the most space-conserving, portable and tunable.
107 + Consistent balance across these factors results in a good general-purpose
108 + allocator for malloc-intensive programs.
110 + The main properties of the algorithms are:
111 + * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
112 + with ties normally decided via FIFO (i.e. least recently used).
113 + * For small (<= 64 bytes by default) requests, it is a caching
114 + allocator, that maintains pools of quickly recycled chunks.
115 + * In between, and for combinations of large and small requests, it does
116 + the best it can trying to meet both goals at once.
117 + * For very large requests (>= 128KB by default), it relies on system
118 + memory mapping facilities, if supported.
120 + For a longer but slightly out of date high-level description, see
121 + http://gee.cs.oswego.edu/dl/html/malloc.html
123 + You may already by default be using a C library containing a malloc
124 + that is based on some version of this malloc (for example in
125 + linux). You might still want to use the one in this file in order to
126 + customize settings or to avoid overheads associated with library
127 + versions.
129 +* Contents, described in more detail in "description of public routines" below.
131 + Standard (ANSI/SVID/...) functions:
132 + malloc(size_t n);
133 + calloc(size_t n_elements, size_t element_size);
134 + free(Void_t* p);
135 + realloc(Void_t* p, size_t n);
136 + memalign(size_t alignment, size_t n);
137 + valloc(size_t n);
138 + mallinfo()
139 + mallopt(int parameter_number, int parameter_value)
141 + Additional functions:
142 + independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
143 + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
144 + pvalloc(size_t n);
145 + cfree(Void_t* p);
146 + malloc_trim(size_t pad);
147 + malloc_usable_size(Void_t* p);
148 + malloc_stats();
150 +* Vital statistics:
152 + Supported pointer representation: 4 or 8 bytes
153 + Supported size_t representation: 4 or 8 bytes
154 + Note that size_t is allowed to be 4 bytes even if pointers are 8.
155 + You can adjust this by defining INTERNAL_SIZE_T
157 + Alignment: 2 * sizeof(size_t) (default)
158 + (i.e., 8 byte alignment with 4byte size_t). This suffices for
159 + nearly all current machines and C compilers. However, you can
160 + define MALLOC_ALIGNMENT to be wider than this if necessary.
162 + Minimum overhead per allocated chunk: 4 or 8 bytes
163 + Each malloced chunk has a hidden word of overhead holding size
164 + and status information.
166 + Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
167 + 8-byte ptrs: 24/32 bytes (including, 4/8 overhead)
169 + When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
170 + ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
171 + needed; 4 (8) for a trailing size field and 8 (16) bytes for
172 + free list pointers. Thus, the minimum allocatable size is
173 + 16/24/32 bytes.
175 + Even a request for zero bytes (i.e., malloc(0)) returns a
176 + pointer to something of the minimum allocatable size.
178 + The maximum overhead wastage (i.e., number of extra bytes
179 + allocated than were requested in malloc) is less than or equal
180 + to the minimum size, except for requests >= mmap_threshold that
181 + are serviced via mmap(), where the worst case wastage is 2 *
182 + sizeof(size_t) bytes plus the remainder from a system page (the
183 + minimal mmap unit); typically 4096 or 8192 bytes.
185 + Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
186 + 8-byte size_t: 2^64 minus about two pages
188 + It is assumed that (possibly signed) size_t values suffice to
189 + represent chunk sizes. `Possibly signed' is due to the fact
190 + that `size_t' may be defined on a system as either a signed or
191 + an unsigned type. The ISO C standard says that it must be
192 + unsigned, but a few systems are known not to adhere to this.
193 + Additionally, even when size_t is unsigned, sbrk (which is by
194 + default used to obtain memory from system) accepts signed
195 + arguments, and may not be able to handle size_t-wide arguments
196 + with negative sign bit. Generally, values that would
197 + appear as negative after accounting for overhead and alignment
198 + are supported only via mmap(), which does not have this
199 + limitation.
201 + Requests for sizes outside the allowed range will perform an optional
202 + failure action and then return null. (Requests may also
203 + also fail because a system is out of memory.)
205 + Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
207 + When USE_MALLOC_LOCK is defined, wrappers are created to
208 + surround every public call with either a pthread mutex or
209 + a win32 spinlock (depending on WIN32). This is not
210 + especially fast, and can be a major bottleneck.
211 + It is designed only to provide minimal protection
212 + in concurrent environments, and to provide a basis for
213 + extensions. If you are using malloc in a concurrent program,
214 + you would be far better off obtaining ptmalloc, which is
215 + derived from a version of this malloc, and is well-tuned for
216 + concurrent programs. (See http://www.malloc.de)
218 + Compliance: I believe it is compliant with the 1997 Single Unix Specification
219 + (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
220 + others as well.
222 +* Synopsis of compile-time options:
224 + People have reported using previous versions of this malloc on all
225 + versions of Unix, sometimes by tweaking some of the defines
226 + below. It has been tested most extensively on Solaris and
227 + Linux. It is also reported to work on WIN32 platforms.
228 + People also report using it in stand-alone embedded systems.
230 + The implementation is in straight, hand-tuned ANSI C. It is not
231 + at all modular. (Sorry!) It uses a lot of macros. To be at all
232 + usable, this code should be compiled using an optimizing compiler
233 + (for example gcc -O3) that can simplify expressions and control
234 + paths. (FAQ: some macros import variables as arguments rather than
235 + declare locals because people reported that some debuggers
236 + otherwise get confused.)
238 + OPTION DEFAULT VALUE
240 + Compilation Environment options:
242 + __STD_C derived from C compiler defines
243 + WIN32 NOT defined
244 + HAVE_MEMCPY defined
245 + USE_MEMCPY 1 if HAVE_MEMCPY is defined
246 + HAVE_MMAP defined as 1
247 + MMAP_CLEARS 1
248 + HAVE_MREMAP 0 unless linux defined
249 + malloc_getpagesize derived from system #includes, or 4096 if not
250 + HAVE_USR_INCLUDE_MALLOC_H NOT defined
251 + LACKS_UNISTD_H NOT defined unless WIN32
252 + LACKS_SYS_PARAM_H NOT defined unless WIN32
253 + LACKS_SYS_MMAN_H NOT defined unless WIN32
255 + Changing default word sizes:
257 + INTERNAL_SIZE_T size_t
258 + MALLOC_ALIGNMENT 2 * sizeof(INTERNAL_SIZE_T)
260 + Configuration and functionality options:
262 + USE_DL_PREFIX NOT defined
263 + USE_PUBLIC_MALLOC_WRAPPERS NOT defined
264 + USE_MALLOC_LOCK NOT defined
265 + DEBUG NOT defined
266 + REALLOC_ZERO_BYTES_FREES NOT defined
267 + MALLOC_FAILURE_ACTION errno = ENOMEM, if __STD_C defined, else no-op
268 + TRIM_FASTBINS 0
270 + Options for customizing MORECORE:
272 + MORECORE sbrk
273 + MORECORE_CONTIGUOUS 1
274 + MORECORE_CANNOT_TRIM NOT defined
275 + MMAP_AS_MORECORE_SIZE (1024 * 1024)
277 + Tuning options that are also dynamically changeable via mallopt:
279 + DEFAULT_MXFAST 64
280 + DEFAULT_TRIM_THRESHOLD 128 * 1024
281 + DEFAULT_TOP_PAD 0
282 + DEFAULT_MMAP_THRESHOLD 128 * 1024
283 + DEFAULT_MMAP_MAX 65536
285 + There are several other #defined constants and macros that you
286 + probably don't want to touch unless you are extending or adapting malloc.
290 + WIN32 sets up defaults for MS environment and compilers.
291 + Otherwise defaults are for unix.
294 +/* #define WIN32 */
296 +#ifdef WIN32
298 +#define WIN32_LEAN_AND_MEAN
299 +#include <windows.h>
301 +/* Win32 doesn't supply or need the following headers */
302 +#define LACKS_UNISTD_H
303 +#define LACKS_SYS_PARAM_H
304 +#define LACKS_SYS_MMAN_H
306 +/* Use the supplied emulation of sbrk */
307 +#define MORECORE sbrk
308 +#define MORECORE_CONTIGUOUS 1
309 +#define MORECORE_FAILURE ((void*)(-1))
311 +/* Use the supplied emulation of mmap and munmap */
312 +#define HAVE_MMAP 1
313 +#define MUNMAP_FAILURE (-1)
314 +#define MMAP_CLEARS 1
316 +/* These values don't really matter in windows mmap emulation */
317 +#define MAP_PRIVATE 1
318 +#define MAP_ANONYMOUS 2
319 +#define PROT_READ 1
320 +#define PROT_WRITE 2
322 +/* Emulation functions defined at the end of this file */
324 +/* If USE_MALLOC_LOCK, use supplied critical-section-based lock functions */
325 +#ifdef USE_MALLOC_LOCK
326 +static int slwait(int *sl);
327 +static int slrelease(int *sl);
328 +#endif
330 +static long getpagesize(void);
331 +static long getregionsize(void);
332 +static void *sbrk(long size);
333 +static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
334 +static long munmap(void *ptr, long size);
336 +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed);
337 +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user);
339 +#endif
342 + __STD_C should be nonzero if using ANSI-standard C compiler, a C++
343 + compiler, or a C compiler sufficiently close to ANSI to get away
344 + with it.
347 +#ifndef __STD_C
348 +#if defined(__STDC__) || defined(_cplusplus)
349 +#define __STD_C 1
350 +#else
351 +#define __STD_C 0
352 +#endif
353 +#endif /*__STD_C*/
357 + Void_t* is the pointer type that malloc should say it returns
360 +#ifndef Void_t
361 +#if (__STD_C || defined(WIN32))
362 +#define Void_t void
363 +#else
364 +#define Void_t char
365 +#endif
366 +#endif /*Void_t*/
368 +#if __STD_C
369 +#include <stddef.h> /* for size_t */
370 +#else
371 +#include <sys/types.h>
372 +#endif
374 +#ifdef __cplusplus
375 +extern "C" {
376 +#endif
378 +/* define LACKS_UNISTD_H if your system does not have a <unistd.h>. */
380 +/* #define LACKS_UNISTD_H */
382 +#ifndef LACKS_UNISTD_H
383 +#include <unistd.h>
384 +#endif
386 +/* define LACKS_SYS_PARAM_H if your system does not have a <sys/param.h>. */
388 +/* #define LACKS_SYS_PARAM_H */
391 +#include <stdio.h> /* needed for malloc_stats */
392 +#include <errno.h> /* needed for optional MALLOC_FAILURE_ACTION */
396 + Debugging:
398 + Because freed chunks may be overwritten with bookkeeping fields, this
399 + malloc will often die when freed memory is overwritten by user
400 + programs. This can be very effective (albeit in an annoying way)
401 + in helping track down dangling pointers.
403 + If you compile with -DDEBUG, a number of assertion checks are
404 + enabled that will catch more memory errors. You probably won't be
405 + able to make much sense of the actual assertion errors, but they
406 + should help you locate incorrectly overwritten memory. The
407 + checking is fairly extensive, and will slow down execution
408 + noticeably. Calling malloc_stats or mallinfo with DEBUG set will
409 + attempt to check every non-mmapped allocated and free chunk in the
410 + course of computing the summaries. (By nature, mmapped regions
411 + cannot be checked very much automatically.)
413 + Setting DEBUG may also be helpful if you are trying to modify
414 + this code. The assertions in the check routines spell out in more
415 + detail the assumptions and invariants underlying the algorithms.
417 + Setting DEBUG does NOT provide an automated mechanism for checking
418 + that all accesses to malloced memory stay within their
419 + bounds. However, there are several add-ons and adaptations of this
420 + or other mallocs available that do this.
423 +#ifdef DEBUG
424 +#include <assert.h>
425 +#else
426 +#define assert(x) ((void)0)
427 +#endif
431 + INTERNAL_SIZE_T is the word-size used for internal bookkeeping
432 + of chunk sizes.
434 + The default version is the same as size_t.
436 + While not strictly necessary, it is best to define this as an
437 + unsigned type, even if size_t is a signed type. This may avoid some
438 + artificial size limitations on some systems.
440 + On a 64-bit machine, you may be able to reduce malloc overhead by
441 + defining INTERNAL_SIZE_T to be a 32 bit `unsigned int' at the
442 + expense of not being able to handle more than 2^32 of malloced
443 + space. If this limitation is acceptable, you are encouraged to set
444 + this unless you are on a platform requiring 16byte alignments. In
445 + this case the alignment requirements turn out to negate any
446 + potential advantages of decreasing size_t word size.
448 + Implementors: Beware of the possible combinations of:
449 + - INTERNAL_SIZE_T might be signed or unsigned, might be 32 or 64 bits,
450 + and might be the same width as int or as long
451 + - size_t might have different width and signedness as INTERNAL_SIZE_T
452 + - int and long might be 32 or 64 bits, and might be the same width
453 + To deal with this, most comparisons and difference computations
454 + among INTERNAL_SIZE_Ts should cast them to unsigned long, being
455 + aware of the fact that casting an unsigned int to a wider long does
456 + not sign-extend. (This also makes checking for negative numbers
457 + awkward.) Some of these casts result in harmless compiler warnings
458 + on some systems.
461 +#ifndef INTERNAL_SIZE_T
462 +#define INTERNAL_SIZE_T size_t
463 +#endif
465 +/* The corresponding word size */
466 +#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
470 + MALLOC_ALIGNMENT is the minimum alignment for malloc'ed chunks.
471 + It must be a power of two at least 2 * SIZE_SZ, even on machines
472 + for which smaller alignments would suffice. It may be defined as
473 + larger than this though. Note however that code and data structures
474 + are optimized for the case of 8-byte alignment.
478 +#ifndef MALLOC_ALIGNMENT
479 +#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
480 +#endif
482 +/* The corresponding bit mask value */
483 +#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
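As a hedged illustration of how SIZE_SZ, MALLOC_ALIGNMENT and MALLOC_ALIGN_MASK interact: an incoming request is padded by the hidden size word and rounded up to a multiple of the alignment (dlmalloc's internal request2size macro works along these lines; the standalone program below redefines the constants locally and ignores the minimum chunk size the real allocator also enforces):

#include <stdio.h>
#include <stdlib.h>

#define INTERNAL_SIZE_T   size_t
#define SIZE_SZ           (sizeof(INTERNAL_SIZE_T))
#define MALLOC_ALIGNMENT  (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

/* Illustrative rounding only; the real allocator also clamps to a minimum chunk size. */
static size_t request2size_sketch(size_t req)
{
    return (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
}

int main(void)
{
    printf("request 13 -> chunk %zu bytes\n", request2size_sketch(13));
    printf("request 64 -> chunk %zu bytes\n", request2size_sketch(64));
    return 0;
}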
488 + REALLOC_ZERO_BYTES_FREES should be set if a call to
489 + realloc with zero bytes should be the same as a call to free.
490 + Some people think it should. Otherwise, since this malloc
491 + returns a unique pointer for malloc(0), so does realloc(p, 0).
494 +/* #define REALLOC_ZERO_BYTES_FREES */
497 + TRIM_FASTBINS controls whether free() of a very small chunk can
498 + immediately lead to trimming. Setting to true (1) can reduce memory
499 + footprint, but will almost always slow down programs that use a lot
500 + of small chunks.
502 + Define this only if you are willing to give up some speed to more
503 + aggressively reduce system-level memory footprint when releasing
504 + memory in programs that use many small chunks. You can get
505 + essentially the same effect by setting MXFAST to 0, but this can
506 + lead to even greater slowdowns in programs using many small chunks.
507 + TRIM_FASTBINS is an in-between compile-time option, that disables
508 + only those chunks bordering topmost memory from being placed in
509 + fastbins.
512 +#ifndef TRIM_FASTBINS
513 +#define TRIM_FASTBINS 0
514 +#endif
518 + USE_DL_PREFIX will prefix all public routines with the string 'dl'.
519 + This is necessary when you only want to use this malloc in one part
520 + of a program, using your regular system malloc elsewhere.
523 +/* #define USE_DL_PREFIX */
527 + USE_MALLOC_LOCK causes wrapper functions to surround each
528 + callable routine with pthread mutex lock/unlock.
530 + USE_MALLOC_LOCK forces USE_PUBLIC_MALLOC_WRAPPERS to be defined
534 +/* #define USE_MALLOC_LOCK */
538 + If USE_PUBLIC_MALLOC_WRAPPERS is defined, every public routine is
539 + actually a wrapper function that first calls MALLOC_PREACTION, then
540 + calls the internal routine, and follows it with
541 + MALLOC_POSTACTION. This is needed for locking, but you can also use
542 + this, without USE_MALLOC_LOCK, for purposes of interception,
543 + instrumentation, etc. It is a sad fact that using wrappers often
544 + noticeably degrades performance of malloc-intensive programs.
547 +#ifdef USE_MALLOC_LOCK
548 +#define USE_PUBLIC_MALLOC_WRAPPERS
549 +#else
550 +/* #define USE_PUBLIC_MALLOC_WRAPPERS */
551 +#endif
555 + Two-phase name translation.
556 + All of the actual routines are given mangled names.
557 + When wrappers are used, they become the public callable versions.
558 + When DL_PREFIX is used, the callable names are prefixed.
561 +#ifndef USE_PUBLIC_MALLOC_WRAPPERS
562 +#define cALLOc public_cALLOc
563 +#define fREe public_fREe
564 +#define cFREe public_cFREe
565 +#define mALLOc public_mALLOc
566 +#define mEMALIGn public_mEMALIGn
567 +#define rEALLOc public_rEALLOc
568 +#define vALLOc public_vALLOc
569 +#define pVALLOc public_pVALLOc
570 +#define mALLINFo public_mALLINFo
571 +#define mALLOPt public_mALLOPt
572 +#define mTRIm public_mTRIm
573 +#define mSTATs public_mSTATs
574 +#define mUSABLe public_mUSABLe
575 +#define iCALLOc public_iCALLOc
576 +#define iCOMALLOc public_iCOMALLOc
577 +#endif
579 +#ifdef USE_DL_PREFIX
580 +#define public_cALLOc dlcalloc
581 +#define public_fREe dlfree
582 +#define public_cFREe dlcfree
583 +#define public_mALLOc dlmalloc
584 +#define public_mEMALIGn dlmemalign
585 +#define public_rEALLOc dlrealloc
586 +#define public_vALLOc dlvalloc
587 +#define public_pVALLOc dlpvalloc
588 +#define public_mALLINFo dlmallinfo
589 +#define public_mALLOPt dlmallopt
590 +#define public_mTRIm dlmalloc_trim
591 +#define public_mSTATs dlmalloc_stats
592 +#define public_mUSABLe dlmalloc_usable_size
593 +#define public_iCALLOc dlindependent_calloc
594 +#define public_iCOMALLOc dlindependent_comalloc
595 +#else /* USE_DL_PREFIX */
596 +#define public_cALLOc calloc
597 +#define public_fREe free
598 +#define public_cFREe cfree
599 +#define public_mALLOc malloc
600 +#define public_mEMALIGn memalign
601 +#define public_rEALLOc realloc
602 +#define public_vALLOc valloc
603 +#define public_pVALLOc pvalloc
604 +#define public_mALLINFo mallinfo
605 +#define public_mALLOPt mallopt
606 +#define public_mTRIm malloc_trim
607 +#define public_mSTATs malloc_stats
608 +#define public_mUSABLe malloc_usable_size
609 +#define public_iCALLOc independent_calloc
610 +#define public_iCOMALLOc independent_comalloc
611 +#endif /* USE_DL_PREFIX */
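If the file were built with -DUSE_DL_PREFIX, the table above maps the public names to dl-prefixed ones so this allocator can coexist with the system malloc. A minimal usage sketch under that assumption (the prototypes would normally come from a cut-down malloc.h, as described earlier):

#include <stddef.h>

/* Assumed to be provided by this file compiled with -DUSE_DL_PREFIX. */
void *dlmalloc(size_t n);
void  dlfree(void *p);

int main(void)
{
    char *buf = dlmalloc(128);   /* served by this allocator, not libc */
    if (buf == NULL)
        return 1;
    dlfree(buf);                 /* must be released with the matching free */
    return 0;
}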
615 + HAVE_MEMCPY should be defined if you are not otherwise using
616 + ANSI STD C, but still have memcpy and memset in your C library
617 + and want to use them in calloc and realloc. Otherwise simple
618 + macro versions are defined below.
620 + USE_MEMCPY should be defined as 1 if you actually want to
621 + have memset and memcpy called. People report that the macro
622 + versions are faster than libc versions on some systems.
624 + Even if USE_MEMCPY is set to 1, loops to copy/clear small chunks
625 + (of <= 36 bytes) are manually unrolled in realloc and calloc.
628 +/* If it's available it's defined in config.h. */
629 +/* #define HAVE_MEMCPY */
631 +#ifndef USE_MEMCPY
632 +#ifdef HAVE_MEMCPY
633 +#define USE_MEMCPY 1
634 +#else
635 +#define USE_MEMCPY 0
636 +#endif
637 +#endif
640 +#if (__STD_C || defined(HAVE_MEMCPY))
642 +#ifdef WIN32
643 +/* On Win32 memset and memcpy are already declared in windows.h */
644 +#else
645 +#if __STD_C
646 +void* memset(void*, int, size_t);
647 +void* memcpy(void*, const void*, size_t);
648 +#else
649 +Void_t* memset();
650 +Void_t* memcpy();
651 +#endif
652 +#endif
653 +#endif
656 + MALLOC_FAILURE_ACTION is the action to take before "return 0" when
657 + malloc fails to be able to return memory, either because memory is
658 + exhausted or because of illegal arguments.
660 + By default, sets errno if running on STD_C platform, else does nothing.
663 +#ifndef MALLOC_FAILURE_ACTION
664 +#if __STD_C
665 +#define MALLOC_FAILURE_ACTION \
666 + errno = ENOMEM;
668 +#else
669 +#define MALLOC_FAILURE_ACTION
670 +#endif
671 +#endif
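Per the macro above, a failed allocation sets errno to ENOMEM on __STD_C platforms before returning null. A small sketch of checking for that (the absurdly large request is expected to fail):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *p = malloc((size_t)-1);   /* effectively a negative argument: huge, will normally fail */
    if (p == NULL && errno == ENOMEM)
        fprintf(stderr, "allocation failed: ENOMEM\n");
    free(p);                        /* free(NULL) is a no-op */
    return 0;
}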
674 + MORECORE-related declarations. By default, rely on sbrk
678 +#ifdef LACKS_UNISTD_H
679 +#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
680 +#if __STD_C
681 +extern Void_t* sbrk(ptrdiff_t);
682 +#else
683 +extern Void_t* sbrk();
684 +#endif
685 +#endif
686 +#endif
689 + MORECORE is the name of the routine to call to obtain more memory
690 + from the system. See below for general guidance on writing
691 + alternative MORECORE functions, as well as a version for WIN32 and a
692 + sample version for pre-OSX macos.
695 +#ifndef MORECORE
696 +#define MORECORE sbrk
697 +#endif
700 + MORECORE_FAILURE is the value returned upon failure of MORECORE
701 + as well as mmap. Since it cannot be an otherwise valid memory address,
702 + and must reflect values of standard sys calls, you probably ought not
703 + try to redefine it.
706 +#ifndef MORECORE_FAILURE
707 +#define MORECORE_FAILURE (-1)
708 +#endif
711 + If MORECORE_CONTIGUOUS is true, take advantage of fact that
712 + consecutive calls to MORECORE with positive arguments always return
713 + contiguous increasing addresses. This is true of unix sbrk. Even
714 + if not defined, when regions happen to be contiguous, malloc will
715 + permit allocations spanning regions obtained from different
716 + calls. But defining this when applicable enables some stronger
717 + consistency checks and space efficiencies.
720 +#ifndef MORECORE_CONTIGUOUS
721 +#define MORECORE_CONTIGUOUS 1
722 +#endif
725 + Define MORECORE_CANNOT_TRIM if your version of MORECORE
726 + cannot release space back to the system when given negative
727 + arguments. This is generally necessary only if you are using
728 + a hand-crafted MORECORE function that cannot handle negative arguments.
731 +/* #define MORECORE_CANNOT_TRIM */
735 + Define HAVE_MMAP as true to optionally make malloc() use mmap() to
736 + allocate very large blocks. These will be returned to the
737 + operating system immediately after a free(). Also, if mmap
738 + is available, it is used as a backup strategy in cases where
739 + MORECORE fails to provide space from system.
741 + This malloc is best tuned to work with mmap for large requests.
742 + If you do not have mmap, operations involving very large chunks (1MB
743 + or so) may be slower than you'd like.
746 +#ifndef HAVE_MMAP
747 +#define HAVE_MMAP 1
748 +#endif
750 +#if HAVE_MMAP
752 + Standard unix mmap using /dev/zero clears memory so calloc doesn't
753 + need to.
756 +#ifndef MMAP_CLEARS
757 +#define MMAP_CLEARS 1
758 +#endif
760 +#else /* no mmap */
761 +#ifndef MMAP_CLEARS
762 +#define MMAP_CLEARS 0
763 +#endif
764 +#endif
768 + MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
769 + sbrk fails, and mmap is used as a backup (which is done only if
770 + HAVE_MMAP). The value must be a multiple of page size. This
771 + backup strategy generally applies only when systems have "holes" in
772 + address space, so sbrk cannot perform contiguous expansion, but
773 + there is still space available on system. On systems for which
774 + this is known to be useful (i.e. most linux kernels), this occurs
775 + only when programs allocate huge amounts of memory. Between this,
776 + and the fact that mmap regions tend to be limited, the size should
777 + be large, to avoid too many mmap calls and thus avoid running out
778 + of kernel resources.
781 +#ifndef MMAP_AS_MORECORE_SIZE
782 +#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
783 +#endif
786 + Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
787 + large blocks. This is currently only possible on Linux with
788 + kernel versions newer than 1.3.77.
791 +#ifndef HAVE_MREMAP
792 +#if defined(linux) || defined(__linux__) || defined(__linux)
793 +#define HAVE_MREMAP 1
794 +#else
795 +#define HAVE_MREMAP 0
796 +#endif
798 +#endif /* HAVE_MMAP */
802 + The system page size. To the extent possible, this malloc manages
803 + memory from the system in page-size units. Note that this value is
804 + cached during initialization into a field of malloc_state. So even
805 + if malloc_getpagesize is a function, it is only called once.
807 + The following mechanics for getpagesize were adapted from bsd/gnu
808 + getpagesize.h. If none of the system-probes here apply, a value of
809 + 4096 is used, which should be OK: If they don't apply, then using
810 + the actual value probably doesn't impact performance.
814 +#ifndef malloc_getpagesize
816 +#ifndef LACKS_UNISTD_H
817 +# include <unistd.h>
818 +#endif
820 +# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
821 +# ifndef _SC_PAGE_SIZE
822 +# define _SC_PAGE_SIZE _SC_PAGESIZE
823 +# endif
824 +# endif
826 +# ifdef _SC_PAGE_SIZE
827 +# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
828 +# else
829 +# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
830 + extern size_t getpagesize();
831 +# define malloc_getpagesize getpagesize()
832 +# else
833 +# ifdef WIN32 /* use supplied emulation of getpagesize */
834 +# define malloc_getpagesize getpagesize()
835 +# else
836 +# ifndef LACKS_SYS_PARAM_H
837 +# include <sys/param.h>
838 +# endif
839 +# ifdef EXEC_PAGESIZE
840 +# define malloc_getpagesize EXEC_PAGESIZE
841 +# else
842 +# ifdef NBPG
843 +# ifndef CLSIZE
844 +# define malloc_getpagesize NBPG
845 +# else
846 +# define malloc_getpagesize (NBPG * CLSIZE)
847 +# endif
848 +# else
849 +# ifdef NBPC
850 +# define malloc_getpagesize NBPC
851 +# else
852 +# ifdef PAGESIZE
853 +# define malloc_getpagesize PAGESIZE
854 +# else /* just guess */
855 +# define malloc_getpagesize (4096)
856 +# endif
857 +# endif
858 +# endif
859 +# endif
860 +# endif
861 +# endif
862 +# endif
863 +#endif
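The cascade above prefers sysconf() when it is available; a quick way to see the value this malloc will cache is to query it directly:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long pagesize = sysconf(_SC_PAGESIZE);   /* same probe the macro cascade prefers */
    printf("system page size: %ld bytes\n", pagesize);
    return 0;
}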
866 + This version of malloc supports the standard SVID/XPG mallinfo
867 + routine that returns a struct containing usage properties and
868 + statistics. It should work on any SVID/XPG compliant system that has
869 + a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
870 + install such a thing yourself, cut out the preliminary declarations
871 + as described above and below and save them in a malloc.h file. But
872 + there's no compelling reason to bother to do this.)
874 + The main declaration needed is the mallinfo struct that is returned
875 + (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
876 + bunch of fields that are not even meaningful in this version of
877 + malloc. These fields are instead filled by mallinfo() with
878 + other numbers that might be of interest.
880 + HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
881 + /usr/include/malloc.h file that includes a declaration of struct
882 + mallinfo. If so, it is included; else an SVID2/XPG2 compliant
883 + version is declared below. These must be precisely the same for
884 + mallinfo() to work. The original SVID version of this struct,
885 + defined on most systems with mallinfo, declares all fields as
886 + ints. But some others define as unsigned long. If your system
887 + defines the fields using a type of different width than listed here,
888 + you must #include your system version and #define
889 + HAVE_USR_INCLUDE_MALLOC_H.
892 +/* #define HAVE_USR_INCLUDE_MALLOC_H */
894 +/*#ifdef HAVE_USR_INCLUDE_MALLOC_H*/
895 +#if 0
896 +#include "/usr/include/malloc.h"
897 +#else
899 +/* SVID2/XPG mallinfo structure */
901 +struct mallinfo {
902 + int arena; /* non-mmapped space allocated from system */
903 + int ordblks; /* number of free chunks */
904 + int smblks; /* number of fastbin blocks */
905 + int hblks; /* number of mmapped regions */
906 + int hblkhd; /* space in mmapped regions */
907 + int usmblks; /* maximum total allocated space */
908 + int fsmblks; /* space available in freed fastbin blocks */
909 + int uordblks; /* total allocated space */
910 + int fordblks; /* total free space */
911 + int keepcost; /* top-most, releasable (via malloc_trim) space */
912 +};
915 + SVID/XPG defines four standard parameter numbers for mallopt,
916 + normally defined in malloc.h. Only one of these (M_MXFAST) is used
917 + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
918 + so setting them has no effect. But this malloc also supports other
919 + options in mallopt described below.
921 +#endif
924 +/* ---------- description of public routines ------------ */
927 + malloc(size_t n)
928 + Returns a pointer to a newly allocated chunk of at least n bytes, or null
929 + if no space is available. Additionally, on failure, errno is
930 + set to ENOMEM on ANSI C systems.
932 + If n is zero, malloc returns a minimum-sized chunk. (The minimum
933 + size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
934 + systems.) On most systems, size_t is an unsigned type, so calls
935 + with negative arguments are interpreted as requests for huge amounts
936 + of space, which will often fail. The maximum supported value of n
937 + differs across systems, but is in all cases less than the maximum
938 + representable value of a size_t.
940 +#if __STD_C
941 +Void_t* public_mALLOc(size_t);
942 +#else
943 +Void_t* public_mALLOc();
944 +#endif
947 + free(Void_t* p)
948 + Releases the chunk of memory pointed to by p, that had been previously
949 + allocated using malloc or a related routine such as realloc.
950 + It has no effect if p is null. It can have arbitrary (i.e., bad!)
951 + effects if p has already been freed.
953 + Unless disabled (using mallopt), freeing very large spaces will
954 + when possible, automatically trigger operations that give
955 + back unused memory to the system, thus reducing program footprint.
957 +#if __STD_C
958 +void public_fREe(Void_t*);
959 +#else
960 +void public_fREe();
961 +#endif
964 + calloc(size_t n_elements, size_t element_size);
965 + Returns a pointer to n_elements * element_size bytes, with all locations
966 + set to zero.
968 +#if __STD_C
969 +Void_t* public_cALLOc(size_t, size_t);
970 +#else
971 +Void_t* public_cALLOc();
972 +#endif
975 + realloc(Void_t* p, size_t n)
976 + Returns a pointer to a chunk of size n that contains the same data
977 + as does chunk p up to the minimum of (n, p's size) bytes, or null
978 + if no space is available.
980 + The returned pointer may or may not be the same as p. The algorithm
981 + prefers extending p when possible, otherwise it employs the
982 + equivalent of a malloc-copy-free sequence.
984 + If p is null, realloc is equivalent to malloc.
986 + If space is not available, realloc returns null, errno is set (if on
987 + ANSI) and p is NOT freed.
989 + if n is for fewer bytes than already held by p, the newly unused
990 + space is lopped off and freed if possible. Unless the #define
991 + REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
992 + zero (re)allocates a minimum-sized chunk.
994 + Large chunks that were internally obtained via mmap will always
995 + be reallocated using malloc-copy-free sequences unless
996 + the system supports MREMAP (currently only linux).
998 + The old unix realloc convention of allowing the last-free'd chunk
999 + to be used as an argument to realloc is not supported.
1001 +#if __STD_C
1002 +Void_t* public_rEALLOc(Void_t*, size_t);
1003 +#else
1004 +Void_t* public_rEALLOc();
1005 +#endif
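Because a failed realloc returns null and does NOT free p (as noted above), callers conventionally keep the old pointer until the call is known to have succeeded. A small sketch of that pattern:

#include <stdlib.h>

/* Grow *buf to newsize; on failure *buf is left untouched and still valid. */
static int grow_buffer(char **buf, size_t newsize)
{
    char *tmp = realloc(*buf, newsize);
    if (tmp == NULL)
        return -1;
    *buf = tmp;
    return 0;
}

int main(void)
{
    char *buf = malloc(16);
    if (buf == NULL)
        return 1;
    if (grow_buffer(&buf, 64) != 0) {
        free(buf);          /* still the original 16-byte block */
        return 1;
    }
    free(buf);
    return 0;
}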
1008 + memalign(size_t alignment, size_t n);
1009 + Returns a pointer to a newly allocated chunk of n bytes, aligned
1010 + in accord with the alignment argument.
1012 + The alignment argument should be a power of two. If the argument is
1013 + not a power of two, the nearest greater power is used.
1014 + 8-byte alignment is guaranteed by normal malloc calls, so don't
1015 + bother calling memalign with an argument of 8 or less.
1017 + Overreliance on memalign is a sure way to fragment space.
1019 +#if __STD_C
1020 +Void_t* public_mEMALIGn(size_t, size_t);
1021 +#else
1022 +Void_t* public_mEMALIGn();
1023 +#endif
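A hedged memalign sketch; per the notes above the alignment should be a power of two, and values of 8 or less are pointless because ordinary malloc already guarantees 8-byte alignment (the prototype is assumed to come from this allocator or the system malloc.h):

#include <stdio.h>
#include <stdlib.h>

extern void *memalign(size_t alignment, size_t n);

int main(void)
{
    void *p = memalign(64, 1000);   /* 64-byte aligned block */
    if (p == NULL)
        return 1;
    printf("64-byte aligned: %s\n",
           ((unsigned long)p % 64 == 0) ? "yes" : "no");
    free(p);
    return 0;
}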
1026 + valloc(size_t n);
1027 + Equivalent to memalign(pagesize, n), where pagesize is the page
1028 + size of the system. If the pagesize is unknown, 4096 is used.
1030 +#if __STD_C
1031 +Void_t* public_vALLOc(size_t);
1032 +#else
1033 +Void_t* public_vALLOc();
1034 +#endif
1039 + mallopt(int parameter_number, int parameter_value)
1040 + Sets tunable parameters The format is to provide a
1041 + (parameter-number, parameter-value) pair. mallopt then sets the
1042 + corresponding parameter to the argument value if it can (i.e., so
1043 + long as the value is meaningful), and returns 1 if successful else
1044 + 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
1045 + normally defined in malloc.h. Only one of these (M_MXFAST) is used
1046 + in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
1047 + so setting them has no effect. But this malloc also supports four
1048 + other options in mallopt. See below for details. Briefly, supported
1049 + parameters are as follows (listed defaults are for "typical"
1050 + configurations).
1052 + Symbol param # default allowed param values
1053 + M_MXFAST 1 64 0-80 (0 disables fastbins)
1054 + M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
1055 + M_TOP_PAD -2 0 any
1056 + M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
1057 + M_MMAP_MAX -4 65536 any (0 disables use of mmap)
1059 +#if __STD_C
1060 +int public_mALLOPt(int, int);
1061 +#else
1062 +int public_mALLOPt();
1063 +#endif
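A tuning sketch using the parameter numbers from the table above (M_MXFAST is the standard SVID/XPG number 1; the negative values are extensions of this malloc). Whether each call is accepted depends on the build, so the return value is checked:

#include <stdio.h>

extern int mallopt(int parameter_number, int parameter_value);

/* Parameter numbers as listed in the table above. */
#define M_MXFAST          1
#define M_TRIM_THRESHOLD -1
#define M_MMAP_THRESHOLD -3

int main(void)
{
    /* Disable fastbins entirely, approximating pure FIFO best-fit. */
    if (!mallopt(M_MXFAST, 0))
        fprintf(stderr, "mallopt(M_MXFAST) rejected\n");

    /* Raise both release thresholds to 256 KB. */
    if (!mallopt(M_TRIM_THRESHOLD, 256 * 1024) ||
        !mallopt(M_MMAP_THRESHOLD, 256 * 1024))
        fprintf(stderr, "threshold tuning rejected\n");
    return 0;
}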
1067 + mallinfo()
1068 + Returns (by copy) a struct containing various summary statistics:
1070 + arena: current total non-mmapped bytes allocated from system
1071 + ordblks: the number of free chunks
1072 + smblks: the number of fastbin blocks (i.e., small chunks that
1073 + have been freed but not yet reused or consolidated)
1074 + hblks: current number of mmapped regions
1075 + hblkhd: total bytes held in mmapped regions
1076 + usmblks: the maximum total allocated space. This will be greater
1077 + than current total if trimming has occurred.
1078 + fsmblks: total bytes held in fastbin blocks
1079 + uordblks: current total allocated space (normal or mmapped)
1080 + fordblks: total free space
1081 + keepcost: the maximum number of bytes that could ideally be released
1082 + back to system via malloc_trim. ("ideally" means that
1083 + it ignores page restrictions etc.)
1085 + Because these fields are ints, but internal bookkeeping may
1086 + be kept as longs, the reported values may wrap around zero and
1087 + thus be inaccurate.
1089 +#if __STD_C
1090 +struct mallinfo public_mALLINFo(void);
1091 +#else
1092 +struct mallinfo public_mALLINFo();
1093 +#endif
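A small reporting sketch built on the field meanings listed above; it assumes struct mallinfo is visible via the system malloc.h (or via the SVID2/XPG declaration earlier in this file):

#include <stdio.h>
#include <malloc.h>   /* assumption: declares struct mallinfo and mallinfo() */

int main(void)
{
    struct mallinfo mi = mallinfo();
    printf("arena    (non-mmapped bytes from system): %d\n", mi.arena);
    printf("uordblks (total allocated space):         %d\n", mi.uordblks);
    printf("fordblks (total free space):              %d\n", mi.fordblks);
    printf("hblkhd   (bytes held in mmapped regions): %d\n", mi.hblkhd);
    printf("keepcost (trimmable top space):           %d\n", mi.keepcost);
    return 0;
}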
1096 + independent_calloc(size_t n_elements, size_t element_size, Void_t* chunks[]);
1098 + independent_calloc is similar to calloc, but instead of returning a
1099 + single cleared space, it returns an array of pointers to n_elements
1100 + independent elements that can hold contents of size elem_size, each
1101 + of which starts out cleared, and can be independently freed,
1102 + realloc'ed etc. The elements are guaranteed to be adjacently
1103 + allocated (this is not guaranteed to occur with multiple callocs or
1104 + mallocs), which may also improve cache locality in some
1105 + applications.
1107 + The "chunks" argument is optional (i.e., may be null, which is
1108 + probably the most typical usage). If it is null, the returned array
1109 + is itself dynamically allocated and should also be freed when it is
1110 + no longer needed. Otherwise, the chunks array must be of at least
1111 + n_elements in length. It is filled in with the pointers to the
1112 + chunks.
1114 + In either case, independent_calloc returns this pointer array, or
1115 + null if the allocation failed. If n_elements is zero and "chunks"
1116 + is null, it returns a chunk representing an array with zero elements
1117 + (which should be freed if not wanted).
1119 + Each element must be individually freed when it is no longer
1120 + needed. If you'd like to instead be able to free all at once, you
1121 + should instead use regular calloc and assign pointers into this
1122 + space to represent elements. (In this case though, you cannot
1123 + independently free elements.)
1125 + independent_calloc simplifies and speeds up implementations of many
1126 + kinds of pools. It may also be useful when constructing large data
1127 + structures that initially have a fixed number of fixed-sized nodes,
1128 + but the number is not known at compile time, and some of the nodes
1129 + may later need to be freed. For example:
1131 + struct Node { int item; struct Node* next; };
1133 + struct Node* build_list() {
1134 + struct Node** pool;
1135 + int n = read_number_of_nodes_needed();
1136 + if (n <= 0) return 0;
1137 + pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
1138 + if (pool == 0) die();
1139 + // organize into a linked list...
1140 + struct Node* first = pool[0];
1141 + for (i = 0; i < n-1; ++i)
1142 + pool[i]->next = pool[i+1];
1143 + free(pool); // Can now free the array (or not, if it is needed later)
1144 + return first;
1147 +#if __STD_C
1148 +Void_t** public_iCALLOc(size_t, size_t, Void_t**);
1149 +#else
1150 +Void_t** public_iCALLOc();
1151 +#endif
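For completeness, a compilable rendering of the build_list sketch from the comment above, assuming the default (unprefixed) public names from this file are linked in:

#include <stdlib.h>

extern void **independent_calloc(size_t n_elements, size_t element_size,
                                 void *chunks[]);

struct Node { int item; struct Node *next; };

/* Build a linked list of n adjacently allocated, zero-initialised nodes. */
static struct Node *build_list(int n)
{
    struct Node **pool;
    struct Node *first;
    int i;

    if (n <= 0)
        return NULL;
    pool = (struct Node **) independent_calloc((size_t) n,
                                               sizeof(struct Node), NULL);
    if (pool == NULL)
        return NULL;
    for (i = 0; i < n - 1; ++i)
        pool[i]->next = pool[i + 1];   /* last node's next is already NULL */
    first = pool[0];
    free(pool);     /* the pointer array itself can be released right away */
    return first;
}

int main(void)
{
    struct Node *head = build_list(4);
    while (head != NULL) {
        struct Node *next = head->next;
        free(head);                    /* each element is freed individually */
        head = next;
    }
    return 0;
}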
1154 + independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
1156 + independent_comalloc allocates, all at once, a set of n_elements
1157 + chunks with sizes indicated in the "sizes" array. It returns
1158 + an array of pointers to these elements, each of which can be
1159 + independently freed, realloc'ed etc. The elements are guaranteed to
1160 + be adjacently allocated (this is not guaranteed to occur with
1161 + multiple callocs or mallocs), which may also improve cache locality
1162 + in some applications.
1164 + The "chunks" argument is optional (i.e., may be null). If it is null
1165 + the returned array is itself dynamically allocated and should also
1166 + be freed when it is no longer needed. Otherwise, the chunks array
1167 + must be of at least n_elements in length. It is filled in with the
1168 + pointers to the chunks.
1170 + In either case, independent_comalloc returns this pointer array, or
1171 + null if the allocation failed. If n_elements is zero and chunks is
1172 + null, it returns a chunk representing an array with zero elements
1173 + (which should be freed if not wanted).
1175 + Each element must be individually freed when it is no longer
1176 + needed. If you'd like to instead be able to free all at once, you
1177 + should instead use a single regular malloc, and assign pointers at
1178 + particular offsets in the aggregate space. (In this case though, you
1179 + cannot independently free elements.)
1181 + independent_comalloc differs from independent_calloc in that each
1182 + element may have a different size, and also that it does not
1183 + automatically clear elements.
1185 + independent_comalloc can be used to speed up allocation in cases
1186 + where several structs or objects must always be allocated at the
1187 + same time. For example:
1189 + struct Head { ... }
1190 + struct Foot { ... }
1192 + void send_message(char* msg) {
1193 + int msglen = strlen(msg);
1194 + size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
1195 + void* chunks[3];
1196 + if (independent_comalloc(3, sizes, chunks) == 0)
1197 + die();
1198 + struct Head* head = (struct Head*)(chunks[0]);
1199 + char* body = (char*)(chunks[1]);
1200 + struct Foot* foot = (struct Foot*)(chunks[2]);
1201 + // ...
1204 + In general though, independent_comalloc is worth using only for
1205 + larger values of n_elements. For small values, you probably won't
1206 + detect enough difference from series of malloc calls to bother.
1208 + Overuse of independent_comalloc can increase overall memory usage,
1209 + since it cannot reuse existing noncontiguous small chunks that
1210 + might be available for some of the elements.
1212 +#if __STD_C
1213 +Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
1214 +#else
1215 +Void_t** public_iCOMALLOc();
1216 +#endif
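Similarly, a compilable rendering of the send_message sketch above; Head and Foot are illustrative structs and the transmit step is elided:

#include <stdlib.h>
#include <string.h>

extern void **independent_comalloc(size_t n_elements, size_t sizes[],
                                   void *chunks[]);

struct Head { int id; };          /* illustrative */
struct Foot { int checksum; };    /* illustrative */

static int send_message(const char *msg)
{
    size_t msglen = strlen(msg);
    size_t sizes[3];
    void *chunks[3];
    struct Head *head;
    char *body;
    struct Foot *foot;

    sizes[0] = sizeof(struct Head);
    sizes[1] = msglen + 1;
    sizes[2] = sizeof(struct Foot);
    if (independent_comalloc(3, sizes, chunks) == NULL)
        return -1;

    head = (struct Head *) chunks[0];
    body = (char *) chunks[1];
    foot = (struct Foot *) chunks[2];
    head->id = 1;
    memcpy(body, msg, msglen + 1);
    foot->checksum = 0;
    /* ... transmit head/body/foot ... */

    free(head);     /* the three pieces are freed individually */
    free(body);
    free(foot);
    return 0;
}

int main(void)
{
    return send_message("hello") == 0 ? 0 : 1;
}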
1220 + pvalloc(size_t n);
1221 + Equivalent to valloc(minimum-page-that-holds(n)), that is,
1222 + round up n to nearest pagesize.
1223 + */
1224 +#if __STD_C
1225 +Void_t* public_pVALLOc(size_t);
1226 +#else
1227 +Void_t* public_pVALLOc();
1228 +#endif
1231 + cfree(Void_t* p);
1232 + Equivalent to free(p).
1234 + cfree is needed/defined on some systems that pair it with calloc,
1235 + for odd historical reasons (such as: cfree is used in example
1236 + code in the first edition of K&R).
1238 +#if __STD_C
1239 +void public_cFREe(Void_t*);
1240 +#else
1241 +void public_cFREe();
1242 +#endif
1245 + malloc_trim(size_t pad);
1247 + If possible, gives memory back to the system (via negative
1248 + arguments to sbrk) if there is unused memory at the `high' end of
1249 + the malloc pool. You can call this after freeing large blocks of
1250 + memory to potentially reduce the system-level memory requirements
1251 + of a program. However, it cannot guarantee to reduce memory. Under
1252 + some allocation patterns, some large free blocks of memory will be
1253 + locked between two used chunks, so they cannot be given back to
1254 + the system.
1256 + The `pad' argument to malloc_trim represents the amount of free
1257 + trailing space to leave untrimmed. If this argument is zero,
1258 + only the minimum amount of memory to maintain internal data
1259 + structures will be left (one page or less). Non-zero arguments
1260 + can be supplied to maintain enough trailing space to service
1261 + future expected allocations without having to re-obtain memory
1262 + from the system.
1264 + Malloc_trim returns 1 if it actually released any memory, else 0.
1265 + On systems that do not support "negative sbrks", it will always
1266 + return 0.
1268 +#if __STD_C
1269 +int public_mTRIm(size_t);
1270 +#else
1271 +int public_mTRIm();
1272 +#endif
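A sketch of handing memory back after releasing a sizeable block, as described above; the return value only reports whether anything could actually be trimmed:

#include <stdio.h>
#include <stdlib.h>

extern int malloc_trim(size_t pad);

int main(void)
{
    /* Below the default 128 KB mmap threshold, so it comes from sbrk space. */
    void *big = malloc(100 * 1024);
    if (big == NULL)
        return 1;
    free(big);

    /* Keep 64 KB of trailing slack for future allocations, trim the rest. */
    if (malloc_trim(64 * 1024))
        puts("memory returned to the system");
    else
        puts("nothing could be trimmed");
    return 0;
}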
1275 + malloc_usable_size(Void_t* p);
1277 + Returns the number of bytes you can actually use in
1278 + an allocated chunk, which may be more than you requested (although
1279 + often not) due to alignment and minimum size constraints.
1280 + You can use this many bytes without worrying about
1281 + overwriting other allocated objects. This is not a particularly great
1282 + programming practice. malloc_usable_size can be more useful in
1283 + debugging and assertions, for example:
1285 + p = malloc(n);
1286 + assert(malloc_usable_size(p) >= 256);
1289 +#if __STD_C
1290 +size_t public_mUSABLe(Void_t*);
1291 +#else
1292 +size_t public_mUSABLe();
1293 +#endif
1296 + malloc_stats();
1297 + Prints on stderr the amount of space obtained from the system (both
1298 + via sbrk and mmap), the maximum amount (which may be more than
1299 + current if malloc_trim and/or munmap got called), and the current
1300 + number of bytes allocated via malloc (or realloc, etc) but not yet
1301 + freed. Note that this is the number of bytes allocated, not the
1302 + number requested. It will be larger than the number requested
1303 + because of alignment and bookkeeping overhead. Because it includes
1304 + alignment wastage as being in use, this figure may be greater than
1305 + zero even when no user-level chunks are allocated.
1307 + The reported current and maximum system memory can be inaccurate if
1308 + a program makes other calls to system memory allocation functions
1309 + (normally sbrk) outside of malloc.
1311 + malloc_stats prints only the most commonly interesting statistics.
1312 + More information can be obtained by calling mallinfo.
1315 +#if __STD_C
1316 +void public_mSTATs();
1317 +#else
1318 +void public_mSTATs();
1319 +#endif
1321 +/* mallopt tuning options */
1324 + M_MXFAST is the maximum request size used for "fastbins", special bins
1325 + that hold returned chunks without consolidating their spaces. This
1326 + enables future requests for chunks of the same size to be handled
1327 + very quickly, but can increase fragmentation, and thus increase the
1328 + overall memory footprint of a program.
1330 + This malloc manages fastbins very conservatively yet still
1331 + efficiently, so fragmentation is rarely a problem for values less
1332 + than or equal to the default. The maximum supported value of MXFAST
1333 + is 80. You wouldn't want it any higher than this anyway. Fastbins
1334 + are designed especially for use with many small structs, objects or
1335 + strings -- the default handles structs/objects/arrays with sizes up
1336 + to 8 4byte fields, or small strings representing words, tokens,
1337 + etc. Using fastbins for larger objects normally worsens
1338 + fragmentation without improving speed.
1340 + M_MXFAST is set in REQUEST size units. It is internally used in
1341 + chunksize units, which adds padding and alignment. You can reduce
1342 + M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
1343 + algorithm to be a closer approximation of fifo-best-fit in all cases,
1344 + not just for larger requests, but will generally cause it to be
1345 + slower.
1349 +/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
1350 +#ifndef M_MXFAST
1351 +#define M_MXFAST 1
1352 +#endif
1354 +#ifndef DEFAULT_MXFAST
1355 +#define DEFAULT_MXFAST 64
1356 +#endif
1360 + M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
1361 + to keep before releasing via malloc_trim in free().
1363 + Automatic trimming is mainly useful in long-lived programs.
1364 + Because trimming via sbrk can be slow on some systems, and can
1365 + sometimes be wasteful (in cases where programs immediately
1366 + afterward allocate more large chunks) the value should be high
1367 + enough so that your overall system performance would improve by
1368 + releasing this much memory.
1370 + The trim threshold and the mmap control parameters (see below)
1371 + can be traded off with one another. Trimming and mmapping are
1372 + two different ways of releasing unused memory back to the
1373 + system. Between these two, it is often possible to keep
1374 + system-level demands of a long-lived program down to a bare
1375 + minimum. For example, in one test suite of sessions measuring
1376 + the XF86 X server on Linux, using a trim threshold of 128K and a
1377 + mmap threshold of 192K led to near-minimal long term resource
1378 + consumption.
1380 + If you are using this malloc in a long-lived program, it should
1381 + pay to experiment with these values. As a rough guide, you
1382 + might set to a value close to the average size of a process
1383 + (program) running on your system. Releasing this much memory
1384 + would allow such a process to run in memory. Generally, it's
1385 + worth it to tune for trimming rather than memory mapping when a
1386 + program undergoes phases where several large chunks are
1387 + allocated and released in ways that can reuse each other's
1388 + storage, perhaps mixed with phases where there are no such
1389 + chunks at all. And in well-behaved long-lived programs,
1390 + controlling release of large blocks via trimming versus mapping
1391 + is usually faster.
1393 + However, in most programs, these parameters serve mainly as
1394 + protection against the system-level effects of carrying around
1395 + massive amounts of unneeded memory. Since frequent calls to
1396 + sbrk, mmap, and munmap otherwise degrade performance, the default
1397 + parameters are set to relatively high values that serve only as
1398 + safeguards.
1400 + The trim value must be greater than the page size to have any useful
1401 + effect. To disable trimming completely, you can set to
1402 + (unsigned long)(-1)
1404 + Trim settings interact with fastbin (MXFAST) settings: Unless
1405 + TRIM_FASTBINS is defined, automatic trimming never takes place upon
1406 + freeing a chunk with size less than or equal to MXFAST. Trimming is
1407 + instead delayed until subsequent freeing of larger chunks. However,
1408 + you can still force an attempted trim by calling malloc_trim.
1410 + Also, trimming is not generally possible in cases where
1411 + the main arena is obtained via mmap.
1413 + Note that the trick some people use of mallocing a huge space and
1414 + then freeing it at program startup, in an attempt to reserve system
1415 + memory, doesn't have the intended effect under automatic trimming,
1416 + since that memory will immediately be returned to the system.
1419 +#define M_TRIM_THRESHOLD -1
1421 +#ifndef DEFAULT_TRIM_THRESHOLD
1422 +#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
1423 +#endif
1426 + M_TOP_PAD is the amount of extra `padding' space to allocate or
1427 + retain whenever sbrk is called. It is used in two ways internally:
1429 + * When sbrk is called to extend the top of the arena to satisfy
1430 + a new malloc request, this much padding is added to the sbrk
1431 + request.
1433 + * When malloc_trim is called automatically from free(),
1434 + it is used as the `pad' argument.
1436 + In both cases, the actual amount of padding is rounded
1437 + so that the end of the arena is always a system page boundary.
1439 + The main reason for using padding is to avoid calling sbrk so
1440 + often. Having even a small pad greatly reduces the likelihood
1441 + that nearly every malloc request during program start-up (or
1442 + after trimming) will invoke sbrk, which needlessly wastes
1443 + time.
1445 + Automatic rounding-up to page-size units is normally sufficient
1446 + to avoid measurable overhead, so the default is 0. However, in
1447 + systems where sbrk is relatively slow, it can pay to increase
1448 + this value, at the expense of carrying around more memory than
1449 + the program needs.
1452 +#define M_TOP_PAD -2
1454 +#ifndef DEFAULT_TOP_PAD
1455 +#define DEFAULT_TOP_PAD (0)
1456 +#endif
1459 + M_MMAP_THRESHOLD is the request size threshold for using mmap()
1460 + to service a request. Requests of at least this size that cannot
1461 + be allocated using already-existing space will be serviced via mmap.
1462 + (If enough normal freed space already exists it is used instead.)
1464 + Using mmap segregates relatively large chunks of memory so that
1465 + they can be individually obtained and released from the host
1466 + system. A request serviced through mmap is never reused by any
1467 + other request (at least not directly; the system may just so
1468 + happen to remap successive requests to the same locations).
1470 + Segregating space in this way has the benefits that:
1472 + 1. Mmapped space can ALWAYS be individually released back
1473 + to the system, which helps keep the system level memory
1474 + demands of a long-lived program low.
1475 + 2. Mapped memory can never become `locked' between
1476 + other chunks, as can happen with normally allocated chunks, which
1477 + means that even trimming via malloc_trim would not release them.
1478 + 3. On some systems with "holes" in address spaces, mmap can obtain
1479 + memory that sbrk cannot.
1481 + However, it has the disadvantages that:
1483 + 1. The space cannot be reclaimed, consolidated, and then
1484 + used to service later requests, as happens with normal chunks.
1485 + 2. It can lead to more wastage because of mmap page alignment
1486 + requirements
1487 + 3. It causes malloc performance to be more dependent on host
1488 + system memory management support routines which may vary in
1489 + implementation quality and may impose arbitrary
1490 + limitations. Generally, servicing a request via normal
1491 + malloc steps is faster than going through a system's mmap.
1493 + The advantages of mmap nearly always outweigh disadvantages for
1494 + "large" chunks, but the value of "large" varies across systems. The
1495 + default is an empirically derived value that works well in most
1496 + systems.
1499 +#define M_MMAP_THRESHOLD -3
1501 +#ifndef DEFAULT_MMAP_THRESHOLD
1502 +#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
1503 +#endif
1506 + M_MMAP_MAX is the maximum number of requests to simultaneously
1507 + service using mmap. This parameter exists because some
1508 + systems have a limited number of internal tables for
1509 + use by mmap, and using more than a few of them may degrade
1510 + performance.
1512 + The default is set to a value that serves only as a safeguard.
1513 + Setting to 0 disables use of mmap for servicing large requests. If
1514 + HAVE_MMAP is not set, the default value is 0, and attempts to set it
1515 + to non-zero values in mallopt will fail.
1518 +#define M_MMAP_MAX -4
1520 +#ifndef DEFAULT_MMAP_MAX
1521 +#if HAVE_MMAP
1522 +#define DEFAULT_MMAP_MAX (65536)
1523 +#else
1524 +#define DEFAULT_MMAP_MAX (0)
1525 +#endif
1526 +#endif
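+
+/*
+  Illustrative sketch, not part of the original dlmalloc/KDE code: the
+  option codes above (M_TOP_PAD, M_MMAP_THRESHOLD, M_MMAP_MAX) are the
+  ones accepted by mALLOPt below.  Assuming the usual dlmalloc public
+  name mallopt, run-time tuning would look roughly like this:
+*/
+#if 0
+static void tune_malloc_example(void)
+{
+  mallopt(M_TOP_PAD,        1024 * 1024);  /* keep ~1MB of slack above top    */
+  mallopt(M_MMAP_THRESHOLD,  256 * 1024);  /* mmap requests of 256KB and up   */
+  mallopt(M_MMAP_MAX,              1024);  /* cap simultaneous mmapped chunks */
+}
+#endif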
1528 +#ifdef __cplusplus
1529 +}; /* end of extern "C" */
1530 +#endif
1533 + ========================================================================
1534 + To make a fully customizable malloc.h header file, cut everything
1535 + above this line, put into file malloc.h, edit to suit, and #include it
1536 + on the next line, as well as in programs that use this malloc.
1537 + ========================================================================
1540 +/* #include "malloc.h" */
1542 +/* --------------------- public wrappers ---------------------- */
1544 +#ifdef USE_PUBLIC_MALLOC_WRAPPERS
1546 +/* Declare all routines as internal */
1547 +#if __STD_C
1548 +static Void_t* mALLOc(size_t);
1549 +static void fREe(Void_t*);
1550 +static Void_t* rEALLOc(Void_t*, size_t);
1551 +static Void_t* mEMALIGn(size_t, size_t);
1552 +static Void_t* vALLOc(size_t);
1553 +static Void_t* pVALLOc(size_t);
1554 +static Void_t* cALLOc(size_t, size_t);
1555 +static Void_t** iCALLOc(size_t, size_t, Void_t**);
1556 +static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
1557 +static void cFREe(Void_t*);
1558 +static int mTRIm(size_t);
1559 +static size_t mUSABLe(Void_t*);
1560 +static void mSTATs();
1561 +static int mALLOPt(int, int);
1562 +static struct mallinfo mALLINFo(void);
1563 +#else
1564 +static Void_t* mALLOc();
1565 +static void fREe();
1566 +static Void_t* rEALLOc();
1567 +static Void_t* mEMALIGn();
1568 +static Void_t* vALLOc();
1569 +static Void_t* pVALLOc();
1570 +static Void_t* cALLOc();
1571 +static Void_t** iCALLOc();
1572 +static Void_t** iCOMALLOc();
1573 +static void cFREe();
1574 +static int mTRIm();
1575 +static size_t mUSABLe();
1576 +static void mSTATs();
1577 +static int mALLOPt();
1578 +static struct mallinfo mALLINFo();
1579 +#endif
1582 + MALLOC_PREACTION and MALLOC_POSTACTION should be
1583 + defined to return 0 on success, and nonzero on failure.
1584 + The return value of MALLOC_POSTACTION is currently ignored
1585 + in wrapper functions since there is no reasonable default
1586 + action to take on failure.
1590 +#ifdef USE_MALLOC_LOCK
1592 +#ifdef WIN32
1594 +static int mALLOC_MUTEx;
1595 +#define MALLOC_PREACTION slwait(&mALLOC_MUTEx)
1596 +#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)
1598 +#else
1600 +#if 0
1601 +#include <pthread.h>
1603 +static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;
1605 +#define MALLOC_PREACTION pthread_mutex_lock(&mALLOC_MUTEx)
1606 +#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)
1608 +#else
1610 +#ifdef KDE_MALLOC_X86
1611 +#include "x86.h"
1612 +#elif defined(KDE_MALLOC_AVR32)
1614 +#include <sched.h>
1615 +#include <time.h>
1617 +static __inline__ int q_atomic_swp(volatile unsigned int *ptr,
1618 + unsigned int newval)
+{
1620 + register int ret;
1621 + asm volatile("xchg %0,%1,%2"
1622 + : "=&r"(ret)
1623 + : "r"(ptr), "r"(newval)
1624 + : "memory", "cc");
1625 + return ret;
+}
1628 +typedef struct {
1629 + volatile unsigned int lock;
1630 + int pad0_;
1631 +} mutex_t;
1633 +#define MUTEX_INITIALIZER { 0, 0 }
1635 +static __inline__ int lock(mutex_t *m) {
1636 + int cnt = 0;
1637 + struct timespec tm;
1639 + for(;;) {
1640 + if (q_atomic_swp(&m->lock, 1) == 0)
1641 + return 0;
1642 +#ifdef _POSIX_PRIORITY_SCHEDULING
1643 + if(cnt < 50) {
1644 + sched_yield();
1645 + cnt++;
1646 + } else
1647 +#endif
+ {
1649 + tm.tv_sec = 0;
1650 + tm.tv_nsec = 2000001;
1651 + nanosleep(&tm, NULL);
1652 + cnt = 0;
+ }
+ }
+}
1657 +static __inline__ int unlock(mutex_t *m) {
1658 + m->lock = 0;
1659 + return 0;
+}
1662 +#else
1663 +#error Unknown spinlock implementation
1664 +#endif
1666 +static mutex_t spinlock = MUTEX_INITIALIZER;
1668 +#define MALLOC_PREACTION lock( &spinlock )
1669 +#define MALLOC_POSTACTION unlock( &spinlock )
1671 +#endif
1673 +#endif /* WIN32 */
1675 +#else
1677 +/* Substitute anything you like for these */
1679 +#define MALLOC_PREACTION (0)
1680 +#define MALLOC_POSTACTION (0)
1682 +#endif
1684 +#if 0
1685 +Void_t* public_mALLOc(size_t bytes) {
1686 + Void_t* m;
1687 + if (MALLOC_PREACTION != 0) {
1688 + return 0;
1690 + m = mALLOc(bytes);
1691 + if (MALLOC_POSTACTION != 0) {
1693 + return m;
1696 +void public_fREe(Void_t* m) {
1697 + if (MALLOC_PREACTION != 0) {
1698 + return;
1700 + fREe(m);
1701 + if (MALLOC_POSTACTION != 0) {
1705 +Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
1706 + if (MALLOC_PREACTION != 0) {
1707 + return 0;
1709 + m = rEALLOc(m, bytes);
1710 + if (MALLOC_POSTACTION != 0) {
1712 + return m;
1715 +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
1716 + Void_t* m;
1717 + if (MALLOC_PREACTION != 0) {
1718 + return 0;
1720 + m = mEMALIGn(alignment, bytes);
1721 + if (MALLOC_POSTACTION != 0) {
1723 + return m;
1726 +Void_t* public_vALLOc(size_t bytes) {
1727 + Void_t* m;
1728 + if (MALLOC_PREACTION != 0) {
1729 + return 0;
1731 + m = vALLOc(bytes);
1732 + if (MALLOC_POSTACTION != 0) {
1734 + return m;
1737 +Void_t* public_pVALLOc(size_t bytes) {
1738 + Void_t* m;
1739 + if (MALLOC_PREACTION != 0) {
1740 + return 0;
1742 + m = pVALLOc(bytes);
1743 + if (MALLOC_POSTACTION != 0) {
1745 + return m;
1748 +Void_t* public_cALLOc(size_t n, size_t elem_size) {
1749 + Void_t* m;
1750 + if (MALLOC_PREACTION != 0) {
1751 + return 0;
1753 + m = cALLOc(n, elem_size);
1754 + if (MALLOC_POSTACTION != 0) {
1756 + return m;
1760 +Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
1761 + Void_t** m;
1762 + if (MALLOC_PREACTION != 0) {
1763 + return 0;
1765 + m = iCALLOc(n, elem_size, chunks);
1766 + if (MALLOC_POSTACTION != 0) {
1768 + return m;
1771 +Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
1772 + Void_t** m;
1773 + if (MALLOC_PREACTION != 0) {
1774 + return 0;
1776 + m = iCOMALLOc(n, sizes, chunks);
1777 + if (MALLOC_POSTACTION != 0) {
1779 + return m;
1782 +void public_cFREe(Void_t* m) {
1783 + if (MALLOC_PREACTION != 0) {
1784 + return;
1786 + cFREe(m);
1787 + if (MALLOC_POSTACTION != 0) {
1791 +int public_mTRIm(size_t s) {
1792 + int result;
1793 + if (MALLOC_PREACTION != 0) {
1794 + return 0;
1796 + result = mTRIm(s);
1797 + if (MALLOC_POSTACTION != 0) {
1799 + return result;
1802 +size_t public_mUSABLe(Void_t* m) {
1803 + size_t result;
1804 + if (MALLOC_PREACTION != 0) {
1805 + return 0;
1807 + result = mUSABLe(m);
1808 + if (MALLOC_POSTACTION != 0) {
1810 + return result;
1813 +void public_mSTATs() {
1814 + if (MALLOC_PREACTION != 0) {
1815 + return;
1817 + mSTATs();
1818 + if (MALLOC_POSTACTION != 0) {
1822 +struct mallinfo public_mALLINFo() {
1823 + struct mallinfo m;
1824 + if (MALLOC_PREACTION != 0) {
1825 + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1826 + return nm;
1828 + m = mALLINFo();
1829 + if (MALLOC_POSTACTION != 0) {
1831 + return m;
1834 +int public_mALLOPt(int p, int v) {
1835 + int result;
1836 + if (MALLOC_PREACTION != 0) {
1837 + return 0;
1839 + result = mALLOPt(p, v);
1840 + if (MALLOC_POSTACTION != 0) {
1842 + return result;
1844 +#endif
1846 +#endif
1850 +/* ------------- Optional versions of memcopy ---------------- */
1853 +#if USE_MEMCPY
1856 + Note: memcpy is ONLY invoked with non-overlapping regions,
1857 + so the (usually slower) memmove is not needed.
1860 +#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
1861 +#define MALLOC_ZERO(dest, nbytes) memset(dest, 0, nbytes)
1863 +#else /* !USE_MEMCPY */
1865 +/* Use Duff's device for good zeroing/copying performance. */
1867 +#define MALLOC_ZERO(charp, nbytes) \
1868 +do { \
1869 + INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp); \
1870 + unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1871 + long mcn; \
1872 + if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1873 + switch (mctmp) { \
1874 + case 0: for(;;) { *mzp++ = 0; \
1875 + case 7: *mzp++ = 0; \
1876 + case 6: *mzp++ = 0; \
1877 + case 5: *mzp++ = 0; \
1878 + case 4: *mzp++ = 0; \
1879 + case 3: *mzp++ = 0; \
1880 + case 2: *mzp++ = 0; \
1881 + case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
1882 + } \
1883 +} while(0)
1885 +#define MALLOC_COPY(dest,src,nbytes) \
1886 +do { \
1887 + INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src; \
1888 + INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest; \
1889 + unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T); \
1890 + long mcn; \
1891 + if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; } \
1892 + switch (mctmp) { \
1893 + case 0: for(;;) { *mcdst++ = *mcsrc++; \
1894 + case 7: *mcdst++ = *mcsrc++; \
1895 + case 6: *mcdst++ = *mcsrc++; \
1896 + case 5: *mcdst++ = *mcsrc++; \
1897 + case 4: *mcdst++ = *mcsrc++; \
1898 + case 3: *mcdst++ = *mcsrc++; \
1899 + case 2: *mcdst++ = *mcsrc++; \
1900 + case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
1901 + } \
1902 +} while(0)
1904 +#endif
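+
+/*
+  Illustrative note, not part of the original code: the two unrolled loops
+  above are word-wise equivalents of memset/memcpy.  With 4-byte
+  INTERNAL_SIZE_T, MALLOC_ZERO(p, 64) clears sixteen words in two passes of
+  eight; a plain-C equivalent of the zeroing macro is sketched below.
+*/
+#if 0
+static void malloc_zero_equivalent(Void_t* p, size_t nbytes)
+{
+  INTERNAL_SIZE_T* w = (INTERNAL_SIZE_T*)p;
+  size_t n = nbytes / sizeof(INTERNAL_SIZE_T);
+  while (n-- > 0)
+    *w++ = 0;  /* same effect as the Duff's-device switch, one word at a time */
+}
+#endif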
1906 +/* ------------------ MMAP support ------------------ */
1909 +#if HAVE_MMAP
1911 +#include <fcntl.h>
1912 +#ifndef LACKS_SYS_MMAN_H
1913 +#include <sys/mman.h>
1914 +#endif
1916 +#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1917 +#define MAP_ANONYMOUS MAP_ANON
1918 +#endif
1921 + Nearly all versions of mmap support MAP_ANONYMOUS,
1922 + so the following is unlikely to be needed, but is
1923 + supplied just in case.
1926 +#ifndef MAP_ANONYMOUS
1928 +static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1930 +#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
1931 + (dev_zero_fd = open("/dev/zero", O_RDWR), \
1932 + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
1933 + mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))
1935 +#else
1937 +#define MMAP(addr, size, prot, flags) \
1938 + (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))
1940 +#endif
1943 +#endif /* HAVE_MMAP */
1947 + ----------------------- Chunk representations -----------------------
1952 + This struct declaration is misleading (but accurate and necessary).
1953 + It declares a "view" into memory allowing access to necessary
1954 + fields at known offsets from a given base. See explanation below.
1957 +struct malloc_chunk {
1959 + INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
1960 + INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
1962 + struct malloc_chunk* fd; /* double links -- used only if free. */
1963 + struct malloc_chunk* bk;
+};
1967 +typedef struct malloc_chunk* mchunkptr;
1970 + malloc_chunk details:
1972 + (The following includes lightly edited explanations by Colin Plumb.)
1974 + Chunks of memory are maintained using a `boundary tag' method as
1975 + described in e.g., Knuth or Standish. (See the paper by Paul
1976 + Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1977 + survey of such techniques.) Sizes of free chunks are stored both
1978 + in the front of each chunk and at the end. This makes
1979 + consolidating fragmented chunks into bigger chunks very fast. The
1980 + size fields also hold bits representing whether chunks are free or
1981 + in use.
1983 + An allocated chunk looks like this:
1986 + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1987 + | Size of previous chunk, if allocated | |
1988 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1989 + | Size of chunk, in bytes |P|
1990 + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1991 + | User data starts here... .
1992 + . .
1993 + . (malloc_usable_space() bytes) .
1994 + . |
1995 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1996 + | Size of chunk |
1997 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2000 + Where "chunk" is the front of the chunk for the purpose of most of
2001 + the malloc code, but "mem" is the pointer that is returned to the
2002 + user. "Nextchunk" is the beginning of the next contiguous chunk.
2004 + Chunks always begin on even word boundaries, so the mem portion
2005 + (which is returned to the user) is also on an even word boundary, and
2006 + thus at least double-word aligned.
2008 + Free chunks are stored in circular doubly-linked lists, and look like this:
2010 + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2011 + | Size of previous chunk |
2012 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2013 + `head:' | Size of chunk, in bytes |P|
2014 + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2015 + | Forward pointer to next chunk in list |
2016 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2017 + | Back pointer to previous chunk in list |
2018 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2019 + | Unused space (may be 0 bytes long) .
2020 + . .
2021 + . |
2022 +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2023 + `foot:' | Size of chunk, in bytes |
2024 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2026 + The P (PREV_INUSE) bit, stored in the unused low-order bit of the
2027 + chunk size (which is always a multiple of two words), is an in-use
2028 + bit for the *previous* chunk. If that bit is *clear*, then the
2029 + word before the current chunk size contains the previous chunk
2030 + size, and can be used to find the front of the previous chunk.
2031 + The very first chunk allocated always has this bit set,
2032 + preventing access to non-existent (or non-owned) memory. If
2033 + prev_inuse is set for any given chunk, then you CANNOT determine
2034 + the size of the previous chunk, and might even get a memory
2035 + addressing fault when trying to do so.
2037 + Note that the `foot' of the current chunk is actually represented
2038 + as the prev_size of the NEXT chunk. This makes it easier to
2039 + deal with alignments etc but can be very confusing when trying
2040 + to extend or adapt this code.
2042 + The two exceptions to all this are
2044 + 1. The special chunk `top' doesn't bother using the
2045 + trailing size field since there is no next contiguous chunk
2046 + that would have to index off it. After initialization, `top'
2047 + is forced to always exist. If it would become less than
2048 + MINSIZE bytes long, it is replenished.
2050 + 2. Chunks allocated via mmap, which have the second-lowest-order
2051 + bit (IS_MMAPPED) set in their size fields. Because they are
2052 + allocated one-by-one, each must contain its own trailing size field.
2057 + ---------- Size and alignment checks and conversions ----------
2060 +/* conversion from malloc headers to user pointers, and back */
2062 +#define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
2063 +#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
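+
+/*
+  Illustrative sketch, assuming a 32-bit target such as AVR32 where
+  2*SIZE_SZ == 8: the two macros above are exact inverses, and the user
+  pointer sits 8 bytes past the chunk header.
+*/
+#if 0
+static void chunk2mem_example(mchunkptr p)
+{
+  Void_t* mem = chunk2mem(p);                  /* (char*)p + 2*SIZE_SZ       */
+  assert(mem2chunk(mem) == p);                 /* round trip returns p       */
+  assert((char*)mem == (char*)p + 2*SIZE_SZ);  /* i.e. p + 8 when SIZE_SZ==4 */
+}
+#endif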
2065 +/* The smallest possible chunk */
2066 +#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))
2068 +/* The smallest size we can malloc is an aligned minimal chunk */
2070 +#define MINSIZE \
2071 + (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
2073 +/* Check if m has acceptable alignment */
2075 +#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
2079 + Check if a request is so large that it would wrap around zero when
2080 + padded and aligned. To simplify some other code, the bound is made
2081 + low enough so that adding MINSIZE will also not wrap around zero.
2084 +#define REQUEST_OUT_OF_RANGE(req) \
2085 + ((unsigned long)(req) >= \
2086 + (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))
2088 +/* pad request bytes into a usable size -- internal version */
2090 +#define request2size(req) \
2091 + (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
2092 + MINSIZE : \
2093 + ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
2095 +/* Same, except also perform argument check */
2097 +#define checked_request2size(req, sz) \
2098 + if (REQUEST_OUT_OF_RANGE(req)) { \
2099 + MALLOC_FAILURE_ACTION; \
2100 + return 0; \
2101 + } \
2102 + (sz) = request2size(req);
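+
+/*
+  Worked example (illustrative, assuming 4-byte SIZE_SZ and 8-byte
+  alignment, so MALLOC_ALIGN_MASK == 7 and MINSIZE == 16):
+  request2size(25) == (25 + 4 + 7) & ~7 == 32, while request2size(0)
+  falls below MINSIZE and is rounded up to 16.  REQUEST_OUT_OF_RANGE
+  rejects anything in the top 2*MINSIZE bytes of the size range, so the
+  padding above can never wrap around zero.
+*/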
2105 + --------------- Physical chunk operations ---------------
2109 +/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
2110 +#define PREV_INUSE 0x1
2112 +/* extract inuse bit of previous chunk */
2113 +#define prev_inuse(p) ((p)->size & PREV_INUSE)
2116 +/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
2117 +#define IS_MMAPPED 0x2
2119 +/* check for mmap()'ed chunk */
2120 +#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
2123 + Bits to mask off when extracting size
2125 + Note: IS_MMAPPED is intentionally not masked off from size field in
2126 + macros for which mmapped chunks should never be seen. This should
2127 + cause helpful core dumps to occur if it is tried by accident by
2128 + people extending or adapting this malloc.
2130 +#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
2132 +/* Get size, ignoring use bits */
2133 +#define chunksize(p) ((p)->size & ~(SIZE_BITS))
2136 +/* Ptr to next physical malloc_chunk. */
2137 +#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
2139 +/* Ptr to previous physical malloc_chunk */
2140 +#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
2142 +/* Treat space at ptr + offset as a chunk */
2143 +#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
2145 +/* extract p's inuse bit */
2146 +#define inuse(p)\
2147 +((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
2149 +/* set/clear chunk as being inuse without otherwise disturbing */
2150 +#define set_inuse(p)\
2151 +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
2153 +#define clear_inuse(p)\
2154 +((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
2157 +/* check/set/clear inuse bits in known places */
2158 +#define inuse_bit_at_offset(p, s)\
2159 + (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
2161 +#define set_inuse_bit_at_offset(p, s)\
2162 + (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
2164 +#define clear_inuse_bit_at_offset(p, s)\
2165 + (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
2168 +/* Set size at head, without disturbing its use bit */
2169 +#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
2171 +/* Set size/use field */
2172 +#define set_head(p, s) ((p)->size = (s))
2174 +/* Set size at footer (only when chunk is not in use) */
2175 +#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
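+
+/*
+  Illustrative sketch, not part of the original code: the macros above are
+  all that is needed to walk the heap by boundary tags.  A hypothetical
+  debug helper that steps forward from a known chunk up to top:
+*/
+#if 0
+static void walk_chunks_example(mchunkptr p, mchunkptr top)
+{
+  while (p != top) {
+    INTERNAL_SIZE_T sz = chunksize(p);  /* size with flag bits masked off   */
+    int used = inuse(p);                /* PREV_INUSE bit of the next chunk */
+    (void)sz; (void)used;               /* ... inspect the chunk here ...   */
+    p = next_chunk(p);                  /* advance by the raw size field    */
+  }
+}
+#endif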
2179 + -------------------- Internal data structures --------------------
2181 + All internal state is held in an instance of malloc_state defined
2182 + below. There are no other static variables, except in two optional
2183 + cases:
2184 + * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
2185 + * If HAVE_MMAP is true, but mmap doesn't support
2186 + MAP_ANONYMOUS, a dummy file descriptor for mmap.
2188 + Beware of lots of tricks that minimize the total bookkeeping space
2189 + requirements. The result is a little over 1K bytes (for 4-byte
2190 + pointers and size_t).
2194 + Bins
2196 + An array of bin headers for free chunks. Each bin is doubly
2197 + linked. The bins are approximately proportionally (log) spaced.
2198 + There are a lot of these bins (128). This may look excessive, but
2199 + works very well in practice. Most bins hold sizes that are
2200 + unusual as malloc request sizes, but are more usual for fragments
2201 + and consolidated sets of chunks, which is what these bins hold, so
2202 + they can be found quickly. All procedures maintain the invariant
2203 + that no consolidated chunk physically borders another one, so each
2204 + chunk in a list is known to be preceded and followed by either
2205 + inuse chunks or the ends of memory.
2207 + Chunks in bins are kept in size order, with ties going to the
2208 + approximately least recently used chunk. Ordering isn't needed
2209 + for the small bins, which all contain the same-sized chunks, but
2210 + facilitates best-fit allocation for larger chunks. These lists
2211 + are just sequential. Keeping them in order almost never requires
2212 + enough traversal to warrant using fancier ordered data
2213 + structures.
2215 + Chunks of the same size are linked with the most
2216 + recently freed at the front, and allocations are taken from the
2217 + back. This results in LRU (FIFO) allocation order, which tends
2218 + to give each chunk an equal opportunity to be consolidated with
2219 + adjacent freed chunks, resulting in larger free chunks and less
2220 + fragmentation.
2222 + To simplify use in double-linked lists, each bin header acts
2223 + as a malloc_chunk. This avoids special-casing for headers.
2224 + But to conserve space and improve locality, we allocate
2225 + only the fd/bk pointers of bins, and then use repositioning tricks
2226 + to treat these as the fields of a malloc_chunk*.
2229 +typedef struct malloc_chunk* mbinptr;
2231 +/* addressing -- note that bin_at(0) does not exist */
2232 +#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))
2234 +/* analog of ++bin */
2235 +#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))
2237 +/* Reminders about list directionality within bins */
2238 +#define first(b) ((b)->fd)
2239 +#define last(b) ((b)->bk)
2241 +/* Take a chunk off a bin list */
2242 +#define unlink(P, BK, FD) { \
2243 + FD = P->fd; \
2244 + BK = P->bk; \
2245 + FD->bk = BK; \
2246 + BK->fd = FD; \
+}
2250 + Indexing
2252 + Bins for sizes < 512 bytes contain chunks of all the same size, spaced
2253 + 8 bytes apart. Larger bins are approximately logarithmically spaced:
2255 + 64 bins of size 8
2256 + 32 bins of size 64
2257 + 16 bins of size 512
2258 + 8 bins of size 4096
2259 + 4 bins of size 32768
2260 + 2 bins of size 262144
2261 + 1 bin of size what's left
2263 + There is actually a little bit of slop in the numbers in bin_index
2264 + for the sake of speed. This makes no difference elsewhere.
2266 + The bins top out around 1MB because we expect to service large
2267 + requests via mmap.
2270 +#define NBINS 128
2271 +#define NSMALLBINS 64
2272 +#define SMALLBIN_WIDTH 8
2273 +#define MIN_LARGE_SIZE 512
2275 +#define in_smallbin_range(sz) \
2276 + ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)
2278 +#define smallbin_index(sz) (((unsigned)(sz)) >> 3)
2280 +#define largebin_index(sz) \
2281 +(((((unsigned long)(sz)) >> 6) <= 32)? 56 + (((unsigned long)(sz)) >> 6): \
2282 + ((((unsigned long)(sz)) >> 9) <= 20)? 91 + (((unsigned long)(sz)) >> 9): \
2283 + ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
2284 + ((((unsigned long)(sz)) >> 15) <= 4)? 119 + (((unsigned long)(sz)) >> 15): \
2285 + ((((unsigned long)(sz)) >> 18) <= 2)? 124 + (((unsigned long)(sz)) >> 18): \
2286 + 126)
2288 +#define bin_index(sz) \
2289 + ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
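+
+/*
+  Worked example (illustrative): a 40-byte chunk is "small", so
+  bin_index(40) == smallbin_index(40) == 40>>3 == 5.  A 600-byte chunk is
+  "large": 600>>6 == 9 <= 32, so largebin_index(600) == 56 + 9 == 65, and a
+  20000-byte chunk falls through to 110 + (20000>>12) == 114.
+*/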
2293 + Unsorted chunks
2295 + All remainders from chunk splits, as well as all returned chunks,
2296 + are first placed in the "unsorted" bin. They are then placed
2297 + in regular bins after malloc gives them ONE chance to be used before
2298 + binning. So, basically, the unsorted_chunks list acts as a queue,
2299 + with chunks being placed on it in free (and malloc_consolidate),
2300 + and taken off (to be either used or placed in bins) in malloc.
2303 +/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
2304 +#define unsorted_chunks(M) (bin_at(M, 1))
2307 + Top
2309 + The top-most available chunk (i.e., the one bordering the end of
2310 + available memory) is treated specially. It is never included in
2311 + any bin, is used only if no other chunk is available, and is
2312 + released back to the system if it is very large (see
2313 + M_TRIM_THRESHOLD). Because top initially
2314 + points to its own bin with initial zero size, thus forcing
2315 + extension on the first malloc request, we avoid having any special
2316 + code in malloc to check whether it even exists yet. But we still
2317 + need to do so when getting memory from system, so we make
2318 + initial_top treat the bin as a legal but unusable chunk during the
2319 + interval between initialization and the first call to
2320 + sYSMALLOc. (This is somewhat delicate, since it relies on
2321 + the 2 preceding words to be zero during this interval as well.)
2324 +/* Conveniently, the unsorted bin can be used as dummy top on first call */
2325 +#define initial_top(M) (unsorted_chunks(M))
2328 + Binmap
2330 + To help compensate for the large number of bins, a one-level index
2331 + structure is used for bin-by-bin searching. `binmap' is a
2332 + bitvector recording whether bins are definitely empty so they can
2333 + be skipped over during traversals. The bits are NOT always
2334 + cleared as soon as bins are empty, but instead only
2335 + when they are noticed to be empty during traversal in malloc.
2338 +/* Conservatively use 32 bits per map word, even if on 64bit system */
2339 +#define BINMAPSHIFT 5
2340 +#define BITSPERMAP (1U << BINMAPSHIFT)
2341 +#define BINMAPSIZE (NBINS / BITSPERMAP)
2343 +#define idx2block(i) ((i) >> BINMAPSHIFT)
2344 +#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
2346 +#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
2347 +#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
2348 +#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
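+
+/*
+  Worked example (illustrative): bin 68 lives in map word
+  idx2block(68) == 68>>5 == 2 at bit idx2bit(68) == 1 << (68 & 31) == 0x10,
+  so mark_bin, unmark_bin and get_binmap for that bin all touch
+  binmap[2] & 0x10.
+*/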
2351 + Fastbins
2353 + An array of lists holding recently freed small chunks. Fastbins
2354 + are not doubly linked. It is faster to single-link them, and
2355 + since chunks are never removed from the middles of these lists,
2356 + double linking is not necessary. Also, unlike regular bins, they
2357 + are not even processed in FIFO order (they use faster LIFO) since
2358 + ordering doesn't much matter in the transient contexts in which
2359 + fastbins are normally used.
2361 + Chunks in fastbins keep their inuse bit set, so they cannot
2362 + be consolidated with other free chunks. malloc_consolidate
2363 + releases all chunks in fastbins and consolidates them with
2364 + other free chunks.
2367 +typedef struct malloc_chunk* mfastbinptr;
2369 +/* offset 2 to use otherwise unindexable first 2 bins */
2370 +#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)
2372 +/* The maximum fastbin request size we support */
2373 +#define MAX_FAST_SIZE 80
2375 +#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)
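+
+/*
+  Worked example (illustrative, assuming 4-byte SIZE_SZ): a request of
+  MAX_FAST_SIZE == 80 bytes pads to request2size(80) == 88, so
+  fastbin_index(88) == (88>>3) - 2 == 9 and NFASTBINS == 10; the smallest
+  possible chunk, 16 bytes, maps to fastbin index 0.
+*/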
2378 + FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
2379 + that triggers automatic consolidation of possibly-surrounding
2380 + fastbin chunks. This is a heuristic, so the exact value should not
2381 + matter too much. It is defined at half the default trim threshold as a
2382 + compromise heuristic to only attempt consolidation if it is likely
2383 + to lead to trimming. However, it is not dynamically tunable, since
2384 + consolidation reduces fragmentation surrounding large chunks even
2385 + if trimming is not used.
2388 +#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
2391 + Since the lowest 2 bits in max_fast don't matter in size comparisons,
2392 + they are used as flags.
2396 + FASTCHUNKS_BIT held in max_fast indicates that there are probably
2397 + some fastbin chunks. It is set true on entering a chunk into any
2398 + fastbin, and cleared only in malloc_consolidate.
2400 + The truth value is inverted so that have_fastchunks will be true
2401 + upon startup (since statics are zero-filled), simplifying
2402 + initialization checks.
2405 +#define FASTCHUNKS_BIT (1U)
2407 +#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT) == 0)
2408 +#define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
2409 +#define set_fastchunks(M) ((M)->max_fast &= ~FASTCHUNKS_BIT)
2412 + NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
2413 + regions. Otherwise, contiguity is exploited in merging together,
2414 + when possible, results from consecutive MORECORE calls.
2416 + The initial value comes from MORECORE_CONTIGUOUS, but is
2417 + changed dynamically if mmap is ever used as an sbrk substitute.
2420 +#define NONCONTIGUOUS_BIT (2U)
2422 +#define contiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
2423 +#define noncontiguous(M) (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
2424 +#define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
2425 +#define set_contiguous(M) ((M)->max_fast &= ~NONCONTIGUOUS_BIT)
2428 + Set value of max_fast.
2429 + Use impossibly small value if 0.
2430 + Precondition: there are no existing fastbin chunks.
2431 + Setting the value clears fastchunk bit but preserves noncontiguous bit.
2434 +#define set_max_fast(M, s) \
2435 + (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
2436 + FASTCHUNKS_BIT | \
2437 + ((M)->max_fast & NONCONTIGUOUS_BIT)
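+
+/*
+  Worked example (illustrative): set_max_fast(av, 64) stores
+  request2size(64) == 72 with FASTCHUNKS_BIT set, i.e. max_fast == 73
+  (plus NONCONTIGUOUS_BIT if it was already set).  Since real chunk sizes
+  are multiples of 8, the two low flag bits never change the outcome of
+  the size comparison made in free().
+*/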
2441 + ----------- Internal state representation and initialization -----------
2444 +struct malloc_state {
2446 + /* The maximum chunk size to be eligible for fastbin */
2447 + INTERNAL_SIZE_T max_fast; /* low 2 bits used as flags */
2449 + /* Fastbins */
2450 + mfastbinptr fastbins[NFASTBINS];
2452 + /* Base of the topmost chunk -- not otherwise kept in a bin */
2453 + mchunkptr top;
2455 + /* The remainder from the most recent split of a small request */
2456 + mchunkptr last_remainder;
2458 + /* Normal bins packed as described above */
2459 + mchunkptr bins[NBINS * 2];
2461 + /* Bitmap of bins */
2462 + unsigned int binmap[BINMAPSIZE];
2464 + /* Tunable parameters */
2465 + unsigned long trim_threshold;
2466 + INTERNAL_SIZE_T top_pad;
2467 + INTERNAL_SIZE_T mmap_threshold;
2469 + /* Memory map support */
2470 + int n_mmaps;
2471 + int n_mmaps_max;
2472 + int max_n_mmaps;
2474 + /* Cache malloc_getpagesize */
2475 + unsigned int pagesize;
2477 + /* Statistics */
2478 + INTERNAL_SIZE_T mmapped_mem;
2479 + INTERNAL_SIZE_T sbrked_mem;
2480 + INTERNAL_SIZE_T max_sbrked_mem;
2481 + INTERNAL_SIZE_T max_mmapped_mem;
2482 + INTERNAL_SIZE_T max_total_mem;
+};
2485 +typedef struct malloc_state *mstate;
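+
+/*
+  Rough size check (illustrative, 32-bit target): 10 fastbin pointers
+  (40 bytes) + 256 bin pointers (1024) + a 4-word binmap (16) + roughly a
+  dozen scalar fields comes to about 1.1KB, matching the "little over 1K
+  bytes" of bookkeeping quoted earlier.
+*/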
2488 + There is exactly one instance of this struct in this malloc.
2489 + If you are adapting this malloc in a way that does NOT use a static
2490 + malloc_state, you MUST explicitly zero-fill it before using. This
2491 + malloc relies on the property that malloc_state is initialized to
2492 + all zeroes (as is true of C statics).
2495 +static struct malloc_state av_; /* never directly referenced */
2498 + All uses of av_ are via get_malloc_state().
2499 + At most one "call" to get_malloc_state is made per invocation of
2500 + the public versions of malloc and free, but other routines
2501 + that in turn invoke malloc and/or free may call it more than once.
2502 + Also, it is called in check* routines if DEBUG is set.
2505 +#define get_malloc_state() (&(av_))
2508 + Initialize a malloc_state struct.
2510 + This is called only from within malloc_consolidate, which needs
2511 + to be called in the same contexts anyway. It is never called directly
2512 + outside of malloc_consolidate because some optimizing compilers try
2513 + to inline it at all call points, which turns out not to be an
2514 + optimization at all. (Inlining it in malloc_consolidate is fine though.)
2517 +#if __STD_C
2518 +static void malloc_init_state(mstate av)
2519 +#else
2520 +static void malloc_init_state(av) mstate av;
2521 +#endif
2523 + int i;
2524 + mbinptr bin;
2526 + /* Establish circular links for normal bins */
2527 + for (i = 1; i < NBINS; ++i) {
2528 + bin = bin_at(av,i);
2529 + bin->fd = bin->bk = bin;
2532 + av->top_pad = DEFAULT_TOP_PAD;
2533 + av->n_mmaps_max = DEFAULT_MMAP_MAX;
2534 + av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2535 + av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
2537 +#if !MORECORE_CONTIGUOUS
2538 + set_noncontiguous(av);
2539 +#endif
2541 + set_max_fast(av, DEFAULT_MXFAST);
2543 + av->top = initial_top(av);
2544 + av->pagesize = malloc_getpagesize;
2548 + Other internal utilities operating on mstates
2551 +#if __STD_C
2552 +static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
2553 +static int sYSTRIm(size_t, mstate);
2554 +static void malloc_consolidate(mstate);
2555 +static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
2556 +#else
2557 +static Void_t* sYSMALLOc();
2558 +static int sYSTRIm();
2559 +static void malloc_consolidate();
2560 +static Void_t** iALLOc();
2561 +#endif
2564 + Debugging support
2566 + These routines make a number of assertions about the states
2567 + of data structures that should be true at all times. If any
2568 + are not true, it's very likely that a user program has somehow
2569 + trashed memory. (It's also possible that there is a coding error
2570 + in malloc. In which case, please report it!)
2573 +#ifndef DEBUG
2575 +#define check_chunk(P)
2576 +#define check_free_chunk(P)
2577 +#define check_inuse_chunk(P)
2578 +#define check_remalloced_chunk(P,N)
2579 +#define check_malloced_chunk(P,N)
2580 +#define check_malloc_state()
2582 +#else
2583 +#define check_chunk(P) do_check_chunk(P)
2584 +#define check_free_chunk(P) do_check_free_chunk(P)
2585 +#define check_inuse_chunk(P) do_check_inuse_chunk(P)
2586 +#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
2587 +#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
2588 +#define check_malloc_state() do_check_malloc_state()
2591 + Properties of all chunks
2594 +INLINE
2595 +#if __STD_C
2596 +static void do_check_chunk(mchunkptr p)
2597 +#else
2598 +static void do_check_chunk(p) mchunkptr p;
2599 +#endif
2601 + mstate av = get_malloc_state();
2602 + unsigned long sz = chunksize(p);
2603 + /* min and max possible addresses assuming contiguous allocation */
2604 + char* max_address = (char*)(av->top) + chunksize(av->top);
2605 + char* min_address = max_address - av->sbrked_mem;
2607 + if (!chunk_is_mmapped(p)) {
2609 + /* Has legal address ... */
2610 + if (p != av->top) {
2611 + if (contiguous(av)) {
2612 + assert(((char*)p) >= min_address);
2613 + assert(((char*)p + sz) <= ((char*)(av->top)));
2616 + else {
2617 + /* top size is always at least MINSIZE */
2618 + assert((unsigned long)(sz) >= MINSIZE);
2619 + /* top predecessor always marked inuse */
2620 + assert(prev_inuse(p));
2624 + else {
2625 +#if HAVE_MMAP
2626 + /* address is outside main heap */
2627 + if (contiguous(av) && av->top != initial_top(av)) {
2628 + assert(((char*)p) < min_address || ((char*)p) > max_address);
2630 + /* chunk is page-aligned */
2631 + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
2632 + /* mem is aligned */
2633 + assert(aligned_OK(chunk2mem(p)));
2634 +#else
2635 + /* force an appropriate assert violation if debug set */
2636 + assert(!chunk_is_mmapped(p));
2637 +#endif
2642 + Properties of free chunks
2645 +INLINE
2646 +#if __STD_C
2647 +static void do_check_free_chunk(mchunkptr p)
2648 +#else
2649 +static void do_check_free_chunk(p) mchunkptr p;
2650 +#endif
2652 + mstate av = get_malloc_state();
2654 + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2655 + mchunkptr next = chunk_at_offset(p, sz);
2657 + do_check_chunk(p);
2659 + /* Chunk must claim to be free ... */
2660 + assert(!inuse(p));
2661 + assert (!chunk_is_mmapped(p));
2663 + /* Unless a special marker, must have OK fields */
2664 + if ((unsigned long)(sz) >= MINSIZE)
2666 + assert((sz & MALLOC_ALIGN_MASK) == 0);
2667 + assert(aligned_OK(chunk2mem(p)));
2668 + /* ... matching footer field */
2669 + assert(next->prev_size == sz);
2670 + /* ... and is fully consolidated */
2671 + assert(prev_inuse(p));
2672 + assert (next == av->top || inuse(next));
2674 + /* ... and has minimally sane links */
2675 + assert(p->fd->bk == p);
2676 + assert(p->bk->fd == p);
2678 + else /* markers are always of size SIZE_SZ */
2679 + assert(sz == SIZE_SZ);
2683 + Properties of inuse chunks
2686 +INLINE
2687 +#if __STD_C
2688 +static void do_check_inuse_chunk(mchunkptr p)
2689 +#else
2690 +static void do_check_inuse_chunk(p) mchunkptr p;
2691 +#endif
2693 + mstate av = get_malloc_state();
2694 + mchunkptr next;
2695 + do_check_chunk(p);
2697 + if (chunk_is_mmapped(p))
2698 + return; /* mmapped chunks have no next/prev */
2700 + /* Check whether it claims to be in use ... */
2701 + assert(inuse(p));
2703 + next = next_chunk(p);
2705 + /* ... and is surrounded by OK chunks.
2706 + Since more things can be checked with free chunks than inuse ones,
2707 + if an inuse chunk borders them and debug is on, it's worth doing them.
2708 + */
2709 + if (!prev_inuse(p)) {
2710 + /* Note that we cannot even look at prev unless it is not inuse */
2711 + mchunkptr prv = prev_chunk(p);
2712 + assert(next_chunk(prv) == p);
2713 + do_check_free_chunk(prv);
2716 + if (next == av->top) {
2717 + assert(prev_inuse(next));
2718 + assert(chunksize(next) >= MINSIZE);
2720 + else if (!inuse(next))
2721 + do_check_free_chunk(next);
2725 + Properties of chunks recycled from fastbins
2728 +INLINE
2729 +#if __STD_C
2730 +static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
2731 +#else
2732 +static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
2733 +#endif
2735 + INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
2737 + do_check_inuse_chunk(p);
2739 + /* Legal size ... */
2740 + assert((sz & MALLOC_ALIGN_MASK) == 0);
2741 + assert((unsigned long)(sz) >= MINSIZE);
2742 + /* ... and alignment */
2743 + assert(aligned_OK(chunk2mem(p)));
2744 + /* chunk is less than MINSIZE more than request */
2745 + assert((long)(sz) - (long)(s) >= 0);
2746 + assert((long)(sz) - (long)(s + MINSIZE) < 0);
2750 + Properties of nonrecycled chunks at the point they are malloced
2753 +INLINE
2754 +#if __STD_C
2755 +static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
2756 +#else
2757 +static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
2758 +#endif
2760 + /* same as recycled case ... */
2761 + do_check_remalloced_chunk(p, s);
2763 + /*
2764 + ... plus, must obey implementation invariant that prev_inuse is
2765 + always true of any allocated chunk; i.e., that each allocated
2766 + chunk borders either a previously allocated and still in-use
2767 + chunk, or the base of its memory arena. This is ensured
2768 + by making all allocations from the `lowest' part of any found
2769 + chunk. This does not necessarily hold however for chunks
2770 + recycled via fastbins.
2771 + */
2773 + assert(prev_inuse(p));
2778 + Properties of malloc_state.
2780 + This may be useful for debugging malloc, as well as detecting user
2781 + programming errors that somehow write into malloc_state.
2783 + If you are extending or experimenting with this malloc, you can
2784 + probably figure out how to hack this routine to print out or
2785 + display chunk addresses, sizes, bins, and other instrumentation.
2788 +static void do_check_malloc_state()
2790 + mstate av = get_malloc_state();
2791 + int i;
2792 + mchunkptr p;
2793 + mchunkptr q;
2794 + mbinptr b;
2795 + unsigned int binbit;
2796 + int empty;
2797 + unsigned int idx;
2798 + INTERNAL_SIZE_T size;
2799 + unsigned long total = 0;
2800 + int max_fast_bin;
2802 + /* internal size_t must be no wider than pointer type */
2803 + assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2805 + /* alignment is a power of 2 */
2806 + assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2808 + /* cannot run remaining checks until fully initialized */
2809 + if (av->top == 0 || av->top == initial_top(av))
2810 + return;
2812 + /* pagesize is a power of 2 */
2813 + assert((av->pagesize & (av->pagesize-1)) == 0);
2815 + /* properties of fastbins */
2817 + /* max_fast is in allowed range */
2818 + assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));
2820 + max_fast_bin = fastbin_index(av->max_fast);
2822 + for (i = 0; i < NFASTBINS; ++i) {
2823 + p = av->fastbins[i];
2825 + /* all bins past max_fast are empty */
2826 + if (i > max_fast_bin)
2827 + assert(p == 0);
2829 + while (p != 0) {
2830 + /* each chunk claims to be inuse */
2831 + do_check_inuse_chunk(p);
2832 + total += chunksize(p);
2833 + /* chunk belongs in this bin */
2834 + assert(fastbin_index(chunksize(p)) == i);
2835 + p = p->fd;
2839 + if (total != 0)
2840 + assert(have_fastchunks(av));
2841 + else if (!have_fastchunks(av))
2842 + assert(total == 0);
2844 + /* check normal bins */
2845 + for (i = 1; i < NBINS; ++i) {
2846 + b = bin_at(av,i);
2848 + /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2849 + if (i >= 2) {
2850 + binbit = get_binmap(av,i);
2851 + empty = last(b) == b;
2852 + if (!binbit)
2853 + assert(empty);
2854 + else if (!empty)
2855 + assert(binbit);
2858 + for (p = last(b); p != b; p = p->bk) {
2859 + /* each chunk claims to be free */
2860 + do_check_free_chunk(p);
2861 + size = chunksize(p);
2862 + total += size;
2863 + if (i >= 2) {
2864 + /* chunk belongs in bin */
2865 + idx = bin_index(size);
2866 + assert(idx == i);
2867 + /* lists are sorted */
2868 + assert(p->bk == b ||
2869 + (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
2871 + /* chunk is followed by a legal chain of inuse chunks */
2872 + for (q = next_chunk(p);
2873 + (q != av->top && inuse(q) &&
2874 + (unsigned long)(chunksize(q)) >= MINSIZE);
2875 + q = next_chunk(q))
2876 + do_check_inuse_chunk(q);
2880 + /* top chunk is OK */
2881 + check_chunk(av->top);
2883 + /* sanity checks for statistics */
2885 + assert(total <= (unsigned long)(av->max_total_mem));
2886 + assert(av->n_mmaps >= 0);
2887 + assert(av->n_mmaps <= av->n_mmaps_max);
2888 + assert(av->n_mmaps <= av->max_n_mmaps);
2890 + assert((unsigned long)(av->sbrked_mem) <=
2891 + (unsigned long)(av->max_sbrked_mem));
2893 + assert((unsigned long)(av->mmapped_mem) <=
2894 + (unsigned long)(av->max_mmapped_mem));
2896 + assert((unsigned long)(av->max_total_mem) >=
2897 + (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
2899 +#endif
2902 +/* ----------- Routines dealing with system allocation -------------- */
2905 + sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
2906 + to the system (via negative arguments to sbrk) if there is unused
2907 + memory at the `high' end of the malloc pool. It is called
2908 + automatically by free() when top space exceeds the trim
2909 + threshold. It is also called by the public malloc_trim routine. It
2910 + returns 1 if it actually released any memory, else 0.
2913 +INLINE
2914 +#if __STD_C
2915 +static int sYSTRIm(size_t pad, mstate av)
2916 +#else
2917 +static int sYSTRIm(pad, av) size_t pad; mstate av;
2918 +#endif
2920 + long top_size; /* Amount of top-most memory */
2921 + long extra; /* Amount to release */
2922 + long released; /* Amount actually released */
2923 + char* current_brk; /* address returned by pre-check sbrk call */
2924 + char* new_brk; /* address returned by post-check sbrk call */
2925 + size_t pagesz;
2927 + pagesz = av->pagesize;
2928 + top_size = chunksize(av->top);
2930 + /* Release in pagesize units, keeping at least one page */
2931 + extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
2933 + if (extra > 0) {
2935 + /*
2936 + Only proceed if end of memory is where we last set it.
2937 + This avoids problems if there were foreign sbrk calls.
2938 + */
2939 + current_brk = (char*)(MORECORE(0));
2940 + if (current_brk == (char*)(av->top) + top_size) {
2942 + /*
2943 + Attempt to release memory. We ignore MORECORE return value,
2944 + and instead call again to find out where new end of memory is.
2945 + This avoids problems if the first call releases less than we asked,
2946 + or if a failure somehow altered the brk value. (We could still
2947 + encounter problems if it altered brk in some very bad way,
2948 + but the only thing we can do is adjust anyway, which will cause
2949 + some downstream failure.)
2950 + */
2952 + MORECORE(-extra);
2953 + new_brk = (char*)(MORECORE(0));
2955 + if (new_brk != (char*)MORECORE_FAILURE) {
2956 + released = (long)(current_brk - new_brk);
2958 + if (released != 0) {
2959 + /* Success. Adjust top. */
2960 + av->sbrked_mem -= released;
2961 + set_head(av->top, (top_size - released) | PREV_INUSE);
2962 + check_malloc_state();
2963 + return 1;
2968 + return 0;
2972 + ------------------------- malloc_consolidate -------------------------
2974 + malloc_consolidate is a specialized version of free() that tears
2975 + down chunks held in fastbins. Free itself cannot be used for this
2976 + purpose since, among other things, it might place chunks back onto
2977 + fastbins. So, instead, we need to use a minor variant of the same
2978 + code.
2980 + Also, because this routine needs to be called the first time through
2981 + malloc anyway, it turns out to be the perfect place to trigger
2982 + initialization code.
2985 +INLINE
2986 +#if __STD_C
2987 +static void malloc_consolidate(mstate av)
2988 +#else
2989 +static void malloc_consolidate(av) mstate av;
2990 +#endif
2992 + mfastbinptr* fb; /* current fastbin being consolidated */
2993 + mfastbinptr* maxfb; /* last fastbin (for loop control) */
2994 + mchunkptr p; /* current chunk being consolidated */
2995 + mchunkptr nextp; /* next chunk to consolidate */
2996 + mchunkptr unsorted_bin; /* bin header */
2997 + mchunkptr first_unsorted; /* chunk to link to */
2999 + /* These have same use as in free() */
3000 + mchunkptr nextchunk;
3001 + INTERNAL_SIZE_T size;
3002 + INTERNAL_SIZE_T nextsize;
3003 + INTERNAL_SIZE_T prevsize;
3004 + int nextinuse;
3005 + mchunkptr bck;
3006 + mchunkptr fwd;
3008 + /*
3009 + If max_fast is 0, we know that av hasn't
3010 + yet been initialized, in which case do so below
3011 + */
3013 + if (av->max_fast != 0) {
3014 + clear_fastchunks(av);
3016 + unsorted_bin = unsorted_chunks(av);
3018 + /*
3019 + Remove each chunk from fast bin and consolidate it, placing it
3020 + then in unsorted bin. Among other reasons for doing this,
3021 + placing in unsorted bin avoids needing to calculate actual bins
3022 + until malloc is sure that chunks aren't immediately going to be
3023 + reused anyway.
3024 + */
3026 + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
3027 + fb = &(av->fastbins[0]);
3028 + do {
3029 + if ( (p = *fb) != 0) {
3030 + *fb = 0;
3032 + do {
3033 + check_inuse_chunk(p);
3034 + nextp = p->fd;
3036 + /* Slightly streamlined version of consolidation code in free() */
3037 + size = p->size & ~PREV_INUSE;
3038 + nextchunk = chunk_at_offset(p, size);
3039 + nextsize = chunksize(nextchunk);
3041 + if (!prev_inuse(p)) {
3042 + prevsize = p->prev_size;
3043 + size += prevsize;
3044 + p = chunk_at_offset(p, -((long) prevsize));
3045 + unlink(p, bck, fwd);
3048 + if (nextchunk != av->top) {
3049 + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3050 + set_head(nextchunk, nextsize);
3052 + if (!nextinuse) {
3053 + size += nextsize;
3054 + unlink(nextchunk, bck, fwd);
3057 + first_unsorted = unsorted_bin->fd;
3058 + unsorted_bin->fd = p;
3059 + first_unsorted->bk = p;
3061 + set_head(p, size | PREV_INUSE);
3062 + p->bk = unsorted_bin;
3063 + p->fd = first_unsorted;
3064 + set_foot(p, size);
3067 + else {
3068 + size += nextsize;
3069 + set_head(p, size | PREV_INUSE);
3070 + av->top = p;
3073 + } while ( (p = nextp) != 0);
3076 + } while (fb++ != maxfb);
3078 + else {
3079 + malloc_init_state(av);
3080 + check_malloc_state();
3085 + ------------------------------ free ------------------------------
3088 +INLINE
3089 +#if __STD_C
3090 +void fREe(Void_t* mem)
3091 +#else
3092 +void fREe(mem) Void_t* mem;
3093 +#endif
3095 + mstate av = get_malloc_state();
3097 + mchunkptr p; /* chunk corresponding to mem */
3098 + INTERNAL_SIZE_T size; /* its size */
3099 + mfastbinptr* fb; /* associated fastbin */
3100 + mchunkptr nextchunk; /* next contiguous chunk */
3101 + INTERNAL_SIZE_T nextsize; /* its size */
3102 + int nextinuse; /* true if nextchunk is used */
3103 + INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3104 + mchunkptr bck; /* misc temp for linking */
3105 + mchunkptr fwd; /* misc temp for linking */
3108 + /* free(0) has no effect */
3109 + if (mem != 0) {
3110 + p = mem2chunk(mem);
3111 + size = chunksize(p);
3113 + check_inuse_chunk(p);
3115 + /*
3116 + If eligible, place chunk on a fastbin so it can be found
3117 + and used quickly in malloc.
3118 + */
3120 + if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
3122 +#if TRIM_FASTBINS
3123 + /*
3124 + If TRIM_FASTBINS set, don't place chunks
3125 + bordering top into fastbins
3126 + */
3127 + && (chunk_at_offset(p, size) != av->top)
3128 +#endif
3129 + ) {
3131 + set_fastchunks(av);
3132 + fb = &(av->fastbins[fastbin_index(size)]);
3133 + p->fd = *fb;
3134 + *fb = p;
3137 + /*
3138 + Consolidate other non-mmapped chunks as they arrive.
3139 + */
3141 + else if (!chunk_is_mmapped(p)) {
3142 + nextchunk = chunk_at_offset(p, size);
3143 + nextsize = chunksize(nextchunk);
3145 + /* consolidate backward */
3146 + if (!prev_inuse(p)) {
3147 + prevsize = p->prev_size;
3148 + size += prevsize;
3149 + p = chunk_at_offset(p, -((long) prevsize));
3150 + unlink(p, bck, fwd);
3153 + if (nextchunk != av->top) {
3154 + /* get and clear inuse bit */
3155 + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
3156 + set_head(nextchunk, nextsize);
3158 + /* consolidate forward */
3159 + if (!nextinuse) {
3160 + unlink(nextchunk, bck, fwd);
3161 + size += nextsize;
3164 + /*
3165 + Place the chunk in unsorted chunk list. Chunks are
3166 + not placed into regular bins until after they have
3167 + been given one chance to be used in malloc.
3168 + */
3170 + bck = unsorted_chunks(av);
3171 + fwd = bck->fd;
3172 + p->bk = bck;
3173 + p->fd = fwd;
3174 + bck->fd = p;
3175 + fwd->bk = p;
3177 + set_head(p, size | PREV_INUSE);
3178 + set_foot(p, size);
3180 + check_free_chunk(p);
3183 + /*
3184 + If the chunk borders the current high end of memory,
3185 + consolidate into top
3186 + */
3188 + else {
3189 + size += nextsize;
3190 + set_head(p, size | PREV_INUSE);
3191 + av->top = p;
3192 + check_chunk(p);
3195 + /*
3196 + If freeing a large space, consolidate possibly-surrounding
3197 + chunks. Then, if the total unused topmost memory exceeds trim
3198 + threshold, ask malloc_trim to reduce top.
3200 + Unless max_fast is 0, we don't know if there are fastbins
3201 + bordering top, so we cannot tell for sure whether threshold
3202 + has been reached unless fastbins are consolidated. But we
3203 + don't want to consolidate on each free. As a compromise,
3204 + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
3205 + is reached.
3206 + */
3208 + if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
3209 + if (have_fastchunks(av))
3210 + malloc_consolidate(av);
3212 +#ifndef MORECORE_CANNOT_TRIM
3213 + if ((unsigned long)(chunksize(av->top)) >=
3214 + (unsigned long)(av->trim_threshold))
3215 + sYSTRIm(av->top_pad, av);
3216 +#endif
3220 + /*
3221 + If the chunk was allocated via mmap, release via munmap()
3222 + Note that if HAVE_MMAP is false but chunk_is_mmapped is
2223 + true, then the user must have overwritten memory. There's nothing
2224 + we can do to catch this error unless DEBUG is set, in which case
2225 + check_inuse_chunk (above) will have triggered an error.
3226 + */
3228 + else {
3229 +#if HAVE_MMAP
3230 + int ret;
3231 + INTERNAL_SIZE_T offset = p->prev_size;
3232 + av->n_mmaps--;
3233 + av->mmapped_mem -= (size + offset);
3234 + ret = munmap((char*)p - offset, size + offset);
3235 + /* munmap returns non-zero on failure */
3236 + assert(ret == 0);
3237 +#endif
3243 + sysmalloc handles malloc cases requiring more memory from the system.
3244 + On entry, it is assumed that av->top does not have enough
3245 + space to service request for nb bytes, thus requiring that av->top
3246 + be extended or replaced.
3249 +INLINE
3250 +#if __STD_C
3251 +static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
3252 +#else
3253 +static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
3254 +#endif
3256 + mchunkptr old_top; /* incoming value of av->top */
3257 + INTERNAL_SIZE_T old_size; /* its size */
3258 + char* old_end; /* its end address */
3260 + long size; /* arg to first MORECORE or mmap call */
3261 + char* brk; /* return value from MORECORE */
3263 + long correction; /* arg to 2nd MORECORE call */
3264 + char* snd_brk; /* 2nd return val */
3266 + INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
3267 + INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
3268 + char* aligned_brk; /* aligned offset into brk */
3270 + mchunkptr p; /* the allocated/returned chunk */
3271 + mchunkptr remainder; /* remainder from allocation */
3272 + unsigned long remainder_size; /* its size */
3274 + unsigned long sum; /* for updating stats */
3276 + size_t pagemask = av->pagesize - 1;
3279 +#if HAVE_MMAP
3281 + /*
3282 + If have mmap, and the request size meets the mmap threshold, and
3283 + the system supports mmap, and there are few enough currently
3284 + allocated mmapped regions, try to directly map this request
3285 + rather than expanding top.
3286 + */
3288 + if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
3289 + (av->n_mmaps < av->n_mmaps_max)) {
3291 + char* mm; /* return value from mmap call*/
3293 + /*
3294 + Round up size to nearest page. For mmapped chunks, the overhead
3295 + is one SIZE_SZ unit larger than for normal chunks, because there
3296 + is no following chunk whose prev_size field could be used.
3297 + */
3298 + size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
3300 + /* Don't try if size wraps around 0 */
3301 + if ((unsigned long)(size) > (unsigned long)(nb)) {
3303 + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3305 + if (mm != (char*)(MORECORE_FAILURE)) {
3307 + /*
3308 + The offset to the start of the mmapped region is stored
3309 + in the prev_size field of the chunk. This allows us to adjust
3310 + returned start address to meet alignment requirements here
3311 + and in memalign(), and still be able to compute proper
3312 + address argument for later munmap in free() and realloc().
3313 + */
3315 + front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
3316 + if (front_misalign > 0) {
3317 + correction = MALLOC_ALIGNMENT - front_misalign;
3318 + p = (mchunkptr)(mm + correction);
3319 + p->prev_size = correction;
3320 + set_head(p, (size - correction) |IS_MMAPPED);
3322 + else {
3323 + p = (mchunkptr)mm;
3324 + p->prev_size = 0;
3325 + set_head(p, size|IS_MMAPPED);
3328 + /* update statistics */
3330 + if (++av->n_mmaps > av->max_n_mmaps)
3331 + av->max_n_mmaps = av->n_mmaps;
3333 + sum = av->mmapped_mem += size;
3334 + if (sum > (unsigned long)(av->max_mmapped_mem))
3335 + av->max_mmapped_mem = sum;
3336 + sum += av->sbrked_mem;
3337 + if (sum > (unsigned long)(av->max_total_mem))
3338 + av->max_total_mem = sum;
3340 + check_chunk(p);
3342 + return chunk2mem(p);
3346 +#endif
3348 + /* Record incoming configuration of top */
3350 + old_top = av->top;
3351 + old_size = chunksize(old_top);
3352 + old_end = (char*)(chunk_at_offset(old_top, old_size));
3354 + brk = snd_brk = (char*)(MORECORE_FAILURE);
3356 + /*
3357 + If not the first time through, we require old_size to be
3358 + at least MINSIZE and to have prev_inuse set.
3359 + */
3361 + assert((old_top == initial_top(av) && old_size == 0) ||
3362 + ((unsigned long) (old_size) >= MINSIZE &&
3363 + prev_inuse(old_top)));
3365 + /* Precondition: not enough current space to satisfy nb request */
3366 + assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
3368 + /* Precondition: all fastbins are consolidated */
3369 + assert(!have_fastchunks(av));
3372 + /* Request enough space for nb + pad + overhead */
3374 + size = nb + av->top_pad + MINSIZE;
3376 + /*
3377 + If contiguous, we can subtract out existing space that we hope to
3378 + combine with new space. We add it back later only if
3379 + we don't actually get contiguous space.
3380 + */
3382 + if (contiguous(av))
3383 + size -= old_size;
3385 + /*
3386 + Round to a multiple of page size.
3387 + If MORECORE is not contiguous, this ensures that we only call it
3388 + with whole-page arguments. And if MORECORE is contiguous and
3389 + this is not first time through, this preserves page-alignment of
3390 + previous calls. Otherwise, we correct to page-align below.
3391 + */
3393 + size = (size + pagemask) & ~pagemask;
3395 + /*
3396 + Don't try to call MORECORE if argument is so big as to appear
3397 + negative. Note that since mmap takes size_t arg, it may succeed
3398 + below even if we cannot call MORECORE.
3399 + */
3401 + if (size > 0)
3402 + brk = (char*)(MORECORE(size));
3404 + /*
3405 + If have mmap, try using it as a backup when MORECORE fails or
3406 + cannot be used. This is worth doing on systems that have "holes" in
3407 + address space, so sbrk cannot extend to give contiguous space, but
3408 + space is available elsewhere. Note that we ignore mmap max count
3409 + and threshold limits, since the space will not be used as a
3410 + segregated mmap region.
3411 + */
3413 +#if HAVE_MMAP
3414 + if (brk == (char*)(MORECORE_FAILURE)) {
3416 + /* Cannot merge with old top, so add its size back in */
3417 + if (contiguous(av))
3418 + size = (size + old_size + pagemask) & ~pagemask;
3420 + /* If we are relying on mmap as backup, then use larger units */
3421 + if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
3422 + size = MMAP_AS_MORECORE_SIZE;
3424 + /* Don't try if size wraps around 0 */
3425 + if ((unsigned long)(size) > (unsigned long)(nb)) {
3427 + brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
3429 + if (brk != (char*)(MORECORE_FAILURE)) {
3431 + /* We do not need, and cannot use, another sbrk call to find end */
3432 + snd_brk = brk + size;
3434 + /*
3435 + Record that we no longer have a contiguous sbrk region.
3436 + After the first time mmap is used as backup, we do not
3437 + ever rely on contiguous space since this could incorrectly
3438 + bridge regions.
3439 + */
3440 + set_noncontiguous(av);
3444 +#endif
3446 + if (brk != (char*)(MORECORE_FAILURE)) {
3447 + av->sbrked_mem += size;
3449 + /*
3450 + If MORECORE extends previous space, we can likewise extend top size.
3451 + */
3453 + if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
3454 + set_head(old_top, (size + old_size) | PREV_INUSE);
3457 + /*
3458 + Otherwise, make adjustments:
3460 + * If the first time through or noncontiguous, we need to call sbrk
3461 + just to find out where the end of memory lies.
3463 + * We need to ensure that all returned chunks from malloc will meet
3464 + MALLOC_ALIGNMENT
3466 + * If there was an intervening foreign sbrk, we need to adjust sbrk
3467 + request size to account for fact that we will not be able to
3468 + combine new space with existing space in old_top.
3470 + * Almost all systems internally allocate whole pages at a time, in
3471 + which case we might as well use the whole last page of request.
3472 + So we allocate enough more memory to hit a page boundary now,
3473 + which in turn causes future contiguous calls to page-align.
3474 + */
3476 + else {
3477 + front_misalign = 0;
3478 + end_misalign = 0;
3479 + correction = 0;
3480 + aligned_brk = brk;
3482 + /* handle contiguous cases */
3483 + if (contiguous(av)) {
3485 + /* Guarantee alignment of first new chunk made from this space */
3487 + front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
3488 + if (front_misalign > 0) {
3490 + /*
3491 + Skip over some bytes to arrive at an aligned position.
3492 + We don't need to specially mark these wasted front bytes.
3493 + They will never be accessed anyway because
3494 + prev_inuse of av->top (and any chunk created from its start)
3495 + is always true after initialization.
3496 + */
3498 + correction = MALLOC_ALIGNMENT - front_misalign;
3499 + aligned_brk += correction;
3502 + /*
3503 + If this isn't adjacent to existing space, then we will not
3504 + be able to merge with old_top space, so must add to 2nd request.
3505 + */
3507 + correction += old_size;
3509 + /* Extend the end address to hit a page boundary */
3510 + end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
3511 + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
3513 + assert(correction >= 0);
3514 + snd_brk = (char*)(MORECORE(correction));
3516 + /*
3517 + If can't allocate correction, try to at least find out current
3518 + brk. It might be enough to proceed without failing.
3520 + Note that if second sbrk did NOT fail, we assume that space
3521 + is contiguous with first sbrk. This is a safe assumption unless
3522 + program is multithreaded but doesn't use locks and a foreign sbrk
3523 + occurred between our first and second calls.
3524 + */
3526 + if (snd_brk == (char*)(MORECORE_FAILURE)) {
3527 + correction = 0;
3528 + snd_brk = (char*)(MORECORE(0));
3532 + /* handle non-contiguous cases */
3533 + else {
3534 + /* MORECORE/mmap must correctly align */
3535 + assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);
3537 + /* Find out current end of memory */
3538 + if (snd_brk == (char*)(MORECORE_FAILURE)) {
3539 + snd_brk = (char*)(MORECORE(0));
3543 + /* Adjust top based on results of second sbrk */
3544 + if (snd_brk != (char*)(MORECORE_FAILURE)) {
3545 + av->top = (mchunkptr)aligned_brk;
3546 + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
3547 + av->sbrked_mem += correction;
3549 + /*
3550 + If not the first time through, we either have a
3551 + gap due to foreign sbrk or a non-contiguous region. Insert a
3552 + double fencepost at old_top to prevent consolidation with space
3553 + we don't own. These fenceposts are artificial chunks that are
3554 + marked as inuse and are in any case too small to use. We need
3555 + two to make sizes and alignments work out.
3556 + */
3558 + if (old_size != 0) {
3559 + /*
3560 + Shrink old_top to insert fenceposts, keeping size a
3561 + multiple of MALLOC_ALIGNMENT. We know there is at least
3562 + enough space in old_top to do this.
3563 + */
3564 + old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
3565 + set_head(old_top, old_size | PREV_INUSE);
3567 + /*
3568 + Note that the following assignments completely overwrite
3569 + old_top when old_size was previously MINSIZE. This is
3570 + intentional. We need the fencepost, even if old_top otherwise gets
3571 + lost.
3572 + */
3573 + chunk_at_offset(old_top, old_size )->size =
3574 + SIZE_SZ|PREV_INUSE;
3576 + chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
3577 + SIZE_SZ|PREV_INUSE;
3579 + /* If possible, release the rest. */
3580 + if (old_size >= MINSIZE) {
3581 + fREe(chunk2mem(old_top));
3588 + /* Update statistics */
3589 + sum = av->sbrked_mem;
3590 + if (sum > (unsigned long)(av->max_sbrked_mem))
3591 + av->max_sbrked_mem = sum;
3593 + sum += av->mmapped_mem;
3594 + if (sum > (unsigned long)(av->max_total_mem))
3595 + av->max_total_mem = sum;
3597 + check_malloc_state();
3599 + /* finally, do the allocation */
3600 + p = av->top;
3601 + size = chunksize(p);
3603 + /* check that one of the above allocation paths succeeded */
3604 + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3605 + remainder_size = size - nb;
3606 + remainder = chunk_at_offset(p, nb);
3607 + av->top = remainder;
3608 + set_head(p, nb | PREV_INUSE);
3609 + set_head(remainder, remainder_size | PREV_INUSE);
3610 + check_malloced_chunk(p, nb);
3611 + return chunk2mem(p);
3615 + /* catch all failure paths */
3616 + MALLOC_FAILURE_ACTION;
3617 + return 0;
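
The size arithmetic in the system-allocation routine above (referred to as sysmalloc in the comments) is easier to follow with concrete numbers. The following standalone sketch is an editorial illustration only, not part of the patch; it assumes the typical 32-bit dlmalloc constants (SIZE_SZ = 4, MALLOC_ALIGNMENT = 8, MINSIZE = 16) and a 4096-byte page, which are assumptions, not values read from this file.

/* Standalone illustration of the rounding used in the routine above,
   assuming SIZE_SZ = 4, MALLOC_ALIGNMENT = 8, MINSIZE = 16, 4096-byte pages. */
#include <stdio.h>

#define SIZE_SZ            4UL
#define MALLOC_ALIGNMENT   (2 * SIZE_SZ)
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)
#define MINSIZE            16UL

int main(void)
{
    unsigned long pagemask = 4096 - 1;

    /* direct-mmap path: request plus overhead, rounded up to whole pages */
    unsigned long nb = 150000;
    unsigned long mmsize = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
    printf("mmap size for nb=%lu: %lu (%lu pages)\n", nb, mmsize, mmsize / 4096);

    /* sbrk path: nb + top_pad + MINSIZE, also rounded up to whole pages
       (top_pad value below is just a sample) */
    unsigned long top_pad = 131072;
    unsigned long size = (nb + top_pad + MINSIZE + pagemask) & ~pagemask;
    printf("sbrk request: %lu\n", size);

    /* fencepost shrink of old_top: keep it a multiple of MALLOC_ALIGNMENT */
    unsigned long old_size = 100;
    printf("fencepost old_size: %lu\n", (old_size - 3 * SIZE_SZ) & ~MALLOC_ALIGN_MASK);
    return 0;
}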
3622 + ------------------------------ malloc ------------------------------
3625 +INLINE
3626 +#if __STD_C
3627 +Void_t* mALLOc(size_t bytes)
3628 +#else
3629 + Void_t* mALLOc(bytes) size_t bytes;
3630 +#endif
3632 + mstate av = get_malloc_state();
3634 + INTERNAL_SIZE_T nb; /* normalized request size */
3635 + unsigned int idx; /* associated bin index */
3636 + mbinptr bin; /* associated bin */
3637 + mfastbinptr* fb; /* associated fastbin */
3639 + mchunkptr victim; /* inspected/selected chunk */
3640 + INTERNAL_SIZE_T size; /* its size */
3641 + int victim_index; /* its bin index */
3643 + mchunkptr remainder; /* remainder from a split */
3644 + unsigned long remainder_size; /* its size */
3646 + unsigned int block; /* bit map traverser */
3647 + unsigned int bit; /* bit map traverser */
3648 + unsigned int map; /* current word of binmap */
3650 + mchunkptr fwd; /* misc temp for linking */
3651 + mchunkptr bck; /* misc temp for linking */
3653 + /*
3654 + Convert request size to internal form by adding SIZE_SZ bytes
3655 + overhead plus possibly more to obtain necessary alignment and/or
3656 + to obtain a size of at least MINSIZE, the smallest allocatable
3657 + size. Also, checked_request2size traps (returning 0) request sizes
3658 + that are so large that they wrap around zero when padded and
3659 + aligned.
3660 + */
3662 + checked_request2size(bytes, nb);
3664 + /*
3665 + If the size qualifies as a fastbin, first check corresponding bin.
3666 + This code is safe to execute even if av is not yet initialized, so we
3667 + can try it without checking, which saves some time on this fast path.
3668 + */
3670 + if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
3671 + fb = &(av->fastbins[(fastbin_index(nb))]);
3672 + if ( (victim = *fb) != 0) {
3673 + *fb = victim->fd;
3674 + check_remalloced_chunk(victim, nb);
3675 + return chunk2mem(victim);
3679 + /*
3680 + If a small request, check regular bin. Since these "smallbins"
3681 + hold one size each, no searching within bins is necessary.
3682 + (For a large request, we need to wait until unsorted chunks are
3683 + processed to find best fit. But for small ones, fits are exact
3684 + anyway, so we can check now, which is faster.)
3685 + */
3687 + if (in_smallbin_range(nb)) {
3688 + idx = smallbin_index(nb);
3689 + bin = bin_at(av,idx);
3691 + if ( (victim = last(bin)) != bin) {
3692 + if (victim == 0) /* initialization check */
3693 + malloc_consolidate(av);
3694 + else {
3695 + bck = victim->bk;
3696 + set_inuse_bit_at_offset(victim, nb);
3697 + bin->bk = bck;
3698 + bck->fd = bin;
3700 + check_malloced_chunk(victim, nb);
3701 + return chunk2mem(victim);
3706 + /*
3707 + If this is a large request, consolidate fastbins before continuing.
3708 + While it might look excessive to kill all fastbins before
3709 + even seeing if there is space available, this avoids
3710 + fragmentation problems normally associated with fastbins.
3711 + Also, in practice, programs tend to have runs of either small or
3712 + large requests, but less often mixtures, so consolidation is not
3713 + invoked all that often in most programs. And the programs in which
3714 + it is called frequently otherwise tend to fragment.
3715 + */
3717 + else {
3718 + idx = largebin_index(nb);
3719 + if (have_fastchunks(av))
3720 + malloc_consolidate(av);
3723 + /*
3724 + Process recently freed or remaindered chunks, taking one only if
3725 + it is an exact fit, or, if this is a small request, the chunk is the remainder from
3726 + the most recent non-exact fit. Place other traversed chunks in
3727 + bins. Note that this step is the only place in any routine where
3728 + chunks are placed in bins.
3730 + The outer loop here is needed because we might not realize until
3731 + near the end of malloc that we should have consolidated, so must
3732 + do so and retry. This happens at most once, and only when we would
3733 + otherwise need to expand memory to service a "small" request.
3734 + */
3736 + for(;;) {
3738 + while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
3739 + bck = victim->bk;
3740 + size = chunksize(victim);
3742 + /*
3743 + If a small request, try to use last remainder if it is the
3744 + only chunk in unsorted bin. This helps promote locality for
3745 + runs of consecutive small requests. This is the only
3746 + exception to best-fit, and applies only when there is
3747 + no exact fit for a small chunk.
3748 + */
3750 + if (in_smallbin_range(nb) &&
3751 + bck == unsorted_chunks(av) &&
3752 + victim == av->last_remainder &&
3753 + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
3755 + /* split and reattach remainder */
3756 + remainder_size = size - nb;
3757 + remainder = chunk_at_offset(victim, nb);
3758 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3759 + av->last_remainder = remainder;
3760 + remainder->bk = remainder->fd = unsorted_chunks(av);
3762 + set_head(victim, nb | PREV_INUSE);
3763 + set_head(remainder, remainder_size | PREV_INUSE);
3764 + set_foot(remainder, remainder_size);
3766 + check_malloced_chunk(victim, nb);
3767 + return chunk2mem(victim);
3770 + /* remove from unsorted list */
3771 + unsorted_chunks(av)->bk = bck;
3772 + bck->fd = unsorted_chunks(av);
3774 + /* Take now instead of binning if exact fit */
3776 + if (size == nb) {
3777 + set_inuse_bit_at_offset(victim, size);
3778 + check_malloced_chunk(victim, nb);
3779 + return chunk2mem(victim);
3782 + /* place chunk in bin */
3784 + if (in_smallbin_range(size)) {
3785 + victim_index = smallbin_index(size);
3786 + bck = bin_at(av, victim_index);
3787 + fwd = bck->fd;
3789 + else {
3790 + victim_index = largebin_index(size);
3791 + bck = bin_at(av, victim_index);
3792 + fwd = bck->fd;
3794 + /* maintain large bins in sorted order */
3795 + if (fwd != bck) {
3796 + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
3797 + /* if smaller than smallest, bypass loop below */
3798 + if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
3799 + fwd = bck;
3800 + bck = bck->bk;
3802 + else {
3803 + while ((unsigned long)(size) < (unsigned long)(fwd->size))
3804 + fwd = fwd->fd;
3805 + bck = fwd->bk;
3810 + mark_bin(av, victim_index);
3811 + victim->bk = bck;
3812 + victim->fd = fwd;
3813 + fwd->bk = victim;
3814 + bck->fd = victim;
3817 + /*
3818 + If a large request, scan through the chunks of current bin in
3819 + sorted order to find smallest that fits. This is the only step
3820 + where an unbounded number of chunks might be scanned without doing
3821 + anything useful with them. However the lists tend to be short.
3822 + */
3824 + if (!in_smallbin_range(nb)) {
3825 + bin = bin_at(av, idx);
3827 + /* skip scan if empty or largest chunk is too small */
3828 + if ((victim = last(bin)) != bin &&
3829 + (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {
3831 + while (((unsigned long)(size = chunksize(victim)) <
3832 + (unsigned long)(nb)))
3833 + victim = victim->bk;
3835 + remainder_size = size - nb;
3836 + unlink(victim, bck, fwd);
3838 + /* Exhaust */
3839 + if (remainder_size < MINSIZE) {
3840 + set_inuse_bit_at_offset(victim, size);
3841 + check_malloced_chunk(victim, nb);
3842 + return chunk2mem(victim);
3844 + /* Split */
3845 + else {
3846 + remainder = chunk_at_offset(victim, nb);
3847 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3848 + remainder->bk = remainder->fd = unsorted_chunks(av);
3849 + set_head(victim, nb | PREV_INUSE);
3850 + set_head(remainder, remainder_size | PREV_INUSE);
3851 + set_foot(remainder, remainder_size);
3852 + check_malloced_chunk(victim, nb);
3853 + return chunk2mem(victim);
3858 + /*
3859 + Search for a chunk by scanning bins, starting with next largest
3860 + bin. This search is strictly by best-fit; i.e., the smallest
3861 + (with ties going to approximately the least recently used) chunk
3862 + that fits is selected.
3864 + The bitmap avoids needing to check that most blocks are nonempty.
3865 + The particular case of skipping all bins during warm-up phases
3866 + when no chunks have been returned yet is faster than it might look.
3867 + */
3869 + ++idx;
3870 + bin = bin_at(av,idx);
3871 + block = idx2block(idx);
3872 + map = av->binmap[block];
3873 + bit = idx2bit(idx);
3875 + for (;;) {
3877 + /* Skip rest of block if there are no more set bits in this block. */
3878 + if (bit > map || bit == 0) {
3879 + do {
3880 + if (++block >= BINMAPSIZE) /* out of bins */
3881 + goto use_top;
3882 + } while ( (map = av->binmap[block]) == 0);
3884 + bin = bin_at(av, (block << BINMAPSHIFT));
3885 + bit = 1;
3888 + /* Advance to bin with set bit. There must be one. */
3889 + while ((bit & map) == 0) {
3890 + bin = next_bin(bin);
3891 + bit <<= 1;
3892 + assert(bit != 0);
3895 + /* Inspect the bin. It is likely to be non-empty */
3896 + victim = last(bin);
3898 + /* If a false alarm (empty bin), clear the bit. */
3899 + if (victim == bin) {
3900 + av->binmap[block] = map &= ~bit; /* Write through */
3901 + bin = next_bin(bin);
3902 + bit <<= 1;
3905 + else {
3906 + size = chunksize(victim);
3908 + /* We know the first chunk in this bin is big enough to use. */
3909 + assert((unsigned long)(size) >= (unsigned long)(nb));
3911 + remainder_size = size - nb;
3913 + /* unlink */
3914 + bck = victim->bk;
3915 + bin->bk = bck;
3916 + bck->fd = bin;
3918 + /* Exhaust */
3919 + if (remainder_size < MINSIZE) {
3920 + set_inuse_bit_at_offset(victim, size);
3921 + check_malloced_chunk(victim, nb);
3922 + return chunk2mem(victim);
3925 + /* Split */
3926 + else {
3927 + remainder = chunk_at_offset(victim, nb);
3929 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
3930 + remainder->bk = remainder->fd = unsorted_chunks(av);
3931 + /* advertise as last remainder */
3932 + if (in_smallbin_range(nb))
3933 + av->last_remainder = remainder;
3935 + set_head(victim, nb | PREV_INUSE);
3936 + set_head(remainder, remainder_size | PREV_INUSE);
3937 + set_foot(remainder, remainder_size);
3938 + check_malloced_chunk(victim, nb);
3939 + return chunk2mem(victim);
3944 + use_top:
3945 + /*
3946 + If large enough, split off the chunk bordering the end of memory
3947 + (held in av->top). Note that this is in accord with the best-fit
3948 + search rule. In effect, av->top is treated as larger (and thus
3949 + less well fitting) than any other available chunk since it can
3950 + be extended to be as large as necessary (up to system
3951 + limitations).
3953 + We require that av->top always exists (i.e., has size >=
3954 + MINSIZE) after initialization, so if it would otherwise be
3955 + exhausted by the current request, it is replenished. (The main
3956 + reason for ensuring it exists is that we may need MINSIZE space
3957 + to put in fenceposts in sysmalloc.)
3958 + */
3960 + victim = av->top;
3961 + size = chunksize(victim);
3963 + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
3964 + remainder_size = size - nb;
3965 + remainder = chunk_at_offset(victim, nb);
3966 + av->top = remainder;
3967 + set_head(victim, nb | PREV_INUSE);
3968 + set_head(remainder, remainder_size | PREV_INUSE);
3970 + check_malloced_chunk(victim, nb);
3971 + return chunk2mem(victim);
3974 + /*
3975 + If there is space available in fastbins, consolidate and retry,
3976 + to possibly avoid expanding memory. This can occur only if nb is
3977 + in smallbin range so we didn't consolidate upon entry.
3978 + */
3980 + else if (have_fastchunks(av)) {
3981 + assert(in_smallbin_range(nb));
3982 + malloc_consolidate(av);
3983 + idx = smallbin_index(nb); /* restore original bin index */
3986 + /*
3987 + Otherwise, relay to handle system-dependent cases
3988 + */
3989 + else
3990 + return sYSMALLOc(nb, av);
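
The request normalization described at the top of mALLOc() above (adding SIZE_SZ bytes of overhead, padding to the alignment, and enforcing a minimum of MINSIZE) follows the usual dlmalloc request2size rule. The sketch below re-derives that rounding for a few request sizes; it is an editorial illustration, not code from the patch, and the constants (SIZE_SZ = 4, MINSIZE = 16) are assumed typical 32-bit defaults.

/* Illustration of dlmalloc-style request normalization, i.e. what
   checked_request2size is described as doing above (assumed 32-bit
   values: SIZE_SZ = 4, MALLOC_ALIGNMENT = 8, MINSIZE = 16). */
#include <stdio.h>

#define SIZE_SZ            4UL
#define MALLOC_ALIGN_MASK  7UL
#define MINSIZE            16UL

static unsigned long request2size(unsigned long req)
{
    unsigned long padded = req + SIZE_SZ + MALLOC_ALIGN_MASK;
    return (padded < MINSIZE) ? MINSIZE : (padded & ~MALLOC_ALIGN_MASK);
}

int main(void)
{
    unsigned long reqs[] = { 1, 12, 13, 24, 100, 512 };
    unsigned i;
    for (i = 0; i < sizeof reqs / sizeof reqs[0]; ++i)
        printf("malloc(%3lu) -> internal chunk size %3lu\n",
               reqs[i], request2size(reqs[i]));
    /* e.g. 1 -> 16, 12 -> 16, 13 -> 24, 24 -> 32, 100 -> 104, 512 -> 520 */
    return 0;
}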
3995 + ------------------------------ realloc ------------------------------
3999 +INLINE
4000 +#if __STD_C
4001 +Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
4002 +#else
4003 +Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
4004 +#endif
4006 + mstate av = get_malloc_state();
4008 + INTERNAL_SIZE_T nb; /* padded request size */
4010 + mchunkptr oldp; /* chunk corresponding to oldmem */
4011 + INTERNAL_SIZE_T oldsize; /* its size */
4013 + mchunkptr newp; /* chunk to return */
4014 + INTERNAL_SIZE_T newsize; /* its size */
4015 + Void_t* newmem; /* corresponding user mem */
4017 + mchunkptr next; /* next contiguous chunk after oldp */
4019 + mchunkptr remainder; /* extra space at end of newp */
4020 + unsigned long remainder_size; /* its size */
4022 + mchunkptr bck; /* misc temp for linking */
4023 + mchunkptr fwd; /* misc temp for linking */
4025 + unsigned long copysize; /* bytes to copy */
4026 + unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
4027 + INTERNAL_SIZE_T* s; /* copy source */
4028 + INTERNAL_SIZE_T* d; /* copy destination */
4031 +#ifdef REALLOC_ZERO_BYTES_FREES
4032 + if (bytes == 0) {
4033 + fREe(oldmem);
4034 + return 0;
4036 +#endif
4038 + /* realloc of null is supposed to be same as malloc */
4039 + if (oldmem == 0) return mALLOc(bytes);
4041 + checked_request2size(bytes, nb);
4043 + oldp = mem2chunk(oldmem);
4044 + oldsize = chunksize(oldp);
4046 + check_inuse_chunk(oldp);
4048 + if (!chunk_is_mmapped(oldp)) {
4050 + if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
4051 + /* already big enough; split below */
4052 + newp = oldp;
4053 + newsize = oldsize;
4056 + else {
4057 + next = chunk_at_offset(oldp, oldsize);
4059 + /* Try to expand forward into top */
4060 + if (next == av->top &&
4061 + (unsigned long)(newsize = oldsize + chunksize(next)) >=
4062 + (unsigned long)(nb + MINSIZE)) {
4063 + set_head_size(oldp, nb);
4064 + av->top = chunk_at_offset(oldp, nb);
4065 + set_head(av->top, (newsize - nb) | PREV_INUSE);
4066 + return chunk2mem(oldp);
4069 + /* Try to expand forward into next chunk; split off remainder below */
4070 + else if (next != av->top &&
4071 + !inuse(next) &&
4072 + (unsigned long)(newsize = oldsize + chunksize(next)) >=
4073 + (unsigned long)(nb)) {
4074 + newp = oldp;
4075 + unlink(next, bck, fwd);
4078 + /* allocate, copy, free */
4079 + else {
4080 + newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4081 + if (newmem == 0)
4082 + return 0; /* propagate failure */
4084 + newp = mem2chunk(newmem);
4085 + newsize = chunksize(newp);
4087 + /*
4088 + Avoid copy if newp is next chunk after oldp.
4089 + */
4090 + if (newp == next) {
4091 + newsize += oldsize;
4092 + newp = oldp;
4094 + else {
4095 + /*
4096 + Unroll copy of <= 36 bytes (72 if 8-byte sizes)
4097 + We know that contents have an odd number of
4098 + INTERNAL_SIZE_T-sized words; minimally 3.
4099 + */
4101 + copysize = oldsize - SIZE_SZ;
4102 + s = (INTERNAL_SIZE_T*)(oldmem);
4103 + d = (INTERNAL_SIZE_T*)(newmem);
4104 + ncopies = copysize / sizeof(INTERNAL_SIZE_T);
4105 + assert(ncopies >= 3);
4107 + if (ncopies > 9)
4108 + MALLOC_COPY(d, s, copysize);
4110 + else {
4111 + *(d+0) = *(s+0);
4112 + *(d+1) = *(s+1);
4113 + *(d+2) = *(s+2);
4114 + if (ncopies > 4) {
4115 + *(d+3) = *(s+3);
4116 + *(d+4) = *(s+4);
4117 + if (ncopies > 6) {
4118 + *(d+5) = *(s+5);
4119 + *(d+6) = *(s+6);
4120 + if (ncopies > 8) {
4121 + *(d+7) = *(s+7);
4122 + *(d+8) = *(s+8);
4128 + fREe(oldmem);
4129 + check_inuse_chunk(newp);
4130 + return chunk2mem(newp);
4135 + /* If possible, free extra space in old or extended chunk */
4137 + assert((unsigned long)(newsize) >= (unsigned long)(nb));
4139 + remainder_size = newsize - nb;
4141 + if (remainder_size < MINSIZE) { /* not enough extra to split off */
4142 + set_head_size(newp, newsize);
4143 + set_inuse_bit_at_offset(newp, newsize);
4145 + else { /* split remainder */
4146 + remainder = chunk_at_offset(newp, nb);
4147 + set_head_size(newp, nb);
4148 + set_head(remainder, remainder_size | PREV_INUSE);
4149 + /* Mark remainder as inuse so free() won't complain */
4150 + set_inuse_bit_at_offset(remainder, remainder_size);
4151 + fREe(chunk2mem(remainder));
4154 + check_inuse_chunk(newp);
4155 + return chunk2mem(newp);
4158 + /*
4159 + Handle mmap cases
4160 + */
4162 + else {
4163 +#if HAVE_MMAP
4165 +#if HAVE_MREMAP
4166 + INTERNAL_SIZE_T offset = oldp->prev_size;
4167 + size_t pagemask = av->pagesize - 1;
4168 + char *cp;
4169 + unsigned long sum;
4171 + /* Note the extra SIZE_SZ overhead */
4172 + newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
4174 + /* don't need to remap if still within same page */
4175 + if (oldsize == newsize - offset)
4176 + return oldmem;
4178 + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
4180 + if (cp != (char*)MORECORE_FAILURE) {
4182 + newp = (mchunkptr)(cp + offset);
4183 + set_head(newp, (newsize - offset)|IS_MMAPPED);
4185 + assert(aligned_OK(chunk2mem(newp)));
4186 + assert((newp->prev_size == offset));
4188 + /* update statistics */
4189 + sum = av->mmapped_mem += newsize - oldsize;
4190 + if (sum > (unsigned long)(av->max_mmapped_mem))
4191 + av->max_mmapped_mem = sum;
4192 + sum += av->sbrked_mem;
4193 + if (sum > (unsigned long)(av->max_total_mem))
4194 + av->max_total_mem = sum;
4196 + return chunk2mem(newp);
4198 +#endif
4200 + /* Note the extra SIZE_SZ overhead. */
4201 + if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
4202 + newmem = oldmem; /* do nothing */
4203 + else {
4204 + /* Must alloc, copy, free. */
4205 + newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
4206 + if (newmem != 0) {
4207 + MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
4208 + fREe(oldmem);
4211 + return newmem;
4213 +#else
4214 + /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
4215 + check_malloc_state();
4216 + MALLOC_FAILURE_ACTION;
4217 + return 0;
4218 +#endif
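
The unrolled copy in rEALLOc() above relies on the payload of a non-mmapped chunk being an odd number of INTERNAL_SIZE_T words, at least 3. Assuming 32-bit sizes (SIZE_SZ = 4) and chunk sizes that are multiples of 8 starting at MINSIZE = 16, copysize = oldsize - SIZE_SZ gives ncopies = oldsize/4 - 1, which is odd and starts at 3. This small sketch, not part of the patch, just prints the progression.

/* Why ncopies in the unrolled copy is always odd and >= 3, assuming
   32-bit sizes (SIZE_SZ = 4) and 8-byte-multiple chunk sizes. */
#include <stdio.h>

int main(void)
{
    const unsigned long SIZE_SZ = 4;
    unsigned long oldsize;
    for (oldsize = 16; oldsize <= 48; oldsize += 8) {
        unsigned long copysize = oldsize - SIZE_SZ;
        unsigned long ncopies  = copysize / SIZE_SZ;    /* oldsize/4 - 1 */
        printf("oldsize=%2lu  copysize=%2lu  ncopies=%lu\n",
               oldsize, copysize, ncopies);             /* 3, 5, 7, 9, 11 */
    }
    return 0;
}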
4223 + ------------------------------ memalign ------------------------------
4226 +INLINE
4227 +#if __STD_C
4228 +Void_t* mEMALIGn(size_t alignment, size_t bytes)
4229 +#else
4230 +Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
4231 +#endif
4233 + INTERNAL_SIZE_T nb; /* padded request size */
4234 + char* m; /* memory returned by malloc call */
4235 + mchunkptr p; /* corresponding chunk */
4236 + char* brk; /* alignment point within p */
4237 + mchunkptr newp; /* chunk to return */
4238 + INTERNAL_SIZE_T newsize; /* its size */
4239 + INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4240 + mchunkptr remainder; /* spare room at end to split off */
4241 + unsigned long remainder_size; /* its size */
4242 + INTERNAL_SIZE_T size;
4244 + /* If need less alignment than we give anyway, just relay to malloc */
4246 + if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
4248 + /* Otherwise, ensure that it is at least a minimum chunk size */
4250 + if (alignment < MINSIZE) alignment = MINSIZE;
4252 + /* Make sure alignment is power of 2 (in case MINSIZE is not). */
4253 + if ((alignment & (alignment - 1)) != 0) {
4254 + size_t a = MALLOC_ALIGNMENT * 2;
4255 + while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
4256 + alignment = a;
4259 + checked_request2size(bytes, nb);
4261 + /*
4262 + Strategy: find a spot within that chunk that meets the alignment
4263 + request, and then possibly free the leading and trailing space.
4264 + */
4267 + /* Call malloc with worst case padding to hit alignment. */
4269 + m = (char*)(mALLOc(nb + alignment + MINSIZE));
4271 + if (m == 0) return 0; /* propagate failure */
4273 + p = mem2chunk(m);
4275 + if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
4277 + /*
4278 + Find an aligned spot inside chunk. Since we need to give back
4279 + leading space in a chunk of at least MINSIZE, if the first
4280 + calculation places us at a spot with less than MINSIZE leader,
4281 + we can move to the next aligned spot -- we've allocated enough
4282 + total room so that this is always possible.
4283 + */
4285 + brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
4286 + -((signed long) alignment));
4287 + if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
4288 + brk += alignment;
4290 + newp = (mchunkptr)brk;
4291 + leadsize = brk - (char*)(p);
4292 + newsize = chunksize(p) - leadsize;
4294 + /* For mmapped chunks, just adjust offset */
4295 + if (chunk_is_mmapped(p)) {
4296 + newp->prev_size = p->prev_size + leadsize;
4297 + set_head(newp, newsize|IS_MMAPPED);
4298 + return chunk2mem(newp);
4301 + /* Otherwise, give back leader, use the rest */
4302 + set_head(newp, newsize | PREV_INUSE);
4303 + set_inuse_bit_at_offset(newp, newsize);
4304 + set_head_size(p, leadsize);
4305 + fREe(chunk2mem(p));
4306 + p = newp;
4308 + assert (newsize >= nb &&
4309 + (((unsigned long)(chunk2mem(p))) % alignment) == 0);
4312 + /* Also give back spare room at the end */
4313 + if (!chunk_is_mmapped(p)) {
4314 + size = chunksize(p);
4315 + if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
4316 + remainder_size = size - nb;
4317 + remainder = chunk_at_offset(p, nb);
4318 + set_head(remainder, remainder_size | PREV_INUSE);
4319 + set_head_size(p, nb);
4320 + fREe(chunk2mem(remainder));
4324 + check_inuse_chunk(p);
4325 + return chunk2mem(p);
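
The aligned-spot computation in mEMALIGn() above uses the standard round-up-to-a-power-of-two idiom, (addr + alignment - 1) & -alignment. A tiny illustration with hypothetical values, not taken from the patch:

/* The round-up idiom used in the alignment search above; the address and
   alignment below are hypothetical sample values. */
#include <stdio.h>

int main(void)
{
    unsigned long m         = 0x1000A4UL;   /* hypothetical malloc return address */
    unsigned long alignment = 64;           /* requested alignment, a power of two */

    unsigned long aligned = (m + alignment - 1) & -(long)alignment;
    printf("m = %#lx  ->  aligned = %#lx\n", m, aligned);  /* 0x1000a4 -> 0x1000c0 */
    return 0;
}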
4329 + ------------------------------ calloc ------------------------------
4332 +INLINE
4333 +#if __STD_C
4334 +Void_t* cALLOc(size_t n_elements, size_t elem_size)
4335 +#else
4336 +Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
4337 +#endif
4339 + mchunkptr p;
4340 + unsigned long clearsize;
4341 + unsigned long nclears;
4342 + INTERNAL_SIZE_T* d;
4344 + Void_t* mem = mALLOc(n_elements * elem_size);
4346 + if (mem != 0) {
4347 + p = mem2chunk(mem);
4349 + if (!chunk_is_mmapped(p))
4351 + /*
4352 + Unroll clear of <= 36 bytes (72 if 8-byte sizes)
4353 + We know that contents have an odd number of
4354 + INTERNAL_SIZE_T-sized words; minimally 3.
4355 + */
4357 + d = (INTERNAL_SIZE_T*)mem;
4358 + clearsize = chunksize(p) - SIZE_SZ;
4359 + nclears = clearsize / sizeof(INTERNAL_SIZE_T);
4360 + assert(nclears >= 3);
4362 + if (nclears > 9)
4363 + MALLOC_ZERO(d, clearsize);
4365 + else {
4366 + *(d+0) = 0;
4367 + *(d+1) = 0;
4368 + *(d+2) = 0;
4369 + if (nclears > 4) {
4370 + *(d+3) = 0;
4371 + *(d+4) = 0;
4372 + if (nclears > 6) {
4373 + *(d+5) = 0;
4374 + *(d+6) = 0;
4375 + if (nclears > 8) {
4376 + *(d+7) = 0;
4377 + *(d+8) = 0;
4383 +#if ! MMAP_CLEARS
4384 + else
4386 + d = (INTERNAL_SIZE_T*)mem;
4387 + clearsize = chunksize(p) - 2 * SIZE_SZ;
4388 + MALLOC_ZERO(d, clearsize);
4390 +#endif
4392 + return mem;
4396 + ------------------------------ cfree ------------------------------
4399 +INLINE
4400 +#if __STD_C
4401 +void cFREe(Void_t *mem)
4402 +#else
4403 +void cFREe(mem) Void_t *mem;
4404 +#endif
4406 + fREe(mem);
4410 + ------------------------------ ialloc ------------------------------
4411 + ialloc provides common support for independent_X routines, handling all of
4412 + the combinations that can result.
4414 + The opts arg has:
4415 + bit 0 set if all elements are same size (using sizes[0])
4416 + bit 1 set if elements should be zeroed
4420 +INLINE
4421 +#if __STD_C
4422 +static Void_t** iALLOc(size_t n_elements,
4423 + size_t* sizes,
4424 + int opts,
4425 + Void_t* chunks[])
4426 +#else
4427 +static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
4428 +#endif
4430 + mstate av = get_malloc_state();
4431 + INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
4432 + INTERNAL_SIZE_T contents_size; /* total size of elements */
4433 + INTERNAL_SIZE_T array_size; /* request size of pointer array */
4434 + Void_t* mem; /* malloced aggregate space */
4435 + mchunkptr p; /* corresponding chunk */
4436 + INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
4437 + Void_t** marray; /* either "chunks" or malloced ptr array */
4438 + mchunkptr array_chunk; /* chunk for malloced ptr array */
4439 + int mmx; /* to disable mmap */
4440 + INTERNAL_SIZE_T size;
4441 + size_t i;
4443 + /* Ensure initialization/consolidation */
4444 + if (have_fastchunks(av)) malloc_consolidate(av);
4446 + /* compute array length, if needed */
4447 + if (chunks != 0) {
4448 + if (n_elements == 0)
4449 + return chunks; /* nothing to do */
4450 + marray = chunks;
4451 + array_size = 0;
4453 + else {
4454 + /* if empty req, must still return chunk representing empty array */
4455 + if (n_elements == 0)
4456 + return (Void_t**) mALLOc(0);
4457 + marray = 0;
4458 + array_size = request2size(n_elements * (sizeof(Void_t*)));
4461 + /* compute total element size */
4462 + if (opts & 0x1) { /* all-same-size */
4463 + element_size = request2size(*sizes);
4464 + contents_size = n_elements * element_size;
4466 + else { /* add up all the sizes */
4467 + element_size = 0;
4468 + contents_size = 0;
4469 + for (i = 0; i != n_elements; ++i)
4470 + contents_size += request2size(sizes[i]);
4473 + /* subtract out alignment bytes from total to minimize overallocation */
4474 + size = contents_size + array_size - MALLOC_ALIGN_MASK;
4476 + /*
4477 + Allocate the aggregate chunk.
4478 + But first disable mmap so malloc won't use it, since
4479 + we would not be able to later free/realloc space internal
4480 + to a segregated mmap region.
4481 + */
4482 + mmx = av->n_mmaps_max; /* disable mmap */
4483 + av->n_mmaps_max = 0;
4484 + mem = mALLOc(size);
4485 + av->n_mmaps_max = mmx; /* reset mmap */
4486 + if (mem == 0)
4487 + return 0;
4489 + p = mem2chunk(mem);
4490 + assert(!chunk_is_mmapped(p));
4491 + remainder_size = chunksize(p);
4493 + if (opts & 0x2) { /* optionally clear the elements */
4494 + MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
4497 + /* If not provided, allocate the pointer array as final part of chunk */
4498 + if (marray == 0) {
4499 + array_chunk = chunk_at_offset(p, contents_size);
4500 + marray = (Void_t**) (chunk2mem(array_chunk));
4501 + set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
4502 + remainder_size = contents_size;
4505 + /* split out elements */
4506 + for (i = 0; ; ++i) {
4507 + marray[i] = chunk2mem(p);
4508 + if (i != n_elements-1) {
4509 + if (element_size != 0)
4510 + size = element_size;
4511 + else
4512 + size = request2size(sizes[i]);
4513 + remainder_size -= size;
4514 + set_head(p, size | PREV_INUSE);
4515 + p = chunk_at_offset(p, size);
4517 + else { /* the final element absorbs any overallocation slop */
4518 + set_head(p, remainder_size | PREV_INUSE);
4519 + break;
4523 +#ifdef DEBUG
4524 + if (marray != chunks) {
4525 + /* final element must have exactly exhausted chunk */
4526 + if (element_size != 0)
4527 + assert(remainder_size == element_size);
4528 + else
4529 + assert(remainder_size == request2size(sizes[i]));
4530 + check_inuse_chunk(mem2chunk(marray));
4533 + for (i = 0; i != n_elements; ++i)
4534 + check_inuse_chunk(mem2chunk(marray[i]));
4535 +#endif
4537 + return marray;
4542 + ------------------------- independent_calloc -------------------------
4545 +INLINE
4546 +#if __STD_C
4547 +Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
4548 +#else
4549 +Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
4550 +#endif
4552 + size_t sz = elem_size; /* serves as 1-element array */
4553 + /* opts arg of 3 means all elements are same size, and should be cleared */
4554 + return iALLOc(n_elements, &sz, 3, chunks);
4558 + ------------------------- independent_comalloc -------------------------
4561 +INLINE
4562 +#if __STD_C
4563 +Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
4564 +#else
4565 +Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
4566 +#endif
4568 + return iALLOc(n_elements, sizes, 0, chunks);
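
Whether iCALLOc() and iCOMALLOc() end up exported under the classic dlmalloc names independent_calloc() and independent_comalloc() depends on how the surrounding build maps the public symbols, so the names in the sketch below are an assumption. It only shows the intended call pattern from the dlmalloc documentation: many elements carved out of one aggregate chunk, with opts bit 0 selecting equal sizes and bit 1 selecting zeroing, as described above.

/* Intended call pattern for the independent_* routines; the exported
   names are assumed, and the prototypes are declared locally here. */
#include <stddef.h>

void **independent_calloc(size_t n_elements, size_t elem_size, void *chunks[]);
void **independent_comalloc(size_t n_elements, size_t sizes[], void *chunks[]);

struct node { int key; struct node *next; };

int main(void)
{
    /* 100 equally sized, zeroed elements taken from one aggregate chunk */
    struct node **pool = (struct node **)
        independent_calloc(100, sizeof(struct node), NULL);
    if (pool)
        pool[0]->key = 42;          /* each pool[i] is a distinct element */

    /* three differently sized, co-allocated blocks */
    size_t sizes[3] = { 16, 128, 1024 };
    void **blocks = independent_comalloc(3, sizes, NULL);
    (void)blocks;
    return 0;
}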
4573 + ------------------------------ valloc ------------------------------
4576 +INLINE
4577 +#if __STD_C
4578 +Void_t* vALLOc(size_t bytes)
4579 +#else
4580 +Void_t* vALLOc(bytes) size_t bytes;
4581 +#endif
4583 + /* Ensure initialization/consolidation */
4584 + mstate av = get_malloc_state();
4585 + if (have_fastchunks(av)) malloc_consolidate(av);
4586 + return mEMALIGn(av->pagesize, bytes);
4590 + ------------------------------ pvalloc ------------------------------
4594 +#if __STD_C
4595 +Void_t* pVALLOc(size_t bytes)
4596 +#else
4597 +Void_t* pVALLOc(bytes) size_t bytes;
4598 +#endif
4600 + mstate av = get_malloc_state();
4601 + size_t pagesz;
4603 + /* Ensure initialization/consolidation */
4604 + if (have_fastchunks(av)) malloc_consolidate(av);
4605 + pagesz = av->pagesize;
4606 + return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
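
vALLOc() and pVALLOc() both return page-aligned memory; pVALLOc() additionally rounds the requested length up to a whole number of pages, as the expression above shows. A small illustration of that rounding, assuming a 4096-byte page (an assumption, not a value from this file):

/* pvalloc-style length rounding, assuming a 4096-byte page. */
#include <stdio.h>

int main(void)
{
    unsigned long pagesz  = 4096;
    unsigned long bytes   = 5000;
    unsigned long rounded = (bytes + pagesz - 1) & ~(pagesz - 1);
    printf("pvalloc(%lu) asks memalign for %lu bytes (%lu pages)\n",
           bytes, rounded, rounded / pagesz);   /* 8192 bytes, 2 pages */
    return 0;
}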
4611 + ------------------------------ malloc_trim ------------------------------
4614 +INLINE
4615 +#if __STD_C
4616 +int mTRIm(size_t pad)
4617 +#else
4618 +int mTRIm(pad) size_t pad;
4619 +#endif
4621 + mstate av = get_malloc_state();
4622 + /* Ensure initialization/consolidation */
4623 + malloc_consolidate(av);
4625 +#ifndef MORECORE_CANNOT_TRIM
4626 + return sYSTRIm(pad, av);
4627 +#else
4628 + return 0;
4629 +#endif
4634 + ------------------------- malloc_usable_size -------------------------
4637 +INLINE
4638 +#if __STD_C
4639 +size_t mUSABLe(Void_t* mem)
4640 +#else
4641 +size_t mUSABLe(mem) Void_t* mem;
4642 +#endif
4644 + mchunkptr p;
4645 + if (mem != 0) {
4646 + p = mem2chunk(mem);
4647 + if (chunk_is_mmapped(p))
4648 + return chunksize(p) - 2*SIZE_SZ;
4649 + else if (inuse(p))
4650 + return chunksize(p) - SIZE_SZ;
4652 + return 0;
4656 + ------------------------------ mallinfo ------------------------------
4659 +struct mallinfo mALLINFo()
4661 + mstate av = get_malloc_state();
4662 + struct mallinfo mi;
4663 + unsigned int i;
4664 + mbinptr b;
4665 + mchunkptr p;
4666 + INTERNAL_SIZE_T avail;
4667 + INTERNAL_SIZE_T fastavail;
4668 + int nblocks;
4669 + int nfastblocks;
4671 + /* Ensure initialization */
4672 + if (av->top == 0) malloc_consolidate(av);
4674 + check_malloc_state();
4676 + /* Account for top */
4677 + avail = chunksize(av->top);
4678 + nblocks = 1; /* top always exists */
4680 + /* traverse fastbins */
4681 + nfastblocks = 0;
4682 + fastavail = 0;
4684 + for (i = 0; i < NFASTBINS; ++i) {
4685 + for (p = av->fastbins[i]; p != 0; p = p->fd) {
4686 + ++nfastblocks;
4687 + fastavail += chunksize(p);
4691 + avail += fastavail;
4693 + /* traverse regular bins */
4694 + for (i = 1; i < NBINS; ++i) {
4695 + b = bin_at(av, i);
4696 + for (p = last(b); p != b; p = p->bk) {
4697 + ++nblocks;
4698 + avail += chunksize(p);
4702 + mi.smblks = nfastblocks;
4703 + mi.ordblks = nblocks;
4704 + mi.fordblks = avail;
4705 + mi.uordblks = av->sbrked_mem - avail;
4706 + mi.arena = av->sbrked_mem;
4707 + mi.hblks = av->n_mmaps;
4708 + mi.hblkhd = av->mmapped_mem;
4709 + mi.fsmblks = fastavail;
4710 + mi.keepcost = chunksize(av->top);
4711 + mi.usmblks = av->max_total_mem;
4712 + return mi;
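
The field assignments above define how the statistics are meant to be read, and they are exactly what mSTATs() below prints. A usage sketch follows; it assumes the build exposes the routine under the conventional name mallinfo() through a malloc.h-style header, which is an assumption about the symbol mapping, not something stated in this patch.

/* Reading the statistics filled in above, using the field meanings
   assigned there; mallinfo() and <malloc.h> are assumed to be available. */
#include <malloc.h>
#include <stdio.h>

int main(void)
{
    struct mallinfo mi = mallinfo();

    printf("sbrk'd arena bytes     : %d\n", mi.arena);    /* av->sbrked_mem  */
    printf("mmapped bytes          : %d\n", mi.hblkhd);   /* av->mmapped_mem */
    printf("in-use sbrk'd bytes    : %d\n", mi.uordblks); /* sbrked - free   */
    printf("free bytes in bins/top : %d\n", mi.fordblks);
    printf("releasable top bytes   : %d\n", mi.keepcost);
    return 0;
}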
4716 + ------------------------------ malloc_stats ------------------------------
4719 +void mSTATs()
4721 + struct mallinfo mi = mALLINFo();
4723 +#ifdef WIN32
4725 + unsigned long free, reserved, committed;
4726 + vminfo (&free, &reserved, &committed);
4727 + fprintf(stderr, "free bytes = %10lu\n",
4728 + free);
4729 + fprintf(stderr, "reserved bytes = %10lu\n",
4730 + reserved);
4731 + fprintf(stderr, "committed bytes = %10lu\n",
4732 + committed);
4734 +#endif
4737 + fprintf(stderr, "max system bytes = %10lu\n",
4738 + (unsigned long)(mi.usmblks));
4739 + fprintf(stderr, "system bytes = %10lu\n",
4740 + (unsigned long)(mi.arena + mi.hblkhd));
4741 + fprintf(stderr, "in use bytes = %10lu\n",
4742 + (unsigned long)(mi.uordblks + mi.hblkhd));
4745 +#ifdef WIN32
4747 + unsigned long kernel, user;
4748 + if (cpuinfo (TRUE, &kernel, &user)) {
4749 + fprintf(stderr, "kernel ms = %10lu\n",
4750 + kernel);
4751 + fprintf(stderr, "user ms = %10lu\n",
4752 + user);
4755 +#endif
4760 + ------------------------------ mallopt ------------------------------
4763 +INLINE
4764 +#if __STD_C
4765 +int mALLOPt(int param_number, int value)
4766 +#else
4767 +int mALLOPt(param_number, value) int param_number; int value;
4768 +#endif
4770 + mstate av = get_malloc_state();
4771 + /* Ensure initialization/consolidation */
4772 + malloc_consolidate(av);
4774 + switch(param_number) {
4775 + case M_MXFAST:
4776 + if (value >= 0 && value <= MAX_FAST_SIZE) {
4777 + set_max_fast(av, value);
4778 + return 1;
4780 + else
4781 + return 0;
4783 + case M_TRIM_THRESHOLD:
4784 + av->trim_threshold = value;
4785 + return 1;
4787 + case M_TOP_PAD:
4788 + av->top_pad = value;
4789 + return 1;
4791 + case M_MMAP_THRESHOLD:
4792 + av->mmap_threshold = value;
4793 + return 1;
4795 + case M_MMAP_MAX:
4796 +#if !HAVE_MMAP
4797 + if (value != 0)
4798 + return 0;
4799 +#endif
4800 + av->n_mmaps_max = value;
4801 + return 1;
4803 + default:
4804 + return 0;
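
The switch above handles the classic mallopt() tuning parameters. A usage sketch follows; it assumes a header that declares mallopt() and the M_* constants, as glibc's <malloc.h> and stock dlmalloc builds do, which may or may not match how this particular build exports the symbol.

/* Typical mallopt() tuning calls corresponding to the cases handled above;
   header and exported name are assumptions. mallopt() returns 1 on success,
   0 on failure, matching the code above. */
#include <malloc.h>
#include <stdio.h>

int main(void)
{
    /* keep up to 1 MiB of trailing free space before trimming back to the OS */
    if (!mallopt(M_TRIM_THRESHOLD, 1024 * 1024))
        fprintf(stderr, "M_TRIM_THRESHOLD rejected\n");

    /* route requests of 256 KiB and above to mmap */
    if (!mallopt(M_MMAP_THRESHOLD, 256 * 1024))
        fprintf(stderr, "M_MMAP_THRESHOLD rejected\n");

    /* allow at most 32 simultaneous mmapped regions */
    if (!mallopt(M_MMAP_MAX, 32))
        fprintf(stderr, "M_MMAP_MAX rejected\n");
    return 0;
}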
4810 + -------------------- Alternative MORECORE functions --------------------
4815 + General Requirements for MORECORE.
4817 + The MORECORE function must have the following properties:
4819 + If MORECORE_CONTIGUOUS is false:
4821 + * MORECORE must allocate in multiples of pagesize. It will
4822 + only be called with arguments that are multiples of pagesize.
4824 + * MORECORE(0) must return an address that is at least
4825 + MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4827 + else (i.e., if MORECORE_CONTIGUOUS is true):
4829 + * Consecutive calls to MORECORE with positive arguments
4830 + return increasing addresses, indicating that space has been
4831 + contiguously extended.
4833 + * MORECORE need not allocate in multiples of pagesize.
4834 + Calls to MORECORE need not have args of multiples of pagesize.
4836 + * MORECORE need not page-align.
4838 + In either case:
4840 + * MORECORE may allocate more memory than requested. (Or even less,
4841 + but this will generally result in a malloc failure.)
4843 + * MORECORE must not allocate memory when given argument zero, but
4844 + instead return one past the end address of memory from previous
4845 + nonzero call. This malloc does NOT call MORECORE(0)
4846 + until at least one call with positive arguments is made, so
4847 + the initial value returned is not important.
4849 + * Even though consecutive calls to MORECORE need not return contiguous
4850 + addresses, it must be OK for malloc'ed chunks to span multiple
4851 + regions in those cases where they do happen to be contiguous.
4853 + * MORECORE need not handle negative arguments -- it may instead
4854 + just return MORECORE_FAILURE when given negative arguments.
4855 + Negative arguments are always multiples of pagesize. MORECORE
4856 + must not misinterpret negative args as large positive unsigned
4857 + args. You can suppress all such calls from even occurring by defining
4858 + MORECORE_CANNOT_TRIM.
4860 + There is some variation across systems about the type of the
4861 + argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4862 + actually be size_t, because sbrk supports negative args, so it is
4863 + normally the signed type of the same width as size_t (sometimes
4864 + declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4865 + matter though. Internally, we use "long" as arguments, which should
4866 + work across all reasonable possibilities.
4868 + Additionally, if MORECORE ever returns failure for a positive
4869 + request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
4870 + system allocator. This is a useful backup strategy for systems with
4871 + holes in address spaces -- in this case sbrk cannot contiguously
4872 + expand the heap, but mmap may be able to map noncontiguous space.
4874 + If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4875 + a function that always returns MORECORE_FAILURE.
4877 + If you are using this malloc with something other than sbrk (or its
4878 + emulation) to supply memory regions, you probably want to set
4879 + MORECORE_CONTIGUOUS as false. As an example, here is a custom
4880 + allocator kindly contributed for pre-OSX macOS. It uses virtually
4881 + but not necessarily physically contiguous non-paged memory (locked
4882 + in, present and won't get swapped out). You can use it by
4883 + uncommenting this section, adding some #includes, and setting up the
4884 + appropriate defines above:
4886 + #define MORECORE osMoreCore
4887 + #define MORECORE_CONTIGUOUS 0
4889 + There is also a shutdown routine that should somehow be called for
4890 + cleanup upon program exit.
4892 + #define MAX_POOL_ENTRIES 100
4893 + #define MINIMUM_MORECORE_SIZE (64 * 1024)
4894 + static int next_os_pool;
4895 + void *our_os_pools[MAX_POOL_ENTRIES];
4897 + void *osMoreCore(int size)
4899 + void *ptr = 0;
4900 + static void *sbrk_top = 0;
4902 + if (size > 0)
4904 + if (size < MINIMUM_MORECORE_SIZE)
4905 + size = MINIMUM_MORECORE_SIZE;
4906 + if (CurrentExecutionLevel() == kTaskLevel)
4907 + ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4908 + if (ptr == 0)
4910 + return (void *) MORECORE_FAILURE;
4912 + // save ptrs so they can be freed during cleanup
4913 + our_os_pools[next_os_pool] = ptr;
4914 + next_os_pool++;
4915 + ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4916 + sbrk_top = (char *) ptr + size;
4917 + return ptr;
4919 + else if (size < 0)
4921 + // we don't currently support shrink behavior
4922 + return (void *) MORECORE_FAILURE;
4924 + else
4926 + return sbrk_top;
4930 + // cleanup any allocated memory pools
4931 + // called as last thing before shutting down driver
4933 + void osCleanupMem(void)
4935 + void **ptr;
4937 + for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4938 + if (*ptr)
4940 + PoolDeallocate(*ptr);
4941 + *ptr = 0;
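
To make the contract spelled out above concrete, here is a minimal sketch of a replacement MORECORE backed by a fixed static arena: it hands out increasing addresses for positive requests, returns the current break for an argument of 0, and reports MORECORE_FAILURE otherwise. The arena size, the function name, and the suggested defines are illustrative assumptions, not part of the patch.

/* A minimal sketch of a custom MORECORE obeying the requirements above:
   increasing addresses for positive args, current break for 0, and
   MORECORE_FAILURE (-1) when it cannot satisfy a request. */
#include <stddef.h>

#define ARENA_SIZE (256 * 1024)

static char   arena[ARENA_SIZE];
static size_t arena_used;                   /* current "break" offset */

void *my_morecore(long size)
{
    if (size == 0)                          /* report current end of memory */
        return arena + arena_used;

    if (size < 0)                           /* shrinking is optional; refuse it */
        return (void *)-1;                  /* i.e. MORECORE_FAILURE */

    if ((size_t)size > ARENA_SIZE - arena_used)
        return (void *)-1;                  /* arena exhausted */

    {
        void *prev_break = arena + arena_used;
        arena_used += (size_t)size;         /* extend contiguously */
        return prev_break;
    }
}

/* To plug it in, the config section at the top of this file would add
   something like the following (names and values are an assumption):
     #define MORECORE            my_morecore
     #define MORECORE_CONTIGUOUS 1
     #define MORECORE_CANNOT_TRIM
*/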
4949 + --------------------------------------------------------------
4951 + Emulation of sbrk for win32.
4952 + Donated by J. Walter <Walter@GeNeSys-e.de>.
4953 + For additional information about this code, and malloc on Win32, see
4954 + http://www.genesys-e.de/jwalter/
4958 +#ifdef WIN32
4960 +#ifdef _DEBUG
4961 +/* #define TRACE */
4962 +#endif
4964 +/* Support for USE_MALLOC_LOCK */
4965 +#ifdef USE_MALLOC_LOCK
4967 +/* Wait for spin lock */
4968 +static int slwait (int *sl) {
4969 + while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
4970 + Sleep (0);
4971 + return 0;
4974 +/* Release spin lock */
4975 +static int slrelease (int *sl) {
4976 + InterlockedExchange (sl, 0);
4977 + return 0;
4980 +#ifdef NEEDED
4981 +/* Spin lock for emulation code */
4982 +static int g_sl;
4983 +#endif
4985 +#endif /* USE_MALLOC_LOCK */
4987 +/* getpagesize for windows */
4988 +static long getpagesize (void) {
4989 + static long g_pagesize = 0;
4990 + if (! g_pagesize) {
4991 + SYSTEM_INFO system_info;
4992 + GetSystemInfo (&system_info);
4993 + g_pagesize = system_info.dwPageSize;
4995 + return g_pagesize;
4997 +static long getregionsize (void) {
4998 + static long g_regionsize = 0;
4999 + if (! g_regionsize) {
5000 + SYSTEM_INFO system_info;
5001 + GetSystemInfo (&system_info);
5002 + g_regionsize = system_info.dwAllocationGranularity;
5004 + return g_regionsize;
5007 +/* A region list entry */
5008 +typedef struct _region_list_entry {
5009 + void *top_allocated;
5010 + void *top_committed;
5011 + void *top_reserved;
5012 + long reserve_size;
5013 + struct _region_list_entry *previous;
5014 +} region_list_entry;
5016 +/* Allocate and link a region entry in the region list */
5017 +static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
5018 + region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
5019 + if (! next)
5020 + return FALSE;
5021 + next->top_allocated = (char *) base_reserved;
5022 + next->top_committed = (char *) base_reserved;
5023 + next->top_reserved = (char *) base_reserved + reserve_size;
5024 + next->reserve_size = reserve_size;
5025 + next->previous = *last;
5026 + *last = next;
5027 + return TRUE;
5029 +/* Free and unlink the last region entry from the region list */
5030 +static int region_list_remove (region_list_entry **last) {
5031 + region_list_entry *previous = (*last)->previous;
5032 + if (! HeapFree (GetProcessHeap (), sizeof (region_list_entry), *last))
5033 + return FALSE;
5034 + *last = previous;
5035 + return TRUE;
5038 +#define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
5039 +#define FLOOR(size,to) ((size)&~((to)-1))
5041 +#define SBRK_SCALE 0
5042 +/* #define SBRK_SCALE 1 */
5043 +/* #define SBRK_SCALE 2 */
5044 +/* #define SBRK_SCALE 4 */
5046 +/* sbrk for windows */
5047 +static void *sbrk (long size) {
5048 + static long g_pagesize, g_my_pagesize;
5049 + static long g_regionsize, g_my_regionsize;
5050 + static region_list_entry *g_last;
5051 + void *result = (void *) MORECORE_FAILURE;
5052 +#ifdef TRACE
5053 + printf ("sbrk %d\n", size);
5054 +#endif
5055 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5056 + /* Wait for spin lock */
5057 + slwait (&g_sl);
5058 +#endif
5059 + /* First time initialization */
5060 + if (! g_pagesize) {
5061 + g_pagesize = getpagesize ();
5062 + g_my_pagesize = g_pagesize << SBRK_SCALE;
5064 + if (! g_regionsize) {
5065 + g_regionsize = getregionsize ();
5066 + g_my_regionsize = g_regionsize << SBRK_SCALE;
5068 + if (! g_last) {
5069 + if (! region_list_append (&g_last, 0, 0))
5070 + goto sbrk_exit;
5072 + /* Assert invariants */
5073 + assert (g_last);
5074 + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
5075 + g_last->top_allocated <= g_last->top_committed);
5076 + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
5077 + g_last->top_committed <= g_last->top_reserved &&
5078 + (unsigned) g_last->top_committed % g_pagesize == 0);
5079 + assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
5080 + assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
5081 + /* Allocation requested? */
5082 + if (size >= 0) {
5083 + /* Allocation size is the requested size */
5084 + long allocate_size = size;
5085 + /* Compute the size to commit */
5086 + long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5087 + /* Do we reach the commit limit? */
5088 + if (to_commit > 0) {
5089 + /* Round size to commit */
5090 + long commit_size = CEIL (to_commit, g_my_pagesize);
5091 + /* Compute the size to reserve */
5092 + long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
5093 + /* Do we reach the reserve limit? */
5094 + if (to_reserve > 0) {
5095 + /* Compute the remaining size to commit in the current region */
5096 + long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
5097 + if (remaining_commit_size > 0) {
5098 + /* Assert preconditions */
5099 + assert ((unsigned) g_last->top_committed % g_pagesize == 0);
5100 + assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
5101 + /* Commit this */
5102 + void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
5103 + MEM_COMMIT, PAGE_READWRITE);
5104 + /* Check returned pointer for consistency */
5105 + if (base_committed != g_last->top_committed)
5106 + goto sbrk_exit;
5107 + /* Assert postconditions */
5108 + assert ((unsigned) base_committed % g_pagesize == 0);
5109 +#ifdef TRACE
5110 + printf ("Commit %p %d\n", base_committed, remaining_commit_size);
5111 +#endif
5112 + /* Adjust the regions commit top */
5113 + g_last->top_committed = (char *) base_committed + remaining_commit_size;
5115 + } {
5116 + /* Now we are going to search and reserve. */
5117 + int contiguous = -1;
5118 + int found = FALSE;
5119 + MEMORY_BASIC_INFORMATION memory_info;
5120 + void *base_reserved;
5121 + long reserve_size;
5122 + do {
5123 + /* Assume contiguous memory */
5124 + contiguous = TRUE;
5125 + /* Round size to reserve */
5126 + reserve_size = CEIL (to_reserve, g_my_regionsize);
5127 + /* Start with the current region's top */
5128 + memory_info.BaseAddress = g_last->top_reserved;
5129 + /* Assert preconditions */
5130 + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5131 + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5132 + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
5133 + /* Assert postconditions */
5134 + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5135 +#ifdef TRACE
5136 + printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
5137 + memory_info.State == MEM_FREE ? "FREE":
5138 + (memory_info.State == MEM_RESERVE ? "RESERVED":
5139 + (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
5140 +#endif
5141 + /* Region is free, well aligned and big enough: we are done */
5142 + if (memory_info.State == MEM_FREE &&
5143 + (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
5144 + memory_info.RegionSize >= (unsigned) reserve_size) {
5145 + found = TRUE;
5146 + break;
5148 + /* From now on we can't get contiguous memory! */
5149 + contiguous = FALSE;
5150 + /* Recompute size to reserve */
5151 + reserve_size = CEIL (allocate_size, g_my_regionsize);
5152 + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
5153 + /* Assert preconditions */
5154 + assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
5155 + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5157 + /* Search failed? */
5158 + if (! found)
5159 + goto sbrk_exit;
5160 + /* Assert preconditions */
5161 + assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
5162 + assert (0 < reserve_size && reserve_size % g_regionsize == 0);
5163 + /* Try to reserve this */
5164 + base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
5165 + MEM_RESERVE, PAGE_NOACCESS);
5166 + if (! base_reserved) {
5167 + int rc = GetLastError ();
5168 + if (rc != ERROR_INVALID_ADDRESS)
5169 + goto sbrk_exit;
5171 + /* A null pointer signals (hopefully) a race condition with another thread. */
5172 + /* In this case, we try again. */
5173 + } while (! base_reserved);
5174 + /* Check returned pointer for consistency */
5175 + if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress)
5176 + goto sbrk_exit;
5177 + /* Assert postconditions */
5178 + assert ((unsigned) base_reserved % g_regionsize == 0);
5179 +#ifdef TRACE
5180 + printf ("Reserve %p %d\n", base_reserved, reserve_size);
5181 +#endif
5182 + /* Did we get contiguous memory? */
5183 + if (contiguous) {
5184 + long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
5185 + /* Adjust allocation size */
5186 + allocate_size -= start_size;
5187 + /* Adjust the regions allocation top */
5188 + g_last->top_allocated = g_last->top_committed;
5189 + /* Recompute the size to commit */
5190 + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5191 + /* Round size to commit */
5192 + commit_size = CEIL (to_commit, g_my_pagesize);
5194 + /* Append the new region to the list */
5195 + if (! region_list_append (&g_last, base_reserved, reserve_size))
5196 + goto sbrk_exit;
5197 + /* Didn't we get contiguous memory? */
5198 + if (! contiguous) {
5199 + /* Recompute the size to commit */
5200 + to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
5201 + /* Round size to commit */
5202 + commit_size = CEIL (to_commit, g_my_pagesize);
5206 + /* Assert preconditions */
5207 + assert ((unsigned) g_last->top_committed % g_pagesize == 0);
5208 + assert (0 < commit_size && commit_size % g_pagesize == 0); {
5209 + /* Commit this */
5210 + void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
5211 + MEM_COMMIT, PAGE_READWRITE);
5212 + /* Check returned pointer for consistency */
5213 + if (base_committed != g_last->top_committed)
5214 + goto sbrk_exit;
5215 + /* Assert postconditions */
5216 + assert ((unsigned) base_committed % g_pagesize == 0);
5217 +#ifdef TRACE
5218 + printf ("Commit %p %d\n", base_committed, commit_size);
5219 +#endif
5220 + /* Adjust the regions commit top */
5221 + g_last->top_committed = (char *) base_committed + commit_size;
5224 + /* Adjust the regions allocation top */
5225 + g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
5226 + result = (char *) g_last->top_allocated - size;
5227 + /* Deallocation requested? */
5228 + } else if (size < 0) {
5229 + long deallocate_size = - size;
5230 + /* As long as we have a region to release */
5231 + while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
5232 + /* Get the size to release */
5233 + long release_size = g_last->reserve_size;
5234 + /* Get the base address */
5235 + void *base_reserved = (char *) g_last->top_reserved - release_size;
5236 + /* Assert preconditions */
5237 + assert ((unsigned) base_reserved % g_regionsize == 0);
5238 + assert (0 < release_size && release_size % g_regionsize == 0); {
5239 + /* Release this */
5240 + int rc = VirtualFree (base_reserved, 0,
5241 + MEM_RELEASE);
5242 + /* Check returned code for consistency */
5243 + if (! rc)
5244 + goto sbrk_exit;
5245 +#ifdef TRACE
5246 + printf ("Release %p %d\n", base_reserved, release_size);
5247 +#endif
5249 + /* Adjust deallocation size */
5250 + deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
5251 + /* Remove the old region from the list */
5252 + if (! region_list_remove (&g_last))
5253 + goto sbrk_exit;
5254 + } {
5255 + /* Compute the size to decommit */
5256 + long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
5257 + if (to_decommit >= g_my_pagesize) {
5258 + /* Compute the size to decommit */
5259 + long decommit_size = FLOOR (to_decommit, g_my_pagesize);
5260 + /* Compute the base address */
5261 + void *base_committed = (char *) g_last->top_committed - decommit_size;
5262 + /* Assert preconditions */
5263 + assert ((unsigned) base_committed % g_pagesize == 0);
5264 + assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
5265 + /* Decommit this */
5266 + int rc = VirtualFree ((char *) base_committed, decommit_size,
5267 + MEM_DECOMMIT);
5268 + /* Check returned code for consistency */
5269 + if (! rc)
5270 + goto sbrk_exit;
5271 +#ifdef TRACE
5272 + printf ("Decommit %p %d\n", base_committed, decommit_size);
5273 +#endif
5275 + /* Adjust deallocation size and regions commit and allocate top */
5276 + deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
5277 + g_last->top_committed = base_committed;
5278 + g_last->top_allocated = base_committed;
5281 + /* Adjust regions allocate top */
5282 + g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
5283 + /* Check for underflow */
5284 + if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
5285 + g_last->top_allocated > g_last->top_committed) {
5286 + /* Adjust regions allocate top */
5287 + g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
5288 + goto sbrk_exit;
5290 + result = g_last->top_allocated;
5292 + /* Assert invariants */
5293 + assert (g_last);
5294 + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
5295 + g_last->top_allocated <= g_last->top_committed);
5296 + assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
5297 + g_last->top_committed <= g_last->top_reserved &&
5298 + (unsigned) g_last->top_committed % g_pagesize == 0);
5299 + assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
5300 + assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
5302 +sbrk_exit:
5303 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5304 + /* Release spin lock */
5305 + slrelease (&g_sl);
5306 +#endif
5307 + return result;
5310 +/* mmap for windows */
5311 +static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
5312 + static long g_pagesize;
5313 + static long g_regionsize;
5314 +#ifdef TRACE
5315 + printf ("mmap %d\n", size);
5316 +#endif
5317 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5318 + /* Wait for spin lock */
5319 + slwait (&g_sl);
5320 +#endif
5321 + /* First time initialization */
5322 + if (! g_pagesize)
5323 + g_pagesize = getpagesize ();
5324 + if (! g_regionsize)
5325 + g_regionsize = getregionsize ();
5326 + /* Assert preconditions */
5327 + assert ((unsigned) ptr % g_regionsize == 0);
5328 + assert (size % g_pagesize == 0);
5329 + /* Allocate this */
5330 + ptr = VirtualAlloc (ptr, size,
5331 + MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
5332 + if (! ptr) {
5333 + ptr = (void *) MORECORE_FAILURE;
5334 + goto mmap_exit;
5336 + /* Assert postconditions */
5337 + assert ((unsigned) ptr % g_regionsize == 0);
5338 +#ifdef TRACE
5339 + printf ("Commit %p %d\n", ptr, size);
5340 +#endif
5341 +mmap_exit:
5342 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5343 + /* Release spin lock */
5344 + slrelease (&g_sl);
5345 +#endif
5346 + return ptr;
5349 +/* munmap for windows */
5350 +static long munmap (void *ptr, long size) {
5351 + static long g_pagesize;
5352 + static long g_regionsize;
5353 + int rc = MUNMAP_FAILURE;
5354 +#ifdef TRACE
5355 + printf ("munmap %p %d\n", ptr, size);
5356 +#endif
5357 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5358 + /* Wait for spin lock */
5359 + slwait (&g_sl);
5360 +#endif
5361 + /* First time initialization */
5362 + if (! g_pagesize)
5363 + g_pagesize = getpagesize ();
5364 + if (! g_regionsize)
5365 + g_regionsize = getregionsize ();
5366 + /* Assert preconditions */
5367 + assert ((unsigned) ptr % g_regionsize == 0);
5368 + assert (size % g_pagesize == 0);
5369 + /* Free this */
5370 + if (! VirtualFree (ptr, 0,
5371 + MEM_RELEASE))
5372 + goto munmap_exit;
5373 + rc = 0;
5374 +#ifdef TRACE
5375 + printf ("Release %p %d\n", ptr, size);
5376 +#endif
5377 +munmap_exit:
5378 +#if defined (USE_MALLOC_LOCK) && defined (NEEDED)
5379 + /* Release spin lock */
5380 + slrelease (&g_sl);
5381 +#endif
5382 + return rc;
5385 +static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed) {
5386 + MEMORY_BASIC_INFORMATION memory_info;
5387 + memory_info.BaseAddress = 0;
5388 + *free = *reserved = *committed = 0;
5389 + while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
5390 + switch (memory_info.State) {
5391 + case MEM_FREE:
5392 + *free += memory_info.RegionSize;
5393 + break;
5394 + case MEM_RESERVE:
5395 + *reserved += memory_info.RegionSize;
5396 + break;
5397 + case MEM_COMMIT:
5398 + *committed += memory_info.RegionSize;
5399 + break;
5401 + memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
5405 +static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user) {
5406 + if (whole) {
5407 + __int64 creation64, exit64, kernel64, user64;
5408 + int rc = GetProcessTimes (GetCurrentProcess (),
5409 + (FILETIME *) &creation64,
5410 + (FILETIME *) &exit64,
5411 + (FILETIME *) &kernel64,
5412 + (FILETIME *) &user64);
5413 + if (! rc) {
5414 + *kernel = 0;
5415 + *user = 0;
5416 + return FALSE;
5418 + *kernel = (unsigned long) (kernel64 / 10000);
5419 + *user = (unsigned long) (user64 / 10000);
5420 + return TRUE;
5421 + } else {
5422 + __int64 creation64, exit64, kernel64, user64;
5423 + int rc = GetThreadTimes (GetCurrentThread (),
5424 + (FILETIME *) &creation64,
5425 + (FILETIME *) &exit64,
5426 + (FILETIME *) &kernel64,
5427 + (FILETIME *) &user64);
5428 + if (! rc) {
5429 + *kernel = 0;
5430 + *user = 0;
5431 + return FALSE;
5433 + *kernel = (unsigned long) (kernel64 / 10000);
5434 + *user = (unsigned long) (user64 / 10000);
5435 + return TRUE;
5439 +#endif /* WIN32 */
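
The WIN32 support above is built on VirtualAlloc()'s two-phase model: address space is first reserved in region-sized chunks and only committed page by page as the emulated sbrk() grows. A minimal sketch of that reserve-then-commit pattern, assuming the Win32 headers; reserve_then_commit and its parameters are illustrative only, not taken from the code above:

    #include <windows.h>
    #include <assert.h>

    /* Illustration only: reserve a region of address space, then commit just
       its first page.  Later commits can extend the usable part in place,
       which is what the emulated sbrk() above depends on. */
    static void *reserve_then_commit (SIZE_T reserve_size, SIZE_T page_size)
    {
        void *base = VirtualAlloc (NULL, reserve_size, MEM_RESERVE, PAGE_NOACCESS);
        if (! base)
            return NULL;
        void *first = VirtualAlloc (base, page_size, MEM_COMMIT, PAGE_READWRITE);
        assert (first == base);
        return first;
    }
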
5441 +/* ------------------------------------------------------------
5442 +History:
5444 + V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5445 + * Introduce independent_comalloc and independent_calloc.
5446 + Thanks to Michael Pachos for motivation and help.
5447 + * Make optional .h file available
5448 + * Allow > 2GB requests on 32bit systems.
5449 + * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5450 + Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5451 + and Anonymous.
5452 + * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5453 + helping test this.)
5454 + * memalign: check alignment arg
5455 + * realloc: don't try to shift chunks backwards, since this
5456 + leads to more fragmentation in some programs and doesn't
5457 + seem to help in any others.
5458 + * Collect all cases in malloc requiring system memory into sYSMALLOc
5459 + * Use mmap as backup to sbrk
5460 + * Place all internal state in malloc_state
5461 + * Introduce fastbins (although similar to 2.5.1)
5462 + * Many minor tunings and cosmetic improvements
5463 + * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5464 + * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5465 + Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5466 + * Include errno.h to support default failure action.
5468 + V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5469 + * return null for negative arguments
5470 + * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5471 + * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5472 + (e.g. WIN32 platforms)
5473 + * Cleanup header file inclusion for WIN32 platforms
5474 + * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5475 + * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5476 + memory allocation routines
5477 + * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5478 + * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5479 + usage of 'assert' in non-WIN32 code
5480 + * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5481 + avoid infinite loop
5482 + * Always call 'fREe()' rather than 'free()'
5484 + V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5485 + * Fixed ordering problem with boundary-stamping
5487 + V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5488 + * Added pvalloc, as recommended by H.J. Liu
5489 + * Added 64bit pointer support mainly from Wolfram Gloger
5490 + * Added anonymously donated WIN32 sbrk emulation
5491 + * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5492 + * malloc_extend_top: fix mask error that caused wastage after
5493 + foreign sbrks
5494 + * Add linux mremap support code from HJ Liu
5496 + V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5497 + * Integrated most documentation with the code.
5498 + * Add support for mmap, with help from
5499 + Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5500 + * Use last_remainder in more cases.
5501 + * Pack bins using idea from colin@nyx10.cs.du.edu
5502 + * Use ordered bins instead of best-fit threshold
5503 + * Eliminate block-local decls to simplify tracing and debugging.
5504 + * Support another case of realloc via move into top
5505 + * Fix error occurring when initial sbrk_base not word-aligned.
5506 + * Rely on page size for units instead of SBRK_UNIT to
5507 + avoid surprises about sbrk alignment conventions.
5508 + * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5509 + (raymond@es.ele.tue.nl) for the suggestion.
5510 + * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5511 + * More precautions for cases where other routines call sbrk,
5512 + courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5513 + * Added macros etc., allowing use in linux libc from
5514 + H.J. Lu (hjl@gnu.ai.mit.edu)
5515 + * Inverted this history list
5517 + V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5518 + * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5519 + * Removed all preallocation code since under current scheme
5520 + the work required to undo bad preallocations exceeds
5521 + the work saved in good cases for most test programs.
5522 + * No longer use return list or unconsolidated bins since
5523 + no scheme using them consistently outperforms those that don't
5524 + given above changes.
5525 + * Use best fit for very large chunks to prevent some worst-cases.
5526 + * Added some support for debugging
5528 + V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5529 + * Removed footers when chunks are in use. Thanks to
5530 + Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5532 + V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5533 + * Added malloc_trim, with help from Wolfram Gloger
5534 + (wmglo@Dent.MED.Uni-Muenchen.DE).
5536 + V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5538 + V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5539 + * realloc: try to expand in both directions
5540 + * malloc: swap order of clean-bin strategy;
5541 + * realloc: only conditionally expand backwards
5542 + * Try not to scavenge used bins
5543 + * Use bin counts as a guide to preallocation
5544 + * Occasionally bin return list chunks in first scan
5545 + * Add a few optimizations from colin@nyx10.cs.du.edu
5547 + V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5548 + * faster bin computation & slightly different binning
5549 + * merged all consolidations to one part of malloc proper
5550 + (eliminating old malloc_find_space & malloc_clean_bin)
5551 + * Scan 2 returns chunks (not just 1)
5552 + * Propagate failure in realloc if malloc returns 0
5553 + * Add stuff to allow compilation on non-ANSI compilers
5554 + from kpv@research.att.com
5556 + V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5557 + * removed potential for odd address access in prev_chunk
5558 + * removed dependency on getpagesize.h
5559 + * misc cosmetics and a bit more internal documentation
5560 + * anticosmetics: mangled names in macros to evade debugger strangeness
5561 + * tested on sparc, hp-700, dec-mips, rs6000
5562 + with gcc & native cc (hp, dec only) allowing
5563 + Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5565 + Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5566 + * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5567 + structure of old version, but most details differ.)
5571 +#ifdef USE_PUBLIC_MALLOC_WRAPPERS
5573 +#ifndef KDE_MALLOC_FULL
5575 +#ifdef KDE_MALLOC_GLIBC
5576 +#include "glibc.h"
5577 +#else
5578 +/* cannot use dlsym(RTLD_NEXT,...) here, it calls malloc()*/
5579 +#error Unknown libc
5580 +#endif
5582 +/* 0 - uninitialized
5583 + 1 - this malloc
5584 + 2 - standard libc malloc*/
5585 +extern char* getenv(const char*);
5586 +static int malloc_type = 0;
5587 +static void init_malloc_type(void)
5589 + const char* const env = getenv( "KDE_MALLOC" );
5590 + if( env == NULL )
5591 + malloc_type = 1;
5592 + else if( env[ 0 ] == '0' || env[ 0 ] == 'n' || env[ 0 ] == 'N' )
5593 + malloc_type = 2;
5594 + else
5595 + malloc_type = 1;
5598 +#endif
5600 +Void_t* public_mALLOc(size_t bytes) {
5601 +#ifndef KDE_MALLOC_FULL
5602 + if( malloc_type == 1 )
5604 +#endif
5605 + Void_t* m;
5606 + if (MALLOC_PREACTION != 0) {
5607 + return 0;
5609 + m = mALLOc(bytes);
5610 + if (MALLOC_POSTACTION != 0) {
5612 + return m;
5613 +#ifndef KDE_MALLOC_FULL
5615 + if( malloc_type == 2 )
5616 + return libc_malloc( bytes );
5617 + init_malloc_type();
5618 + return public_mALLOc( bytes );
5619 +#endif
5622 +void public_fREe(Void_t* m) {
5623 +#ifndef KDE_MALLOC_FULL
5624 + if( malloc_type == 1 )
5626 +#endif
5627 + if (MALLOC_PREACTION != 0) {
5628 + return;
5630 + fREe(m);
5631 + if (MALLOC_POSTACTION != 0) {
5633 +#ifndef KDE_MALLOC_FULL
5634 + return;
5636 + if( malloc_type == 2 )
5638 + libc_free( m );
5639 + return;
5641 + init_malloc_type();
5642 + public_fREe( m );
5643 +#endif
5646 +Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
5647 +#ifndef KDE_MALLOC_FULL
5648 + if( malloc_type == 1 )
5650 +#endif
5651 + if (MALLOC_PREACTION != 0) {
5652 + return 0;
5654 + m = rEALLOc(m, bytes);
5655 + if (MALLOC_POSTACTION != 0) {
5657 + return m;
5658 +#ifndef KDE_MALLOC_FULL
5660 + if( malloc_type == 2 )
5661 + return libc_realloc( m, bytes );
5662 + init_malloc_type();
5663 + return public_rEALLOc( m, bytes );
5664 +#endif
5667 +Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
5668 +#ifndef KDE_MALLOC_FULL
5669 + if( malloc_type == 1 )
5671 +#endif
5672 + Void_t* m;
5673 + if (MALLOC_PREACTION != 0) {
5674 + return 0;
5676 + m = mEMALIGn(alignment, bytes);
5677 + if (MALLOC_POSTACTION != 0) {
5679 + return m;
5680 +#ifndef KDE_MALLOC_FULL
5682 + if( malloc_type == 2 )
5683 + return libc_memalign( alignment, bytes );
5684 + init_malloc_type();
5685 + return public_mEMALIGn( alignment, bytes );
5686 +#endif
5689 +Void_t* public_vALLOc(size_t bytes) {
5690 +#ifndef KDE_MALLOC_FULL
5691 + if( malloc_type == 1 )
5693 +#endif
5694 + Void_t* m;
5695 + if (MALLOC_PREACTION != 0) {
5696 + return 0;
5698 + m = vALLOc(bytes);
5699 + if (MALLOC_POSTACTION != 0) {
5701 + return m;
5702 +#ifndef KDE_MALLOC_FULL
5704 + if( malloc_type == 2 )
5705 + return libc_valloc( bytes );
5706 + init_malloc_type();
5707 + return public_vALLOc( bytes );
5708 +#endif
5711 +Void_t* public_pVALLOc(size_t bytes) {
5712 +#ifndef KDE_MALLOC_FULL
5713 + if( malloc_type == 1 )
5715 +#endif
5716 + Void_t* m;
5717 + if (MALLOC_PREACTION != 0) {
5718 + return 0;
5720 + m = pVALLOc(bytes);
5721 + if (MALLOC_POSTACTION != 0) {
5723 + return m;
5724 +#ifndef KDE_MALLOC_FULL
5726 + if( malloc_type == 2 )
5727 + return libc_pvalloc( bytes );
5728 + init_malloc_type();
5729 + return public_pVALLOc( bytes );
5730 +#endif
5733 +Void_t* public_cALLOc(size_t n, size_t elem_size) {
5734 +#ifndef KDE_MALLOC_FULL
5735 + if( malloc_type == 1 )
5737 +#endif
5738 + Void_t* m;
5739 + if (MALLOC_PREACTION != 0) {
5740 + return 0;
5742 + m = cALLOc(n, elem_size);
5743 + if (MALLOC_POSTACTION != 0) {
5745 + return m;
5746 +#ifndef KDE_MALLOC_FULL
5748 + if( malloc_type == 2 )
5749 + return libc_calloc( n, elem_size );
5750 + init_malloc_type();
5751 + return public_cALLOc( n, elem_size );
5752 +#endif
5755 +void public_cFREe(Void_t* m) {
5756 +#ifndef KDE_MALLOC_FULL
5757 + if( malloc_type == 1 )
5759 +#endif
5760 + if (MALLOC_PREACTION != 0) {
5761 + return;
5763 + cFREe(m);
5764 + if (MALLOC_POSTACTION != 0) {
5766 +#ifndef KDE_MALLOC_FULL
5767 + return;
5769 + if( malloc_type == 2 )
5771 + libc_cfree( m );
5772 + return;
5774 + init_malloc_type();
5775 + public_cFREe( m );
5776 +#endif
5779 +struct mallinfo public_mALLINFo() {
5780 +#ifndef KDE_MALLOC_FULL
5781 + if( malloc_type == 1 )
5783 +#endif
5784 + struct mallinfo m;
5785 + if (MALLOC_PREACTION != 0) {
5786 + struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
5787 + return nm;
5789 + m = mALLINFo();
5790 + if (MALLOC_POSTACTION != 0) {
5792 + return m;
5793 +#ifndef KDE_MALLOC_FULL
5795 + if( malloc_type == 2 )
5796 + return libc_mallinfo();
5797 + init_malloc_type();
5798 + return public_mALLINFo();
5799 +#endif
5802 +int public_mALLOPt(int p, int v) {
5803 +#ifndef KDE_MALLOC_FULL
5804 + if( malloc_type == 1 )
5806 +#endif
5807 + int result;
5808 + if (MALLOC_PREACTION != 0) {
5809 + return 0;
5811 + result = mALLOPt(p, v);
5812 + if (MALLOC_POSTACTION != 0) {
5814 + return result;
5815 +#ifndef KDE_MALLOC_FULL
5817 + if( malloc_type == 2 )
5818 + return libc_mallopt( p, v );
5819 + init_malloc_type();
5820 + return public_mALLOPt( p, v );
5821 +#endif
5823 +#endif
5825 +int
5826 +posix_memalign (void **memptr, size_t alignment, size_t size)
5828 + void *mem;
5831 + /* Test whether the ALIGNMENT argument is valid. It must be a power of
5831 + two multiple of sizeof (void *). */
5832 + if (alignment % sizeof (void *) != 0 || (alignment & (alignment - 1)) != 0)
5833 + return EINVAL;
5835 + mem = memalign (alignment, size);
5837 + if (mem != NULL) {
5838 + *memptr = mem;
5839 + return 0;
5842 + return ENOMEM;
5845 +#else
5846 +/* Some linkers (Solaris 2.6) don't like empty archives, but for
5847 + easier Makefile's we want to link against libklmalloc.la every time,
5848 + so simply make it non-empty. */
5849 +void kde_malloc_dummy_function ()
5851 + return;
5853 +#endif
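
For callers of the posix_memalign() defined above: failure is reported through the return value (EINVAL or ENOMEM), not through errno or a null result. A minimal usage sketch; the 64-byte alignment and 4096-byte size below are arbitrary:

    #include <stdio.h>
    #include <stdlib.h>

    int main (void)
    {
        void *buf = NULL;
        /* Request a 64-byte-aligned 4 KiB buffer. */
        int rc = posix_memalign (&buf, 64, 4096);
        if (rc != 0) {
            fprintf (stderr, "posix_memalign failed: %d\n", rc);
            return 1;
        }
        printf ("aligned buffer at %p\n", buf);
        free (buf);
        return 0;
    }
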
5854 diff -Nupr a/src/corelib/arch/avr32/qatomic.cpp b/src/corelib/arch/avr32/qatomic.cpp
5855 --- a/src/corelib/arch/avr32/qatomic.cpp 1970-01-01 01:00:00.000000000 +0100
5856 +++ b/src/corelib/arch/avr32/qatomic.cpp 2006-07-26 11:02:43.000000000 +0200
5857 @@ -0,0 +1,24 @@
5858 +/****************************************************************************
5860 +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
5862 +** This file is part of the QtCore module of the Qt Toolkit.
5864 +** Licensees holding valid Qt Preview licenses may use this file in
5865 +** accordance with the Qt Preview License Agreement provided with the
5866 +** Software.
5868 +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
5869 +** information about Qt Commercial License Agreements.
5871 +** Contact info@trolltech.com if any conditions of this licensing are
5872 +** not clear to you.
5874 +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
5875 +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
5877 +****************************************************************************/
5879 +#include "QtCore/qatomic_avr32.h"
5881 +Q_CORE_EXPORT long q_atomic_lock = 0;
5882 diff -Nupr a/src/corelib/arch/qatomic_arch.h b/src/corelib/arch/qatomic_arch.h
5883 --- a/src/corelib/arch/qatomic_arch.h 2006-06-30 09:49:44.000000000 +0200
5884 +++ b/src/corelib/arch/qatomic_arch.h 2006-07-27 12:42:58.000000000 +0200
5885 @@ -47,6 +47,8 @@ QT_BEGIN_HEADER
5886 # include "QtCore/qatomic_alpha.h"
5887 #elif defined(QT_ARCH_ARM)
5888 # include "QtCore/qatomic_arm.h"
5889 +#elif defined(QT_ARCH_AVR32)
5890 +# include "QtCore/qatomic_avr32.h"
5891 #elif defined(QT_ARCH_BOUNDSCHECKER)
5892 # include "QtCore/qatomic_boundschecker.h"
5893 #elif defined(QT_ARCH_GENERIC)
5894 diff -Nupr a/src/corelib/arch/qatomic_avr32.h b/src/corelib/arch/qatomic_avr32.h
5895 --- a/src/corelib/arch/qatomic_avr32.h 1970-01-01 01:00:00.000000000 +0100
5896 +++ b/src/corelib/arch/qatomic_avr32.h 2006-07-28 10:30:08.000000000 +0200
5897 @@ -0,0 +1,132 @@
5898 +/****************************************************************************
5900 +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
5902 +** This file is part of the QtCore module of the Qt Toolkit.
5904 +** Licensees holding valid Qt Preview licenses may use this file in
5905 +** accordance with the Qt Preview License Agreement provided with the
5906 +** Software.
5908 +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
5909 +** information about Qt Commercial License Agreements.
5911 +** Contact info@trolltech.com if any conditions of this licensing are
5912 +** not clear to you.
5914 +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
5915 +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
5917 +****************************************************************************/
5919 +#ifndef AVR32_QATOMIC_H
5920 +#define AVR32_QATOMIC_H
5922 +#include <QtCore/qglobal.h>
5924 +QT_BEGIN_HEADER
5926 +extern Q_CORE_EXPORT long q_atomic_lock;
5928 +inline long q_atomic_swp(volatile long *ptr, long newval)
5930 + register int ret;
5931 + asm volatile("xchg %0,%1,%2"
5932 + : "=&r"(ret)
5933 + : "r"(ptr), "r"(newval)
5934 + : "memory", "cc");
5935 + return ret;
5938 +inline int q_atomic_test_and_set_int(volatile int *ptr, int expected, int newval)
5940 + int ret = 0;
5941 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0);
5942 + if (*ptr == expected) {
5943 + *ptr = newval;
5944 + ret = 1;
5946 + q_atomic_swp(&q_atomic_lock, 0);
5947 + return ret;
5950 +inline int q_atomic_test_and_set_acquire_int(volatile int *ptr, int expected, int newval)
5952 + return q_atomic_test_and_set_int(ptr, expected, newval);
5955 +inline int q_atomic_test_and_set_release_int(volatile int *ptr, int expected, int newval)
5957 + return q_atomic_test_and_set_int(ptr, expected, newval);
5960 +inline int q_atomic_test_and_set_ptr(volatile void *ptr, void *expected, void *newval)
5962 + int ret = 0;
5963 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
5964 + if (*reinterpret_cast<void * volatile *>(ptr) == expected) {
5965 + *reinterpret_cast<void * volatile *>(ptr) = newval;
5966 + ret = 1;
5968 + q_atomic_swp(&q_atomic_lock, 0);
5969 + return ret;
5972 +inline int q_atomic_increment(volatile int *ptr)
5974 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
5975 + int originalValue = *ptr;
5976 + *ptr = originalValue + 1;
5977 + q_atomic_swp(&q_atomic_lock, 0);
5978 + return originalValue != -1;
5981 +inline int q_atomic_decrement(volatile int *ptr)
5983 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
5984 + int originalValue = *ptr;
5985 + *ptr = originalValue - 1;
5986 + q_atomic_swp(&q_atomic_lock, 0);
5987 + return originalValue != 1;
5990 +inline int q_atomic_set_int(volatile int *ptr, int newval)
5992 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
5993 + int originalValue = *ptr;
5994 + *ptr = newval;
5995 + q_atomic_swp(&q_atomic_lock, 0);
5996 + return originalValue;
5999 +inline void *q_atomic_set_ptr(volatile void *ptr, void *newval)
6001 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
6002 + void *originalValue = *reinterpret_cast<void * volatile *>(ptr);
6003 + *reinterpret_cast<void * volatile *>(ptr) = newval;
6004 + q_atomic_swp(&q_atomic_lock, 0);
6005 + return originalValue;
6008 +inline int q_atomic_fetch_and_add_int(volatile int *ptr, int value)
6010 + while (q_atomic_swp(&q_atomic_lock, ~0) != 0) ;
6011 + int originalValue = *ptr;
6012 + *ptr += value;
6013 + q_atomic_swp(&q_atomic_lock, 0);
6014 + return originalValue;
6017 +inline int q_atomic_fetch_and_add_acquire_int(volatile int *ptr, int value)
6019 + return q_atomic_fetch_and_add_int(ptr, value);
6022 +inline int q_atomic_fetch_and_add_release_int(volatile int *ptr, int value)
6024 + return q_atomic_fetch_and_add_int(ptr, value);
6027 +QT_END_HEADER
6029 +#endif // AVR32_QATOMIC_H
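
The header above provides Qt's atomic primitives for AVR32 by serializing everything through one global lock: q_atomic_swp() wraps the architecture's xchg instruction, and every other operation takes q_atomic_lock around an ordinary read-modify-write. As a rough illustration of how the primitives compose, a spin lock could be layered on top of them like this (TinySpinLock is a hypothetical example, not part of Qt):

    #include "QtCore/qatomic_avr32.h"

    // Hypothetical example: a spin lock expressed through the primitives above.
    // On AVR32 both calls below are themselves serialized via q_atomic_lock.
    class TinySpinLock
    {
    public:
        TinySpinLock() : held(0) {}
        // Spin until the compare-and-set from 0 to 1 succeeds.
        void lock()   { while (!q_atomic_test_and_set_acquire_int(&held, 0, 1)) {} }
        // Release by setting the flag back from 1 to 0.
        void unlock() { q_atomic_test_and_set_release_int(&held, 1, 0); }
    private:
        volatile int held;
    };
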
6030 diff -Nupr a/src/corelib/io/qfilesystemwatcher_inotify.cpp b/src/corelib/io/qfilesystemwatcher_inotify.cpp
6031 --- a/src/corelib/io/qfilesystemwatcher_inotify.cpp 2006-06-30 09:49:45.000000000 +0200
6032 +++ b/src/corelib/io/qfilesystemwatcher_inotify.cpp 2006-07-27 13:24:27.000000000 +0200
6033 @@ -87,6 +87,10 @@
6034 # define __NR_inotify_init 316
6035 # define __NR_inotify_add_watch 317
6036 # define __NR_inotify_rm_watch 318
6037 +#elif defined (__avr32__)
6038 +# define __NR_inotify_init 240
6039 +# define __NR_inotify_add_watch 241
6040 +# define __NR_inotify_rm_watch 242
6041 #elif defined (__SH4__)
6042 # define __NR_inotify_init 290
6043 # define __NR_inotify_add_watch 291
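
The __NR_inotify_* values added above give the file-system watcher the raw AVR32 system-call numbers so the inotify calls can be issued directly via syscall() when the C library provides no wrappers. A minimal sketch of such wrappers, assuming the definitions above are in scope; the my_inotify_* names are illustrative:

    #include <sys/syscall.h>
    #include <unistd.h>

    // Illustrative wrappers around the raw syscall numbers (240-242 on AVR32).
    static inline int my_inotify_init()
    { return syscall(__NR_inotify_init); }

    static inline int my_inotify_add_watch(int fd, const char *path, unsigned int mask)
    { return syscall(__NR_inotify_add_watch, fd, path, mask); }

    static inline int my_inotify_rm_watch(int fd, int wd)
    { return syscall(__NR_inotify_rm_watch, fd, wd); }
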
6044 diff -uprN a/mkspecs/qws/linux-avr32-g++/qmake.conf b/mkspecs/qws/linux-avr32-g++/qmake.conf
6045 --- a/mkspecs/qws/linux-avr32-g++/qmake.conf 1970-01-01 01:00:00.000000000 +0100
6046 +++ b/mkspecs/qws/linux-avr32-g++/qmake.conf 2006-08-01 08:47:12.000000000 +0200
6047 @@ -0,0 +1,85 @@
6049 +# qmake configuration for building with the avr32-linux-g++ cross-compiler
6052 +MAKEFILE_GENERATOR = UNIX
6053 +TEMPLATE = app
6054 +CONFIG += qt warn_on release link_prl
6055 +QT += core gui network
6056 +QMAKE_INCREMENTAL_STYLE = sublib
6058 +QMAKE_CC = avr32-linux-gcc
6059 +QMAKE_LEX = flex
6060 +QMAKE_LEXFLAGS =
6061 +QMAKE_YACC = yacc
6062 +QMAKE_YACCFLAGS = -d
6063 +QMAKE_CFLAGS = -pipe
6064 +QMAKE_CFLAGS_WARN_ON = -Wall -W
6065 +QMAKE_CFLAGS_WARN_OFF =
6066 +QMAKE_CFLAGS_RELEASE = -O2
6067 +QMAKE_CFLAGS_DEBUG = -g -O2
6068 +QMAKE_CFLAGS_SHLIB = -fPIC
6069 +QMAKE_CFLAGS_YACC = -Wno-unused -Wno-parentheses
6070 +QMAKE_CFLAGS_THREAD = -D_REENTRANT
6071 +QMAKE_CFLAGS_HIDESYMS = -fvisibility=hidden
6073 +QMAKE_CXX = avr32-linux-g++
6074 +QMAKE_CXXFLAGS = $$QMAKE_CFLAGS -fno-exceptions
6075 +QMAKE_CXXFLAGS_WARN_ON = $$QMAKE_CFLAGS_WARN_ON
6076 +QMAKE_CXXFLAGS_WARN_OFF = $$QMAKE_CFLAGS_WARN_OFF
6077 +QMAKE_CXXFLAGS_RELEASE = $$QMAKE_CFLAGS_RELEASE
6078 +QMAKE_CXXFLAGS_DEBUG = $$QMAKE_CFLAGS_DEBUG
6079 +QMAKE_CXXFLAGS_SHLIB = $$QMAKE_CFLAGS_SHLIB
6080 +QMAKE_CXXFLAGS_YACC = $$QMAKE_CFLAGS_YACC
6081 +QMAKE_CXXFLAGS_THREAD = $$QMAKE_CFLAGS_THREAD
6082 +QMAKE_CXXFLAGS_HIDESYMS = $$QMAKE_CFLAGS_HIDESYMS -fvisibility-inlines-hidden
6084 +QMAKE_INCDIR =
6085 +QMAKE_LIBDIR =
6086 +QMAKE_INCDIR_X11 =
6087 +QMAKE_LIBDIR_X11 =
6088 +QMAKE_INCDIR_QT = $$[QT_INSTALL_HEADERS]
6089 +QMAKE_LIBDIR_QT = $$[QT_INSTALL_LIBS]
6090 +QMAKE_INCDIR_OPENGL =
6091 +QMAKE_LIBDIR_OPENGL =
6092 +QMAKE_INCDIR_QTOPIA = $(QPEDIR)/include
6093 +QMAKE_LIBDIR_QTOPIA = $(QPEDIR)/lib
6095 +QMAKE_LINK = avr32-linux-g++
6096 +QMAKE_LINK_SHLIB = avr32-linux-g++
6097 +QMAKE_LFLAGS =
6098 +QMAKE_LFLAGS_RELEASE =
6099 +QMAKE_LFLAGS_DEBUG =
6100 +QMAKE_LFLAGS_SHLIB = -shared
6101 +QMAKE_LFLAGS_PLUGIN = $$QMAKE_LFLAGS_SHLIB
6102 +QMAKE_LFLAGS_SONAME = -Wl,-soname,
6103 +QMAKE_LFLAGS_THREAD =
6104 +QMAKE_RPATH = -Wl,-rpath,
6106 +QMAKE_LIBS =
6107 +QMAKE_LIBS_DYNLOAD = -ldl
6108 +QMAKE_LIBS_X11 =
6109 +QMAKE_LIBS_X11SM =
6110 +QMAKE_LIBS_QT = -lqte
6111 +QMAKE_LIBS_QT_THREAD = -lqte-mt
6112 +QMAKE_LIBS_QT_OPENGL = -lqgl
6113 +QMAKE_LIBS_QTOPIA = -lqpe -lqtopia
6114 +QMAKE_LIBS_THREAD = -lpthread
6116 +QMAKE_MOC = $$[QT_INSTALL_BINS]/moc
6117 +QMAKE_UIC = $$[QT_INSTALL_BINS]/uic
6119 +QMAKE_AR = avr32-linux-ar cqs
6120 +QMAKE_RANLIB = avr32-linux-ranlib
6122 +QMAKE_TAR = tar -cf
6123 +QMAKE_GZIP = gzip -9f
6125 +QMAKE_COPY = cp -f
6126 +QMAKE_MOVE = mv -f
6127 +QMAKE_DEL_FILE = rm -f
6128 +QMAKE_DEL_DIR = rmdir
6129 +QMAKE_STRIP = avr32-linux-strip
6130 +QMAKE_CHK_DIR_EXISTS = test -d
6131 +QMAKE_MKDIR = mkdir -p
6132 +load(qt_config)
6133 diff -uprN a/mkspecs/qws/linux-avr32-g++/qplatformdefs.h b/mkspecs/qws/linux-avr32-g++/qplatformdefs.h
6134 --- a/mkspecs/qws/linux-avr32-g++/qplatformdefs.h 1970-01-01 01:00:00.000000000 +0100
6135 +++ b/mkspecs/qws/linux-avr32-g++/qplatformdefs.h 2006-07-26 09:16:52.000000000 +0200
6136 @@ -0,0 +1,22 @@
6137 +/****************************************************************************
6139 +** Copyright (C) 1992-2006 Trolltech ASA. All rights reserved.
6141 +** This file is part of the qmake spec of the Qt Toolkit.
6143 +** Licensees holding valid Qt Preview licenses may use this file in
6144 +** accordance with the Qt Preview License Agreement provided with the
6145 +** Software.
6147 +** See http://www.trolltech.com/pricing.html or email sales@trolltech.com for
6148 +** information about Qt Commercial License Agreements.
6150 +** Contact info@trolltech.com if any conditions of this licensing are
6151 +** not clear to you.
6153 +** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
6154 +** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
6156 +****************************************************************************/
6158 +#include "../../linux-g++/qplatformdefs.h"
6159 --- a/include/QtCore/headers.pri 2007-08-02 15:03:31.000000000 +0200
6160 +++ b/include/QtCore/headers.pri 2007-08-02 15:03:44.000000000 +0200
6161 @@ -1,2 +1,2 @@
6162 -SYNCQT.HEADER_FILES = ../corelib/io/qdatastream.h ../corelib/io/qdebug.h ../corelib/io/qtextstream.h ../corelib/io/qtemporaryfile.h ../corelib/io/qfsfileengine.h ../corelib/io/qiodevice.h ../corelib/io/qprocess.h ../corelib/io/qresource.h ../corelib/io/qdiriterator.h ../corelib/io/qbuffer.h ../corelib/io/qfilesystemwatcher.h ../corelib/io/qdir.h ../corelib/io/qurl.h ../corelib/io/qabstractfileengine.h ../corelib/io/qfileinfo.h ../corelib/io/qsettings.h ../corelib/io/qfile.h ../corelib/arch/qatomic_arch.h ../corelib/arch/qatomic_i386.h ../corelib/arch/qatomic_sparc.h ../corelib/arch/qatomic_x86_64.h ../corelib/arch/qatomic_ia64.h ../corelib/arch/qatomic_parisc.h ../corelib/arch/qatomic_mips.h ../corelib/arch/qatomic_s390.h ../corelib/arch/qatomic_arm.h ../corelib/arch/qatomic_powerpc.h ../corelib/arch/qatomic_alpha.h ../corelib/arch/qatomic_boundschecker.h ../corelib/arch/qatomic_generic.h ../corelib/tools/qcache.h ../corelib/tools/qline.h ../corelib/tools/qlist.h ../corelib/tools/qpair.h ../corelib/tools/qpoint.h ../corelib/tools/qrect.h ../corelib/tools/qsize.h ../corelib/tools/qstringlist.h ../corelib/tools/qstringmatcher.h ../corelib/tools/qlinkedlist.h ../corelib/tools/qbitarray.h ../corelib/tools/qvector.h ../corelib/tools/qbytearraymatcher.h ../corelib/tools/qqueue.h ../corelib/tools/qbytearray.h ../corelib/tools/qalgorithms.h ../corelib/tools/qvarlengtharray.h ../corelib/tools/qshareddata.h ../corelib/tools/qcryptographichash.h ../corelib/tools/qiterator.h ../corelib/tools/qlocale.h ../corelib/tools/qstack.h ../corelib/tools/qmap.h ../corelib/tools/qset.h ../corelib/tools/qdatetime.h ../corelib/tools/qstring.h ../corelib/tools/qcontainerfwd.h ../corelib/tools/qregexp.h ../corelib/tools/qchar.h ../corelib/tools/qhash.h ../corelib/tools/qtimeline.h ../corelib/codecs/qtextcodecplugin.h ../corelib/codecs/qtextcodec.h ../corelib/global/qconfig-large.h ../corelib/global/qconfig-dist.h ../corelib/global/qconfig-small.h ../corelib/global/qlibraryinfo.h ../corelib/global/qendian.h ../corelib/global/qfeatures.h ../corelib/global/qglobal.h ../corelib/global/qconfig-minimal.h ../corelib/global/qnamespace.h ../corelib/global/qnumeric.h ../corelib/global/qconfig-medium.h ../corelib/kernel/qtranslator.h ../corelib/kernel/qvariant.h ../corelib/kernel/qmimedata.h ../corelib/kernel/qeventloop.h ../corelib/kernel/qcoreapplication.h ../corelib/kernel/qabstractitemmodel.h ../corelib/kernel/qsignalmapper.h ../corelib/kernel/qobjectcleanuphandler.h ../corelib/kernel/qbasictimer.h ../corelib/kernel/qsocketnotifier.h ../corelib/kernel/qobject.h ../corelib/kernel/qtimer.h ../corelib/kernel/qmetatype.h ../corelib/kernel/qabstracteventdispatcher.h ../corelib/kernel/qpointer.h ../corelib/kernel/qmetaobject.h ../corelib/kernel/qcoreevent.h ../corelib/kernel/qobjectdefs.h ../corelib/plugin/qpluginloader.h ../corelib/plugin/quuid.h ../corelib/plugin/qlibrary.h ../corelib/plugin/qplugin.h ../corelib/plugin/qfactoryinterface.h ../corelib/thread/qsemaphore.h ../corelib/thread/qthreadstorage.h ../corelib/thread/qwaitcondition.h ../corelib/thread/qthread.h ../corelib/thread/qmutex.h ../corelib/thread/qreadwritelock.h ../corelib/thread/qatomic.h ../../include/QtCore/QtCore
6163 +SYNCQT.HEADER_FILES = ../corelib/io/qdatastream.h ../corelib/io/qdebug.h ../corelib/io/qtextstream.h ../corelib/io/qtemporaryfile.h ../corelib/io/qfsfileengine.h ../corelib/io/qiodevice.h ../corelib/io/qprocess.h ../corelib/io/qresource.h ../corelib/io/qdiriterator.h ../corelib/io/qbuffer.h ../corelib/io/qfilesystemwatcher.h ../corelib/io/qdir.h ../corelib/io/qurl.h ../corelib/io/qabstractfileengine.h ../corelib/io/qfileinfo.h ../corelib/io/qsettings.h ../corelib/io/qfile.h ../corelib/arch/qatomic_arch.h ../corelib/arch/qatomic_i386.h ../corelib/arch/qatomic_sparc.h ../corelib/arch/qatomic_x86_64.h ../corelib/arch/qatomic_ia64.h ../corelib/arch/qatomic_parisc.h ../corelib/arch/qatomic_mips.h ../corelib/arch/qatomic_s390.h ../corelib/arch/qatomic_arm.h ../corelib/arch/qatomic_avr32.h ../corelib/arch/qatomic_powerpc.h ../corelib/arch/qatomic_alpha.h ../corelib/arch/qatomic_boundschecker.h ../corelib/arch/qatomic_generic.h ../corelib/tools/qcache.h ../corelib/tools/qline.h ../corelib/tools/qlist.h ../corelib/tools/qpair.h ../corelib/tools/qpoint.h ../corelib/tools/qrect.h ../corelib/tools/qsize.h ../corelib/tools/qstringlist.h ../corelib/tools/qstringmatcher.h ../corelib/tools/qlinkedlist.h ../corelib/tools/qbitarray.h ../corelib/tools/qvector.h ../corelib/tools/qbytearraymatcher.h ../corelib/tools/qqueue.h ../corelib/tools/qbytearray.h ../corelib/tools/qalgorithms.h ../corelib/tools/qvarlengtharray.h ../corelib/tools/qshareddata.h ../corelib/tools/qcryptographichash.h ../corelib/tools/qiterator.h ../corelib/tools/qlocale.h ../corelib/tools/qstack.h ../corelib/tools/qmap.h ../corelib/tools/qset.h ../corelib/tools/qdatetime.h ../corelib/tools/qstring.h ../corelib/tools/qcontainerfwd.h ../corelib/tools/qregexp.h ../corelib/tools/qchar.h ../corelib/tools/qhash.h ../corelib/tools/qtimeline.h ../corelib/codecs/qtextcodecplugin.h ../corelib/codecs/qtextcodec.h ../corelib/global/qconfig-large.h ../corelib/global/qconfig-dist.h ../corelib/global/qconfig-small.h ../corelib/global/qlibraryinfo.h ../corelib/global/qendian.h ../corelib/global/qfeatures.h ../corelib/global/qglobal.h ../corelib/global/qconfig-minimal.h ../corelib/global/qnamespace.h ../corelib/global/qnumeric.h ../corelib/global/qconfig-medium.h ../corelib/kernel/qtranslator.h ../corelib/kernel/qvariant.h ../corelib/kernel/qmimedata.h ../corelib/kernel/qeventloop.h ../corelib/kernel/qcoreapplication.h ../corelib/kernel/qabstractitemmodel.h ../corelib/kernel/qsignalmapper.h ../corelib/kernel/qobjectcleanuphandler.h ../corelib/kernel/qbasictimer.h ../corelib/kernel/qsocketnotifier.h ../corelib/kernel/qobject.h ../corelib/kernel/qtimer.h ../corelib/kernel/qmetatype.h ../corelib/kernel/qabstracteventdispatcher.h ../corelib/kernel/qpointer.h ../corelib/kernel/qmetaobject.h ../corelib/kernel/qcoreevent.h ../corelib/kernel/qobjectdefs.h ../corelib/plugin/qpluginloader.h ../corelib/plugin/quuid.h ../corelib/plugin/qlibrary.h ../corelib/plugin/qplugin.h ../corelib/plugin/qfactoryinterface.h ../corelib/thread/qsemaphore.h ../corelib/thread/qthreadstorage.h ../corelib/thread/qwaitcondition.h ../corelib/thread/qthread.h ../corelib/thread/qmutex.h ../corelib/thread/qreadwritelock.h ../corelib/thread/qatomic.h ../../include/QtCore/QtCore
6164 SYNCQT.HEADER_CLASSES = ../../include/QtCore/QDataStream ../../include/QtCore/QtDebug ../../include/QtCore/QDebug ../../include/QtCore/QNoDebug ../../include/QtCore/QTextStream ../../include/QtCore/QTextStreamFunction ../../include/QtCore/QTextStreamManipulator ../../include/QtCore/QTS ../../include/QtCore/QTextIStream ../../include/QtCore/QTextOStream ../../include/QtCore/QTemporaryFile ../../include/QtCore/QFSFileEngine ../../include/QtCore/QIODevice ../../include/QtCore/Q_PID ../../include/QtCore/QProcess ../../include/QtCore/QResource ../../include/QtCore/QDirIterator ../../include/QtCore/QBuffer ../../include/QtCore/QFileSystemWatcher ../../include/QtCore/QDir ../../include/QtCore/QUrl ../../include/QtCore/QAbstractFileEngine ../../include/QtCore/QAbstractFileEngineHandler ../../include/QtCore/QAbstractFileEngineIterator ../../include/QtCore/QFileInfo ../../include/QtCore/QFileInfoList ../../include/QtCore/QFileInfoListIterator ../../include/QtCore/QSettings ../../include/QtCore/QFile ../../include/QtCore/QBasicAtomic ../../include/QtCore/QBasicAtomicPointer ../../include/QtCore/QCache ../../include/QtCore/QLine ../../include/QtCore/QLineF ../../include/QtCore/QListData ../../include/QtCore/QList ../../include/QtCore/QListIterator ../../include/QtCore/QMutableListIterator ../../include/QtCore/QPair ../../include/QtCore/QPoint ../../include/QtCore/QPointF ../../include/QtCore/QRect ../../include/QtCore/QRectF ../../include/QtCore/QSize ../../include/QtCore/QSizeF ../../include/QtCore/QStringListIterator ../../include/QtCore/QMutableStringListIterator ../../include/QtCore/QStringList ../../include/QtCore/QStringMatcher ../../include/QtCore/QLinkedListData ../../include/QtCore/QLinkedListNode ../../include/QtCore/QLinkedList ../../include/QtCore/QLinkedListIterator ../../include/QtCore/QMutableLinkedListIterator ../../include/QtCore/QBitArray ../../include/QtCore/QBitRef ../../include/QtCore/QVectorData ../../include/QtCore/QVectorTypedData ../../include/QtCore/QVector ../../include/QtCore/QVectorIterator ../../include/QtCore/QMutableVectorIterator ../../include/QtCore/QByteArrayMatcher ../../include/QtCore/QQueue ../../include/QtCore/QByteArray ../../include/QtCore/QByteRef ../../include/QtCore/QtAlgorithms ../../include/QtCore/QVarLengthArray ../../include/QtCore/QSharedData ../../include/QtCore/QSharedDataPointer ../../include/QtCore/QExplicitlySharedDataPointer ../../include/QtCore/QCryptographicHash ../../include/QtCore/QLocale ../../include/QtCore/QSystemLocale ../../include/QtCore/QStack ../../include/QtCore/QMapData ../../include/QtCore/QMap ../../include/QtCore/QMultiMap ../../include/QtCore/QMapIterator ../../include/QtCore/QMutableMapIterator ../../include/QtCore/QSet ../../include/QtCore/QSetIterator ../../include/QtCore/QMutableSetIterator ../../include/QtCore/QDate ../../include/QtCore/QTime ../../include/QtCore/QDateTime ../../include/QtCore/QStdWString ../../include/QtCore/QString ../../include/QtCore/QLatin1String ../../include/QtCore/QCharRef ../../include/QtCore/QConstString ../../include/QtCore/QStringRef ../../include/QtCore/QtContainerFwd ../../include/QtCore/QRegExp ../../include/QtCore/QLatin1Char ../../include/QtCore/QChar ../../include/QtCore/QHashData ../../include/QtCore/QHashDummyValue ../../include/QtCore/QHashDummyNode ../../include/QtCore/QHashNode ../../include/QtCore/QHash ../../include/QtCore/QMultiHash ../../include/QtCore/QHashIterator ../../include/QtCore/QMutableHashIterator ../../include/QtCore/QTimeLine 
../../include/QtCore/QTextCodecFactoryInterface ../../include/QtCore/QTextCodecPlugin ../../include/QtCore/QTextCodec ../../include/QtCore/QTextEncoder ../../include/QtCore/QTextDecoder ../../include/QtCore/QLibraryInfo ../../include/QtCore/QtEndian ../../include/QtCore/QtGlobal ../../include/QtCore/QUintForSize ../../include/QtCore/QUintForType ../../include/QtCore/QIntForSize ../../include/QtCore/QIntForType ../../include/QtCore/QNoImplicitBoolCast ../../include/QtCore/Q_INT8 ../../include/QtCore/Q_UINT8 ../../include/QtCore/Q_INT16 ../../include/QtCore/Q_UINT16 ../../include/QtCore/Q_INT32 ../../include/QtCore/Q_UINT32 ../../include/QtCore/Q_INT64 ../../include/QtCore/Q_UINT64 ../../include/QtCore/Q_LLONG ../../include/QtCore/Q_ULLONG ../../include/QtCore/Q_LONG ../../include/QtCore/Q_ULONG ../../include/QtCore/QSysInfo ../../include/QtCore/QtMsgHandler ../../include/QtCore/QGlobalStatic ../../include/QtCore/QGlobalStaticDeleter ../../include/QtCore/QBool ../../include/QtCore/QTypeInfo ../../include/QtCore/QFlag ../../include/QtCore/QFlags ../../include/QtCore/QForeachContainer ../../include/QtCore/QForeachContainerBase ../../include/QtCore/Qt ../../include/QtCore/QInternal ../../include/QtCore/QCOORD ../../include/QtCore/QTranslator ../../include/QtCore/QVariant ../../include/QtCore/QVariantList ../../include/QtCore/QVariantMap ../../include/QtCore/QVariantComparisonHelper ../../include/QtCore/QMimeData ../../include/QtCore/QEventLoop ../../include/QtCore/QCoreApplication ../../include/QtCore/QtCleanUpFunction ../../include/QtCore/QModelIndex ../../include/QtCore/QPersistentModelIndex ../../include/QtCore/QModelIndexList ../../include/QtCore/QAbstractItemModel ../../include/QtCore/QAbstractTableModel ../../include/QtCore/QAbstractListModel ../../include/QtCore/QSignalMapper ../../include/QtCore/QObjectCleanupHandler ../../include/QtCore/QBasicTimer ../../include/QtCore/QSocketNotifier ../../include/QtCore/QObjectList ../../include/QtCore/QObjectData ../../include/QtCore/QObject ../../include/QtCore/QObjectUserData ../../include/QtCore/QTimer ../../include/QtCore/QMetaType ../../include/QtCore/QMetaTypeId ../../include/QtCore/QMetaTypeId2 ../../include/QtCore/QAbstractEventDispatcher ../../include/QtCore/QPointer ../../include/QtCore/QMetaMethod ../../include/QtCore/QMetaEnum ../../include/QtCore/QMetaProperty ../../include/QtCore/QMetaClassInfo ../../include/QtCore/QEvent ../../include/QtCore/QTimerEvent ../../include/QtCore/QChildEvent ../../include/QtCore/QCustomEvent ../../include/QtCore/QDynamicPropertyChangeEvent ../../include/QtCore/QGenericArgument ../../include/QtCore/QGenericReturnArgument ../../include/QtCore/QArgument ../../include/QtCore/QReturnArgument ../../include/QtCore/QMetaObject ../../include/QtCore/QPluginLoader ../../include/QtCore/QUuid ../../include/QtCore/QLibrary ../../include/QtCore/QtPlugin ../../include/QtCore/QtPluginInstanceFunction ../../include/QtCore/QFactoryInterface ../../include/QtCore/QSemaphore ../../include/QtCore/QThreadStorageData ../../include/QtCore/QThreadStorage ../../include/QtCore/QWaitCondition ../../include/QtCore/QThread ../../include/QtCore/QMutex ../../include/QtCore/QMutexLocker ../../include/QtCore/QReadWriteLock ../../include/QtCore/QReadLocker ../../include/QtCore/QWriteLocker ../../include/QtCore/QAtomic ../../include/QtCore/QAtomicPointer