/*
  Default header file for malloc-2.8.x, written by Doug Lea
  and released to the public domain, as explained at
  http://creativecommons.org/licenses/publicdomain.

  last update: Mon Aug 15 08:55:52 2005  Doug Lea  (dl at gee)

  This header is for ANSI C/C++ only.  You can set any of
  the following #defines before including:

  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
    was also compiled with this option, so all routines
    have names starting with "dl".

  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
    file will be #included AFTER <malloc.h>. This is needed only if
    your system defines a struct mallinfo that is incompatible with the
    standard one declared here.  Otherwise, you can include this file
    INSTEAD of your system <malloc.h>. At least on ANSI, all
    declarations should be compatible with system versions.

  * If MSPACES is defined, declarations for mspace versions are included.
*/
#ifndef MALLOC_280_H
#define MALLOC_280_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#if !ONLY_MSPACES

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#endif /* USE_DL_PREFIX */
/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);
/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void dlfree(void*);
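/*
  Illustrative sketch (not part of the original documentation): a
  typical allocate/use/release sequence with the dl-prefixed entry
  points, checking for the null return described above.

    #include <stdio.h>
    #include <string.h>

    void copy_name(const char* name) {
      char* buf = (char*)dlmalloc(strlen(name) + 1);
      if (buf == 0) {        // allocation failed; errno is ENOMEM on ANSI systems
        perror("dlmalloc");
        return;
      }
      strcpy(buf, name);     // use the chunk...
      dlfree(buf);           // ...and release it; dlfree(0) would be a no-op
    }
*/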
/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t, size_t);
/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void* dlrealloc(void*, size_t);
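/*
  Illustrative sketch (not part of the original documentation): because
  realloc leaves p untouched when it fails, overwriting p directly with
  the return value would leak the old chunk on failure. A common safe
  pattern keeps the old pointer until success:

    void* grow_buffer(void* p, size_t new_size) {
      void* q = dlrealloc(p, new_size);
      if (q == 0) {
        dlfree(p);    // old chunk is still valid and still ours; release it
        return 0;
      }
      return q;       // may or may not equal p, as described above
    }
*/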
/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);
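/*
  Illustrative sketch (not part of the original documentation):
  requesting a chunk aligned for, say, a 64-byte cache line. The
  alignment is a power of two, and the result is released with the
  ordinary free routine:

    void* alloc_cacheline_buffer(size_t n) {
      void* p = dlmemalign(64, n);   // 64 is a power of two >= 8
      // p == 0 on failure; otherwise ((size_t)p % 64) == 0
      return p;
    }
*/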
/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t);
/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
  (parameter-number, parameter-value) pair. mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h. None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt:

  Symbol            param #  default      allowed param values
  M_TRIM_THRESHOLD     -1    2*1024*1024  any (-1U disables trimming)
  M_GRANULARITY        -2    page size    any power of 2 >= page size
  M_MMAP_THRESHOLD     -3    256*1024     any (or 0 if no MMAP support)
*/
int dlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)
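/*
  Illustrative sketch (not part of the original documentation): tuning
  the trim and mmap thresholds at startup. The values here are
  arbitrary examples; dlmallopt returns 0 if a value is rejected:

    void tune_allocator(void) {
      int ok1 = dlmallopt(M_TRIM_THRESHOLD, 1024*1024); // trim above 1MB of slack
      int ok2 = dlmallopt(M_MMAP_THRESHOLD, 512*1024);  // mmap requests >= 512KB
      // ok1/ok2 are 1 on success, 0 if the value was not accepted
      (void)ok1; (void)ok2;
    }
*/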
/*
  malloc_footprint();
  Returns the number of bytes obtained from the system. The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);
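/*
  Illustrative sketch (not part of the original documentation): since
  malloc_footprint only returns a precomputed value, it is cheap enough
  to poll frequently, e.g. to track a high-water mark of system memory:

    static size_t peak_footprint = 0;

    void note_footprint(void) {
      size_t fp = dlmalloc_footprint();
      if (fp > peak_footprint)
        peak_footprint = fp;   // most bytes ever obtained from the system
    }
*/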
#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
             than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
             back to system via malloc_trim. ("ideally" means that
             it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */

struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* _MALLOC_H */
#endif /* HAVE_USR_INCLUDE_MALLOC_H */

struct mallinfo dlmallinfo(void);
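/*
  Illustrative sketch (not part of the original documentation): printing
  a few of the summary statistics described above. The casts assume
  MALLINFO_FIELD_TYPE is size_t (the default defined here):

    #include <stdio.h>

    void report_heap(void) {
      struct mallinfo mi = dlmallinfo();
      printf("allocated: %lu bytes, free: %lu bytes, trimmable: %lu bytes\n",
             (unsigned long)mi.uordblks,
             (unsigned long)mi.fordblks,
             (unsigned long)mi.keepcost);
    }
*/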
#endif /* NO_MALLINFO */
/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use regular calloc and assign pointers into this
  space to represent elements.  (In this case though, you cannot
  independently free elements.)

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

    struct Node { int item; struct Node* next; };

    struct Node* build_list() {
      struct Node** pool;
      int i;
      int n = read_number_of_nodes_needed();
      if (n <= 0) return 0;
      pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
      if (pool == 0) die();
      // organize into a linked list...
      struct Node* first = pool[0];
      for (i = 0; i < n-1; ++i)
        pool[i]->next = pool[i+1];
      free(pool);   // Can now free the array (or not, if it is needed later)
      return first;
    }
*/
void** dlindependent_calloc(size_t, size_t, void**);
/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array. It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be individually freed when it is no longer
  needed. If you'd like to instead be able to free all at once, you
  should instead use a single regular malloc, and assign pointers at
  particular offsets in the aggregate space. (In this case though, you
  cannot independently free elements.)

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

    struct Head { ... };
    struct Foot { ... };

    void send_message(char* msg) {
      int msglen = strlen(msg);
      size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
      void* chunks[3];
      if (independent_comalloc(3, sizes, chunks) == 0)
        die();
      struct Head* head = (struct Head*)(chunks[0]);
      char* body = (char*)(chunks[1]);
      struct Foot* foot = (struct Foot*)(chunks[2]);
      // ...
    }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t, size_t*, void**);
/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void* dlpvalloc(size_t);
/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int dlmalloc_trim(size_t);
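/*
  Illustrative sketch (not part of the original documentation): trimming
  after releasing a large block, while keeping some slack for expected
  future allocations (the 64KB pad here is an arbitrary example):

    void release_big_buffer(void* big) {
      dlfree(big);
      int released = dlmalloc_trim(64 * 1024); // keep 64KB of trailing slack
      // released is 1 if memory actually went back to the system, else 0
      (void)released;
    }
*/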
/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

    p = malloc(n);
    assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(void*);
/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.
*/
void dlmalloc_stats(void);

#endif /* !ONLY_MSPACES */
#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size. It
  returns null if there is no system memory available to create the
  space. If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests. You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);
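/*
  Illustrative sketch (not part of the original documentation): a
  private heap whose chunks are all released at once by destroying the
  space, rather than freed individually:

    void run_phase(void) {
      mspace ms = create_mspace(0, 0);   // default capacity, no locking
      if (ms == 0) return;               // no system memory available
      void* a = mspace_malloc(ms, 1000);
      void* b = mspace_malloc(ms, 5000);
      // ... use a and b ...
      destroy_mspace(ms);                // releases a, b, and all bookkeeping
    }
*/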
/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);
/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
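/*
  Illustrative sketch (not part of the original documentation): seeding
  an mspace with a static buffer. The capacity must exceed the (less
  than 128*sizeof(size_t) byte) bookkeeping overhead noted above:

    static char arena[64 * 1024];

    mspace make_static_mspace(void) {
      // returns 0 if the buffer is too small for the bookkeeping
      return create_mspace_with_base(arena, sizeof(arena), 0);
    }
*/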
/*
  mspace_malloc behaves as malloc, but operates within
  the given space.
*/
void* mspace_malloc(mspace msp, size_t bytes);

/*
  mspace_free behaves as free, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_free is not actually needed.
  free may be called instead of mspace_free because freed chunks from
  any space are handled by their originating spaces.
*/
void mspace_free(mspace msp, void* mem);

/*
  mspace_realloc behaves as realloc, but operates within
  the given space.

  If compiled with FOOTERS==1, mspace_realloc is not actually
  needed. realloc may be called instead of mspace_realloc because
  realloced chunks from any space are handled by their originating
  spaces.
*/
void* mspace_realloc(mspace msp, void* mem, size_t newsize);

/*
  mspace_calloc behaves as calloc, but operates within
  the given space.
*/
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

/*
  mspace_memalign behaves as memalign, but operates within
  the given space.
*/
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

/*
  mspace_independent_calloc behaves as independent_calloc, but
  operates within the given space.
*/
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);

/*
  mspace_independent_comalloc behaves as independent_comalloc, but
  operates within the given space.
*/
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);
/*
  mspace_footprint() returns the number of bytes obtained from the
  system for this space.
*/
size_t mspace_footprint(mspace msp);

#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  mspace_malloc_stats behaves as malloc_stats, but reports
  properties of the given space.
*/
void mspace_malloc_stats(mspace msp);

/*
  mspace_trim behaves as malloc_trim, but
  operates within the given space.
*/
int mspace_trim(mspace msp, size_t pad);

/*
  An alias for mallopt.
*/
int mspace_mallopt(int, int);

#endif /* MSPACES */

#ifdef __cplusplus
};  /* end of extern "C" */
#endif

#endif /* MALLOC_280_H */