/*
 * mono/utils/dlmalloc.h  (mono-project.git)
 * Scrape provenance: commit "[LoongArch64] Part-5: add loongarch support in
 * some files for LoongArch64. (#21769)",
 * blob 327cc4c0c14d03216d3d27e9027b35fff8397e02
 */
1 /*
2 Default header file for malloc-2.8.x, written by Doug Lea
3 and released to the public domain, as explained at
4 http://creativecommons.org/licenses/publicdomain.
6 last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
8 This header is for ANSI C/C++ only. You can set any of
9 the following #defines before including:
11 * If USE_DL_PREFIX is defined, it is assumed that malloc.c
12 was also compiled with this option, so all routines
13 have names starting with "dl".
15 * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
16 file will be #included AFTER <malloc.h>. This is needed only if
17 your system defines a struct mallinfo that is incompatible with the
18 standard one declared here. Otherwise, you can include this file
19 INSTEAD of your system system <malloc.h>. At least on ANSI, all
20 declarations should be compatible with system versions
22 * If MSPACES is defined, declarations for mspace versions are included.
25 #ifndef MALLOC_280_H
26 #define MALLOC_280_H
28 #include <stddef.h> /* for size_t */
29 #include <mono/utils/mono-compiler.h>
31 #if !ONLY_MSPACES
33 #ifndef USE_DL_PREFIX
34 #define dlcalloc calloc
35 #define dlfree free
36 #define dlmalloc malloc
37 #define dlmemalign memalign
38 #define dlrealloc realloc
39 #define dlvalloc valloc
40 #define dlpvalloc pvalloc
41 #define dlmallinfo mallinfo
42 #define dlmallopt mallopt
43 #define dlmalloc_trim malloc_trim
44 #define dlmalloc_stats malloc_stats
45 #define dlmalloc_usable_size malloc_usable_size
46 #define dlmalloc_footprint malloc_footprint
47 #define dlindependent_calloc independent_calloc
48 #define dlindependent_comalloc independent_comalloc
49 #endif /* USE_DL_PREFIX */
51 #define dlcalloc mono_dlcalloc
52 #define dlfree mono_dlfree
53 #define dlmalloc mono_dlmalloc
54 #define dlmemalign mono_dlmemalign
55 #define dlrealloc mono_dlrealloc
56 #define dlvalloc mono_dlvalloc
57 #define dlpvalloc mono_dlpvalloc
58 #define dlmallinfo mono_dlmallinfo
59 #define dlmallopt mono_dlmallopt
60 #define dlmalloc_trim mono_dlmalloc_trim
61 #define dlmalloc_stats mono_dlmalloc_stats
62 #define dlmalloc_usable_size mono_dlmalloc_usable_size
63 #define dlmalloc_footprint mono_dlmalloc_footprint
64 #define dlindependent_calloc mono_dlindependent_calloc
65 #define dlindependent_comalloc mono_dlindependent_comalloc
68 malloc(size_t n)
69 Returns a pointer to a newly allocated chunk of at least n bytes, or
70 null if no space is available, in which case errno is set to ENOMEM
71 on ANSI C systems.
73 If n is zero, malloc returns a minimum-sized chunk. (The minimum
74 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
75 systems.) Note that size_t is an unsigned type, so calls with
76 arguments that would be negative if signed are interpreted as
77 requests for huge amounts of space, which will often fail. The
78 maximum supported value of n differs across systems, but is in all
79 cases less than the maximum representable value of a size_t.
81 void* dlmalloc(size_t);
84 free(void* p)
85 Releases the chunk of memory pointed to by p, that had been previously
86 allocated using malloc or a related routine such as realloc.
87 It has no effect if p is null. If p was not malloced or already
88 freed, free(p) will by default cuase the current program to abort.
90 void dlfree(void*);
93 calloc(size_t n_elements, size_t element_size);
94 Returns a pointer to n_elements * element_size bytes, with all locations
95 set to zero.
97 void* dlcalloc(size_t, size_t);
100 realloc(void* p, size_t n)
101 Returns a pointer to a chunk of size n that contains the same data
102 as does chunk p up to the minimum of (n, p's size) bytes, or null
103 if no space is available.
105 The returned pointer may or may not be the same as p. The algorithm
106 prefers extending p in most cases when possible, otherwise it
107 employs the equivalent of a malloc-copy-free sequence.
109 If p is null, realloc is equivalent to malloc.
111 If space is not available, realloc returns null, errno is set (if on
112 ANSI) and p is NOT freed.
114 if n is for fewer bytes than already held by p, the newly unused
115 space is lopped off and freed if possible. realloc with a size
116 argument of zero (re)allocates a minimum-sized chunk.
118 The old unix realloc convention of allowing the last-free'd chunk
119 to be used as an argument to realloc is not supported.
122 void* dlrealloc(void*, size_t);
125 memalign(size_t alignment, size_t n);
126 Returns a pointer to a newly allocated chunk of n bytes, aligned
127 in accord with the alignment argument.
129 The alignment argument should be a power of two. If the argument is
130 not a power of two, the nearest greater power is used.
131 8-byte alignment is guaranteed by normal malloc calls, so don't
132 bother calling memalign with an argument of 8 or less.
134 Overreliance on memalign is a sure way to fragment space.
136 void* dlmemalign(size_t, size_t);
139 valloc(size_t n);
140 Equivalent to memalign(pagesize, n), where pagesize is the page
141 size of the system. If the pagesize is unknown, 4096 is used.
143 void* dlvalloc(size_t);
146 mallopt(int parameter_number, int parameter_value)
147 Sets tunable parameters The format is to provide a
148 (parameter-number, parameter-value) pair. mallopt then sets the
149 corresponding parameter to the argument value if it can (i.e., so
150 long as the value is meaningful), and returns 1 if successful else
151 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
152 normally defined in malloc.h. None of these are use in this malloc,
153 so setting them has no effect. But this malloc also supports other
154 options in mallopt:
156 Symbol param # default allowed param values
157 M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
158 M_GRANULARITY -2 page size any power of 2 >= page size
159 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
161 int dlmallopt(int, int);
163 #define M_TRIM_THRESHOLD (-1)
164 #define M_GRANULARITY (-2)
165 #define M_MMAP_THRESHOLD (-3)
169 malloc_footprint();
170 Returns the number of bytes obtained from the system. The total
171 number of bytes allocated by malloc, realloc etc., is less than this
172 value. Unlike mallinfo, this function returns only a precomputed
173 result, so can be called frequently to monitor memory consumption.
174 Even if locks are otherwise defined, this function does not use them,
175 so results might not be up to date.
177 size_t dlmalloc_footprint(void);
179 #if !NO_MALLINFO
181 mallinfo()
182 Returns (by copy) a struct containing various summary statistics:
184 arena: current total non-mmapped bytes allocated from system
185 ordblks: the number of free chunks
186 smblks: always zero.
187 hblks: current number of mmapped regions
188 hblkhd: total bytes held in mmapped regions
189 usmblks: the maximum total allocated space. This will be greater
190 than current total if trimming has occurred.
191 fsmblks: always zero
192 uordblks: current total allocated space (normal or mmapped)
193 fordblks: total free space
194 keepcost: the maximum number of bytes that could ideally be released
195 back to system via malloc_trim. ("ideally" means that
196 it ignores page restrictions etc.)
198 Because these fields are ints, but internal bookkeeping may
199 be kept as longs, the reported values may wrap around zero and
200 thus be inaccurate.
202 #ifndef HAVE_USR_INCLUDE_MALLOC_H
203 #ifndef _MALLOC_H
204 #ifndef MALLINFO_FIELD_TYPE
205 #define MALLINFO_FIELD_TYPE size_t
206 #endif /* MALLINFO_FIELD_TYPE */
207 struct mallinfo {
208 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
209 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
210 MALLINFO_FIELD_TYPE smblks; /* always 0 */
211 MALLINFO_FIELD_TYPE hblks; /* always 0 */
212 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
213 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
214 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
215 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
216 MALLINFO_FIELD_TYPE fordblks; /* total free space */
217 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
219 #endif /* _MALLOC_H */
220 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
222 struct mallinfo dlmallinfo(void);
223 #endif /* NO_MALLINFO */
226 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
228 independent_calloc is similar to calloc, but instead of returning a
229 single cleared space, it returns an array of pointers to n_elements
230 independent elements that can hold contents of size elem_size, each
231 of which starts out cleared, and can be independently freed,
232 realloc'ed etc. The elements are guaranteed to be adjacently
233 allocated (this is not guaranteed to occur with multiple callocs or
234 mallocs), which may also improve cache locality in some
235 applications.
237 The "chunks" argument is optional (i.e., may be null, which is
238 probably the most typical usage). If it is null, the returned array
239 is itself dynamically allocated and should also be freed when it is
240 no longer needed. Otherwise, the chunks array must be of at least
241 n_elements in length. It is filled in with the pointers to the
242 chunks.
244 In either case, independent_calloc returns this pointer array, or
245 null if the allocation failed. If n_elements is zero and "chunks"
246 is null, it returns a chunk representing an array with zero elements
247 (which should be freed if not wanted).
249 Each element must be individually freed when it is no longer
250 needed. If you'd like to instead be able to free all at once, you
251 should instead use regular calloc and assign pointers into this
252 space to represent elements. (In this case though, you cannot
253 independently free elements.)
255 independent_calloc simplifies and speeds up implementations of many
256 kinds of pools. It may also be useful when constructing large data
257 structures that initially have a fixed number of fixed-sized nodes,
258 but the number is not known at compile time, and some of the nodes
259 may later need to be freed. For example:
261 struct Node { int item; struct Node* next; };
263 struct Node* build_list() {
264 struct Node** pool;
265 int n = read_number_of_nodes_needed();
266 if (n <= 0) return 0;
267 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
268 if (pool == 0) die();
269 // organize into a linked list...
270 struct Node* first = pool[0];
271 for (i = 0; i < n-1; ++i)
272 pool[i]->next = pool[i+1];
273 free(pool); // Can now free the array (or not, if it is needed later)
274 return first;
277 void** dlindependent_calloc(size_t, size_t, void**);
280 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
282 independent_comalloc allocates, all at once, a set of n_elements
283 chunks with sizes indicated in the "sizes" array. It returns
284 an array of pointers to these elements, each of which can be
285 independently freed, realloc'ed etc. The elements are guaranteed to
286 be adjacently allocated (this is not guaranteed to occur with
287 multiple callocs or mallocs), which may also improve cache locality
288 in some applications.
290 The "chunks" argument is optional (i.e., may be null). If it is null
291 the returned array is itself dynamically allocated and should also
292 be freed when it is no longer needed. Otherwise, the chunks array
293 must be of at least n_elements in length. It is filled in with the
294 pointers to the chunks.
296 In either case, independent_comalloc returns this pointer array, or
297 null if the allocation failed. If n_elements is zero and chunks is
298 null, it returns a chunk representing an array with zero elements
299 (which should be freed if not wanted).
301 Each element must be individually freed when it is no longer
302 needed. If you'd like to instead be able to free all at once, you
303 should instead use a single regular malloc, and assign pointers at
304 particular offsets in the aggregate space. (In this case though, you
305 cannot independently free elements.)
307 independent_comallac differs from independent_calloc in that each
308 element may have a different size, and also that it does not
309 automatically clear elements.
311 independent_comalloc can be used to speed up allocation in cases
312 where several structs or objects must always be allocated at the
313 same time. For example:
315 struct Head { ... }
316 struct Foot { ... }
318 void send_message(char* msg) {
319 int msglen = strlen(msg);
320 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
321 void* chunks[3];
322 if (independent_comalloc(3, sizes, chunks) == 0)
323 die();
324 struct Head* head = (struct Head*)(chunks[0]);
325 char* body = (char*)(chunks[1]);
326 struct Foot* foot = (struct Foot*)(chunks[2]);
327 // ...
330 In general though, independent_comalloc is worth using only for
331 larger values of n_elements. For small values, you probably won't
332 detect enough difference from series of malloc calls to bother.
334 Overuse of independent_comalloc can increase overall memory usage,
335 since it cannot reuse existing noncontiguous small chunks that
336 might be available for some of the elements.
338 void** dlindependent_comalloc(size_t, size_t*, void**);
342 pvalloc(size_t n);
343 Equivalent to valloc(minimum-page-that-holds(n)), that is,
344 round up n to nearest pagesize.
346 void* dlpvalloc(size_t);
349 malloc_trim(size_t pad);
351 If possible, gives memory back to the system (via negative arguments
352 to sbrk) if there is unused memory at the `high' end of the malloc
353 pool or in unused MMAP segments. You can call this after freeing
354 large blocks of memory to potentially reduce the system-level memory
355 requirements of a program. However, it cannot guarantee to reduce
356 memory. Under some allocation patterns, some large free blocks of
357 memory will be locked between two used chunks, so they cannot be
358 given back to the system.
360 The `pad' argument to malloc_trim represents the amount of free
361 trailing space to leave untrimmed. If this argument is zero, only
362 the minimum amount of memory to maintain internal data structures
363 will be left. Non-zero arguments can be supplied to maintain enough
364 trailing space to service future expected allocations without having
365 to re-obtain memory from the system.
367 Malloc_trim returns 1 if it actually released any memory, else 0.
369 int dlmalloc_trim(size_t);
372 malloc_usable_size(void* p);
374 Returns the number of bytes you can actually use in
375 an allocated chunk, which may be more than you requested (although
376 often not) due to alignment and minimum size constraints.
377 You can use this many bytes without worrying about
378 overwriting other allocated objects. This is not a particularly great
379 programming practice. malloc_usable_size can be more useful in
380 debugging and assertions, for example:
382 p = malloc(n);
383 assert(malloc_usable_size(p) >= 256);
385 size_t dlmalloc_usable_size(void*);
388 malloc_stats();
389 Prints on stderr the amount of space obtained from the system (both
390 via sbrk and mmap), the maximum amount (which may be more than
391 current if malloc_trim and/or munmap got called), and the current
392 number of bytes allocated via malloc (or realloc, etc) but not yet
393 freed. Note that this is the number of bytes allocated, not the
394 number requested. It will be larger than the number requested
395 because of alignment and bookkeeping overhead. Because it includes
396 alignment wastage as being in use, this figure may be greater than
397 zero even when no user-level chunks are allocated.
399 The reported current and maximum system memory can be inaccurate if
400 a program makes other calls to system memory allocation functions
401 (normally sbrk) outside of malloc.
403 malloc_stats prints only the most commonly interesting statistics.
404 More information can be obtained by calling mallinfo.
406 void dlmalloc_stats(void);
408 #endif /* !ONLY_MSPACES */
410 #if MSPACES
413 mspace is an opaque type representing an independent
414 region of space that supports mspace_malloc, etc.
416 typedef void* mspace;
419 create_mspace creates and returns a new independent space with the
420 given initial capacity, or, if 0, the default granularity size. It
421 returns null if there is no system memory available to create the
422 space. If argument locked is non-zero, the space uses a separate
423 lock to control access. The capacity of the space will grow
424 dynamically as needed to service mspace_malloc requests. You can
425 control the sizes of incremental increases of this space by
426 compiling with a different DEFAULT_GRANULARITY or dynamically
427 setting with mallopt(M_GRANULARITY, value).
429 mspace create_mspace(size_t capacity, int locked);
432 destroy_mspace destroys the given space, and attempts to return all
433 of its memory back to the system, returning the total number of
434 bytes freed. After destruction, the results of access to all memory
435 used by the space become undefined.
437 size_t destroy_mspace(mspace msp);
440 create_mspace_with_base uses the memory supplied as the initial base
441 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
442 space is used for bookkeeping, so the capacity must be at least this
443 large. (Otherwise 0 is returned.) When this initial space is
444 exhausted, additional memory will be obtained from the system.
445 Destroying this space will deallocate all additionally allocated
446 space (if possible) but not the initial base.
448 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
451 mspace_malloc behaves as malloc, but operates within
452 the given space.
454 void* mspace_malloc(mspace msp, size_t bytes);
457 mspace_free behaves as free, but operates within
458 the given space.
460 If compiled with FOOTERS==1, mspace_free is not actually needed.
461 free may be called instead of mspace_free because freed chunks from
462 any space are handled by their originating spaces.
464 void mspace_free(mspace msp, void* mem);
467 mspace_realloc behaves as realloc, but operates within
468 the given space.
470 If compiled with FOOTERS==1, mspace_realloc is not actually
471 needed. realloc may be called instead of mspace_realloc because
472 realloced chunks from any space are handled by their originating
473 spaces.
475 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
478 mspace_calloc behaves as calloc, but operates within
479 the given space.
481 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
484 mspace_memalign behaves as memalign, but operates within
485 the given space.
487 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
490 mspace_independent_calloc behaves as independent_calloc, but
491 operates within the given space.
493 void** mspace_independent_calloc(mspace msp, size_t n_elements,
494 size_t elem_size, void* chunks[]);
497 mspace_independent_comalloc behaves as independent_comalloc, but
498 operates within the given space.
500 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
501 size_t sizes[], void* chunks[]);
504 mspace_footprint() returns the number of bytes obtained from the
505 system for this space.
507 size_t mspace_footprint(mspace msp);
510 #if !NO_MALLINFO
512 mspace_mallinfo behaves as mallinfo, but reports properties of
513 the given space.
515 struct mallinfo mspace_mallinfo(mspace msp);
516 #endif /* NO_MALLINFO */
519 mspace_malloc_stats behaves as malloc_stats, but reports
520 properties of the given space.
522 void mspace_malloc_stats(mspace msp);
525 mspace_trim behaves as malloc_trim, but
526 operates within the given space.
528 int mspace_trim(mspace msp, size_t pad);
531 An alias for mallopt.
533 int mspace_mallopt(int, int);
535 #endif /* MSPACES */
537 #endif /* MALLOC_280_H */