[Mono.Runtime.Tests] Exclude simd tests
[mono-project.git] / mono / metadata / mempool.c
blob85879ec1e6028ae6b901018118e472f1face9ed9
1 /**
2 * \file
3 * efficient memory allocation
5 * MonoMemPool is for fast allocation of memory. We free
6 * all memory when the pool is destroyed.
8 * Author:
9 * Dietmar Maurer (dietmar@ximian.com)
11 * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
12 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
13 * Copyright 2011 Xamarin Inc. (http://www.xamarin.com)
14 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <config.h>
18 #include <glib.h>
19 #include <string.h>
21 #include "mempool.h"
22 #include "mempool-internals.h"
23 #include "utils/unlocked.h"
26 * MonoMemPool is for fast allocation of memory. We free
27 * all memory when the pool is destroyed.
30 #define MEM_ALIGN 8
31 #define ALIGN_SIZE(s) (((s) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))
33 // Size of memory at start of mempool reserved for header
34 #define SIZEOF_MEM_POOL (ALIGN_SIZE (sizeof (MonoMemPool)))
36 #if MONO_SMALL_CONFIG
37 #define MONO_MEMPOOL_PAGESIZE 4096
38 #define MONO_MEMPOOL_MINSIZE 256
39 #else
40 #define MONO_MEMPOOL_PAGESIZE 8192
41 #define MONO_MEMPOOL_MINSIZE 512
42 #endif
44 // The --with-malloc-mempools debug-build flag causes mempools to be allocated in single-element blocks, so tools like Valgrind can run better.
45 #if USE_MALLOC_FOR_MEMPOOLS
46 #define INDIVIDUAL_ALLOCATIONS
47 #define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE 0
48 #else
49 #define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE MONO_MEMPOOL_PAGESIZE
50 #endif
52 #ifndef G_LIKELY
53 #define G_LIKELY(a) (a)
54 #define G_UNLIKELY(a) (a)
55 #endif
// A mempool is a linked list of memory blocks, each of which begins with this header structure.
// The initial block in the linked list is special, and tracks additional information.
struct _MonoMemPool {
	// Next block after this one in linked list
	MonoMemPool *next;

	// Size of this memory block only
	guint32 size;

	// Used in "initial block" only: Beginning of current free space in mempool (may be in some block other than the first one)
	guint8 *pos;

	// Used in "initial block" only: End of current free space in mempool (ie, the first byte following the end of usable space)
	guint8 *end;

	union {
		// Unused: Imposing floating point memory rules on _MonoMemPool's final field ensures proper alignment of whole header struct
		double pad;

		// Used in "initial block" only: Number of bytes so far allocated (whether used or not) in the whole mempool
		guint32 allocated;
	} d;
};

// Process-wide count of bytes malloc'ed for all mempools; read/written only
// through the Unlocked* atomics so concurrent pools stay consistent.
static gint64 total_bytes_allocated = 0;
83 /**
84 * mono_mempool_new:
86 * Returns: a new memory pool.
88 MonoMemPool *
89 mono_mempool_new (void)
91 return mono_mempool_new_size (MONO_MEMPOOL_PAGESIZE);
94 /**
95 * mono_mempool_new_size:
96 * \param initial_size the amount of memory to initially reserve for the memory pool.
97 * \returns a new memory pool with a specific initial memory reservation.
99 MonoMemPool *
100 mono_mempool_new_size (int initial_size)
102 MonoMemPool *pool;
104 #ifdef INDIVIDUAL_ALLOCATIONS
105 // In individual allocation mode, create initial block with zero storage space.
106 initial_size = SIZEOF_MEM_POOL;
107 #else
108 if (initial_size < MONO_MEMPOOL_MINSIZE)
109 initial_size = MONO_MEMPOOL_MINSIZE;
110 #endif
112 pool = (MonoMemPool *)g_malloc (initial_size);
114 pool->next = NULL;
115 pool->pos = (guint8*)pool + SIZEOF_MEM_POOL; // Start after header
116 pool->end = (guint8*)pool + initial_size; // End at end of allocated space
117 pool->d.allocated = pool->size = initial_size;
118 UnlockedAdd64 (&total_bytes_allocated, initial_size);
119 return pool;
123 * mono_mempool_destroy:
124 * \param pool the memory pool to destroy
126 * Free all memory associated with this pool.
128 void
129 mono_mempool_destroy (MonoMemPool *pool)
131 MonoMemPool *p, *n;
133 UnlockedSubtract64 (&total_bytes_allocated, pool->d.allocated);
135 p = pool;
136 while (p) {
137 n = p->next;
138 g_free (p);
139 p = n;
144 * mono_mempool_invalidate:
145 * \param pool the memory pool to invalidate
147 * Fill the memory associated with this pool to 0x2a (42). Useful for debugging.
149 void
150 mono_mempool_invalidate (MonoMemPool *pool)
152 MonoMemPool *p, *n;
154 p = pool;
155 while (p) {
156 n = p->next;
157 memset (p, 42, p->size);
158 p = n;
163 * mono_mempool_stats:
164 * \param pool the memory pool we need stats for
166 * Print a few stats about the mempool:
167 * - Total memory allocated (malloced) by mem pool
168 * - Number of chunks/blocks memory is allocated in
169 * - How much memory is available to dispense before a new malloc must occur?
171 void
172 mono_mempool_stats (MonoMemPool *pool)
174 MonoMemPool *p;
175 int count = 0;
176 guint32 still_free;
178 p = pool;
179 while (p) {
180 p = p->next;
181 count++;
183 if (pool) {
184 still_free = pool->end - pool->pos;
185 g_print ("Mempool %p stats:\n", pool);
186 g_print ("Total mem allocated: %d\n", pool->d.allocated);
187 g_print ("Num chunks: %d\n", count);
188 g_print ("Free memory: %d\n", still_free);
#ifdef TRACE_ALLOCATIONS
#include <execinfo.h>
#include "metadata/appdomain.h"
#include "metadata/metadata-internals.h"

// Serializes trace output from concurrent allocators.
static mono_mutex_t mempool_tracing_lock;
#define BACKTRACE_DEPTH 7

// Debug helper, compiled in only under TRACE_ALLOCATIONS: print the
// allocation size followed by a short native backtrace of the caller.
static void
mono_backtrace (int size)
{
	void *array[BACKTRACE_DEPTH];
	char **names;
	int i, symbols;
	// Lazy one-shot init of the lock; NOTE(review): not itself race-free —
	// presumably the first call happens before real contention. Confirm.
	static gboolean inited;

	if (!inited) {
		mono_os_mutex_init_recursive (&mempool_tracing_lock);
		inited = TRUE;
	}

	mono_os_mutex_lock (&mempool_tracing_lock);
	g_print ("Allocating %d bytes\n", size);
	// backtrace()/backtrace_symbols() can block; mark the region GC-safe
	// so the runtime's GC is not stalled while we symbolize.
	MONO_ENTER_GC_SAFE;
	symbols = backtrace (array, BACKTRACE_DEPTH);
	names = backtrace_symbols (array, symbols);
	MONO_EXIT_GC_SAFE;
	// Start at 1 to skip this function's own frame.
	for (i = 1; i < symbols; ++i) {
		g_print ("\t%s\n", names [i]);
	}
	g_free (names);
	mono_os_mutex_unlock (&mempool_tracing_lock);
}
#endif
228 * get_next_size:
229 * @pool: the memory pool to use
230 * @size: size of the memory entity we are trying to allocate
232 * A mempool is growing; give a recommended size for the next block.
233 * Each block in a mempool should be about 150% bigger than the previous one,
234 * or bigger if it is necessary to include the new entity.
236 * Returns: the recommended size.
238 static guint
239 get_next_size (MonoMemPool *pool, int size)
241 int target = pool->next? pool->next->size: pool->size;
242 size += SIZEOF_MEM_POOL;
243 /* increase the size */
244 target += target / 2;
245 while (target < size) {
246 target += target / 2;
248 if (target > MONO_MEMPOOL_PAGESIZE && size <= MONO_MEMPOOL_PAGESIZE)
249 target = MONO_MEMPOOL_PAGESIZE;
250 return target;
/**
 * mono_mempool_alloc:
 * \param pool the memory pool to use
 * \param size size of the memory block
 *
 * Allocates a new block of memory in \p pool .
 *
 * \returns the address of a newly allocated memory block.
 */
gpointer
(mono_mempool_alloc) (MonoMemPool *pool, guint size)
{
	gpointer rval = pool->pos; // Return value

	// Normal case: Just bump up pos pointer and we are done
	size = ALIGN_SIZE (size);
	pool->pos = (guint8*)rval + size;

#ifdef TRACE_ALLOCATIONS
	if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	// If we have just overflowed the current block, we need to back up and try again.
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		pool->pos -= size; // Back out

		// For large objects, allocate the object into its own block.
		// (In individual allocation mode, the constant will be 0 and this path will always be taken)
		if (size >= MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE) {
			guint new_size = SIZEOF_MEM_POOL + size;
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			// Link the dedicated block in *after* the head, so the head's
			// pos/end free space stays available for future small requests.
			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = (guint8*)np + SIZEOF_MEM_POOL;
		} else {
			// Notice: any unused memory at the end of the old head becomes simply abandoned in this case until the mempool is freed (see Bugzilla #35136)
			guint new_size = get_next_size (pool, size);
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			// Link in the fresh block and retarget the head's bump pointer at it.
			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->pos = (guint8*)np + SIZEOF_MEM_POOL;
			pool->end = (guint8*)np + new_size;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = pool->pos;
			pool->pos += size;
		}
	}

	return rval;
}
/**
 * mono_mempool_alloc0:
 *
 * same as \c mono_mempool_alloc, but fills memory with zero.
 */
gpointer
(mono_mempool_alloc0) (MonoMemPool *pool, guint size)
{
	gpointer rval;

	// For the fast path, repeat the first few lines of mono_mempool_alloc
	size = ALIGN_SIZE (size);
	rval = pool->pos;
	pool->pos = (guint8*)rval + size;

	// If that doesn't work fall back on mono_mempool_alloc to handle new chunk allocation
	// (size is already aligned; re-aligning inside mono_mempool_alloc is a no-op).
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		rval = mono_mempool_alloc (pool, size);
	}
#ifdef TRACE_ALLOCATIONS
	// Trace only on the fast path here — the slow path traces inside mono_mempool_alloc.
	else if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	memset (rval, 0, size);
	return rval;
}
345 * mono_mempool_contains_addr:
347 * Determines whether \p addr is inside the memory used by the mempool.
349 gboolean
350 mono_mempool_contains_addr (MonoMemPool *pool,
351 gpointer addr)
353 MonoMemPool *p = pool;
355 while (p) {
356 if (addr >= (gpointer)p && addr < (gpointer)((guint8*)p + p->size))
357 return TRUE;
358 p = p->next;
361 return FALSE;
365 * mono_mempool_strdup:
367 * Same as strdup, but allocates memory from the mempool.
368 * Returns: a pointer to the newly allocated string data inside the mempool.
370 char*
371 mono_mempool_strdup (MonoMemPool *pool,
372 const char *s)
374 int l;
375 char *res;
377 if (s == NULL)
378 return NULL;
380 l = strlen (s);
381 res = (char *)mono_mempool_alloc (pool, l + 1);
382 memcpy (res, s, l + 1);
384 return res;
387 char*
388 mono_mempool_strdup_vprintf (MonoMemPool *pool, const char *format, va_list args)
390 size_t buflen;
391 char *buf;
392 va_list args2;
393 va_copy (args2, args);
394 int len = vsnprintf (NULL, 0, format, args2);
395 va_end (args2);
397 if (len >= 0 && (buf = (char*)mono_mempool_alloc (pool, (buflen = (size_t) (len + 1)))) != NULL) {
398 vsnprintf (buf, buflen, format, args);
399 } else {
400 buf = NULL;
402 return buf;
405 char*
406 mono_mempool_strdup_printf (MonoMemPool *pool, const char *format, ...)
408 char *buf;
409 va_list args;
410 va_start (args, format);
411 buf = mono_mempool_strdup_vprintf (pool, format, args);
412 va_end (args);
413 return buf;
/**
 * mono_mempool_get_allocated:
 *
 * Return the amount of memory allocated for this mempool.
 */
guint32
mono_mempool_get_allocated (MonoMemPool *pool)
{
	// The running total is maintained in the initial block's header only.
	return pool->d.allocated;
}
/**
 * mono_mempool_get_bytes_allocated:
 *
 * Return the number of bytes currently allocated for mempools.
 */
long
mono_mempool_get_bytes_allocated (void)
{
	// Atomic read: the counter is updated concurrently by every live pool.
	return UnlockedRead64 (&total_bytes_allocated);
}