/*
 * mempool.c: efficient memory allocation
 *
 * MonoMemPool is for fast allocation of memory. We free
 * all memory when the pool is destroyed.
 *
 * Author:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc. (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <glib.h>
#include <string.h>

#include "mempool.h"
#include "mempool-internals.h"

/*
 * MonoMemPool is for fast allocation of memory. We free
 * all memory when the pool is destroyed.
 */

#define MEM_ALIGN 8
#define ALIGN_SIZE(s)	(((s) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))
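
/*
 * A quick illustration of ALIGN_SIZE with the default MEM_ALIGN of 8
 * (values worked by hand, shown only for orientation):
 *
 *   ALIGN_SIZE (1)  == 8
 *   ALIGN_SIZE (8)  == 8
 *   ALIGN_SIZE (13) == 16
 *   ALIGN_SIZE (24) == 24
 *
 * Every request is rounded up to the next multiple of MEM_ALIGN, so the
 * pointers handed out by the pool stay 8-byte aligned.
 */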

// Size of memory at start of mempool reserved for header
#define SIZEOF_MEM_POOL	(ALIGN_SIZE (sizeof (MonoMemPool)))

#if MONO_SMALL_CONFIG
#define MONO_MEMPOOL_PAGESIZE 4096
#define MONO_MEMPOOL_MINSIZE 256
#else
#define MONO_MEMPOOL_PAGESIZE 8192
#define MONO_MEMPOOL_MINSIZE 512
#endif

// The --with-malloc-mempools debug-build flag causes mempools to be allocated in single-element blocks, so tools like Valgrind can run better.
#if USE_MALLOC_FOR_MEMPOOLS
#define INDIVIDUAL_ALLOCATIONS
#define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE 0
#else
#define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE MONO_MEMPOOL_PAGESIZE
#endif

#ifndef G_LIKELY
#define G_LIKELY(a) (a)
#define G_UNLIKELY(a) (a)
#endif

// A mempool is a linked list of memory blocks, each of which begins with this header structure.
// The initial block in the linked list is special, and tracks additional information.
struct _MonoMemPool {
	// Next block after this one in linked list
	MonoMemPool *next;

	// Size of this memory block only
	guint32 size;

	// Used in "initial block" only: Beginning of current free space in mempool (may be in some block other than the first one)
	guint8 *pos;

	// Used in "initial block" only: End of current free space in mempool (ie, the first byte following the end of usable space)
	guint8 *end;

	union {
		// Unused: Imposing floating point memory rules on _MonoMemPool's final field ensures proper alignment of whole header struct
		double pad;

		// Used in "initial block" only: Number of bytes so far allocated (whether used or not) in the whole mempool
		guint32 allocated;
	} d;
};
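
/*
 * Rough picture of a pool that has grown to two blocks (a sketch only; exact
 * header size depends on the platform):
 *
 *   initial block                         overflow block
 *   +---------------------------+         +---------------------------+
 *   | header (SIZEOF_MEM_POOL)  |  next   | header (SIZEOF_MEM_POOL)  |
 *   |   next -------------------+-------> |   next = NULL             |
 *   |   size, pos, end, d       |         |   size (this block only)  |
 *   +---------------------------+         +---------------------------+
 *   | payload ...               |         | payload ...               |
 *   +---------------------------+         +---------------------------+
 *
 * Only the initial block's pos/end/d.allocated fields are consulted; the
 * other blocks just carry their next pointer and size.
 */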

static long total_bytes_allocated = 0;

/**
 * mono_mempool_new:
 *
 * Returns: a new memory pool.
 */
MonoMemPool *
mono_mempool_new (void)
{
	return mono_mempool_new_size (MONO_MEMPOOL_PAGESIZE);
}

/**
 * mono_mempool_new_size:
 * @initial_size: the amount of memory to initially reserve for the memory pool.
 *
 * Returns: a new memory pool with a specific initial memory reservation.
 */
MonoMemPool *
mono_mempool_new_size (int initial_size)
{
	MonoMemPool *pool;

#ifdef INDIVIDUAL_ALLOCATIONS
	// In individual allocation mode, create initial block with zero storage space.
	initial_size = SIZEOF_MEM_POOL;
#else
	if (initial_size < MONO_MEMPOOL_MINSIZE)
		initial_size = MONO_MEMPOOL_MINSIZE;
#endif

	pool = (MonoMemPool *)g_malloc (initial_size);

	pool->next = NULL;
	pool->pos = (guint8*)pool + SIZEOF_MEM_POOL; // Start after header
	pool->end = (guint8*)pool + initial_size;    // End at end of allocated space
	pool->d.allocated = pool->size = initial_size;
	total_bytes_allocated += initial_size;
	return pool;
}
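
/*
 * Typical lifetime, as a minimal sketch (the variable names below are
 * illustrative, not part of this file):
 *
 *   MonoMemPool *pool = mono_mempool_new ();          // or mono_mempool_new_size (1024)
 *   void *buf = mono_mempool_alloc (pool, 256);       // bump-pointer allocation
 *   char *copy = mono_mempool_strdup (pool, "hello"); // lives as long as the pool
 *   ...
 *   mono_mempool_destroy (pool);                      // frees every block at once;
 *                                                     // buf and copy are now invalid
 */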

/**
 * mono_mempool_destroy:
 * @pool: the memory pool to destroy
 *
 * Free all memory associated with this pool.
 */
void
mono_mempool_destroy (MonoMemPool *pool)
{
	MonoMemPool *p, *n;

	total_bytes_allocated -= pool->d.allocated;

	p = pool;
	while (p) {
		n = p->next;
		g_free (p);
		p = n;
	}
}

/**
 * mono_mempool_invalidate:
 * @pool: the memory pool to invalidate
 *
 * Fill the memory associated with this pool to 0x2a (42). Useful for debugging.
 */
void
mono_mempool_invalidate (MonoMemPool *pool)
{
	MonoMemPool *p, *n;

	p = pool;
	while (p) {
		n = p->next;
		memset (p, 42, p->size);
		p = n;
	}
}

/**
 * mono_mempool_stats:
 * @pool: the memory pool we need stats for
 *
 * Print a few stats about the mempool:
 * - Total memory allocated (malloced) by mem pool
 * - Number of chunks/blocks memory is allocated in
 * - How much memory is available to dispense before a new malloc must occur
 */
void
mono_mempool_stats (MonoMemPool *pool)
{
	MonoMemPool *p;
	int count = 0;
	guint32 still_free;

	p = pool;
	while (p) {
		p = p->next;
		count++;
	}
	if (pool) {
		still_free = pool->end - pool->pos;
		g_print ("Mempool %p stats:\n", pool);
		g_print ("Total mem allocated: %d\n", pool->d.allocated);
		g_print ("Num chunks: %d\n", count);
		g_print ("Free memory: %d\n", still_free);
	}
}
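
/*
 * Example output for a freshly created default-sized pool, assuming a typical
 * 64-bit build where the header rounds up to 40 bytes (numbers are only
 * indicative; they depend entirely on the pool's history):
 *
 *   mono_mempool_stats (pool);
 *   // Mempool 0x7f... stats:
 *   // Total mem allocated: 8192
 *   // Num chunks: 1
 *   // Free memory: 8152
 */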

#ifdef TRACE_ALLOCATIONS
#include <execinfo.h>
#include "metadata/appdomain.h"
#include "metadata/metadata-internals.h"

static mono_mutex_t mempool_tracing_lock;
#define BACKTRACE_DEPTH 7
static void
mono_backtrace (int size)
{
	void *array[BACKTRACE_DEPTH];
	char **names;
	int i, symbols;
	static gboolean inited;

	if (!inited) {
		mono_os_mutex_init_recursive (&mempool_tracing_lock);
		inited = TRUE;
	}

	mono_os_mutex_lock (&mempool_tracing_lock);
	g_print ("Allocating %d bytes\n", size);
	MONO_ENTER_GC_SAFE;
	symbols = backtrace (array, BACKTRACE_DEPTH);
	names = backtrace_symbols (array, symbols);
	MONO_EXIT_GC_SAFE;
	for (i = 1; i < symbols; ++i) {
		g_print ("\t%s\n", names [i]);
	}

	g_free (names);
	mono_os_mutex_unlock (&mempool_tracing_lock);
}

#endif

/**
 * get_next_size:
 * @pool: the memory pool to use
 * @size: size of the memory entity we are trying to allocate
 *
 * A mempool is growing; give a recommended size for the next block.
 * Each block in a mempool should be about 150% the size of the previous one,
 * or bigger if it is necessary to include the new entity.
 *
 * Returns: the recommended size.
 */
static guint
get_next_size (MonoMemPool *pool, int size)
{
	int target = pool->next? pool->next->size: pool->size;
	size += SIZEOF_MEM_POOL;
	/* increase the size */
	target += target / 2;
	while (target < size) {
		target += target / 2;
	}
	if (target > MONO_MEMPOOL_PAGESIZE && size <= MONO_MEMPOOL_PAGESIZE)
		target = MONO_MEMPOOL_PAGESIZE;
	return target;
}
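
/*
 * Worked example (non-small config, so MONO_MEMPOOL_PAGESIZE is 8192; the
 * header size of roughly 40 bytes assumes a typical 64-bit build):
 *
 *   A pool with a single 8192-byte block needs room for a 104-byte entity.
 *   target = 8192, size = 104 + SIZEOF_MEM_POOL ~= 144
 *   target += target / 2  ->  12288
 *   12288 > MONO_MEMPOOL_PAGESIZE while 144 <= MONO_MEMPOOL_PAGESIZE,
 *   so the recommendation is clamped back to 8192: ordinary growth keeps
 *   handing out page-sized blocks rather than ever-larger ones.
 */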

/**
 * mono_mempool_alloc:
 * @pool: the memory pool to use
 * @size: size of the memory block
 *
 * Allocates a new block of memory in @pool.
 *
 * Returns: the address of a newly allocated memory block.
 */
gpointer
mono_mempool_alloc (MonoMemPool *pool, guint size)
{
	gpointer rval = pool->pos; // Return value

	// Normal case: Just bump up pos pointer and we are done
	size = ALIGN_SIZE (size);
	pool->pos = (guint8*)rval + size;

#ifdef TRACE_ALLOCATIONS
	if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	// If we have just overflowed the current block, we need to back up and try again.
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		pool->pos -= size; // Back out

		// For large objects, allocate the object into its own block.
		// (In individual allocation mode, the constant will be 0 and this path will always be taken)
		if (size >= MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE) {
			guint new_size = SIZEOF_MEM_POOL + size;
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->d.allocated += new_size;
			total_bytes_allocated += new_size;

			rval = (guint8*)np + SIZEOF_MEM_POOL;
		} else {
			// Notice: any unused memory at the end of the old head becomes simply abandoned in this case until the mempool is freed (see Bugzilla #35136)
			guint new_size = get_next_size (pool, size);
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->pos = (guint8*)np + SIZEOF_MEM_POOL;
			pool->end = (guint8*)np + new_size;
			pool->d.allocated += new_size;
			total_bytes_allocated += new_size;

			rval = pool->pos;
			pool->pos += size;
		}
	}

	return rval;
}
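
/*
 * Minimal allocation sketch. MyNode is a hypothetical caller-side struct,
 * shown only to illustrate the usual pattern of carving typed objects out of
 * a pool:
 *
 *   typedef struct { int key; void *value; } MyNode;
 *
 *   MyNode *node = (MyNode *)mono_mempool_alloc0 (pool, sizeof (MyNode));
 *   node->key = 42;   // value is already zeroed by alloc0
 *
 * There is no per-object free: the memory is reclaimed only when
 * mono_mempool_destroy () is called on the whole pool.
 */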

/**
 * mono_mempool_alloc0:
 *
 * same as mono_mempool_alloc, but fills memory with zero.
 */
gpointer
mono_mempool_alloc0 (MonoMemPool *pool, guint size)
{
	gpointer rval;

	// For the fast path, repeat the first few lines of mono_mempool_alloc
	size = ALIGN_SIZE (size);
	rval = pool->pos;
	pool->pos = (guint8*)rval + size;

	// If that doesn't work fall back on mono_mempool_alloc to handle new chunk allocation
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		rval = mono_mempool_alloc (pool, size);
	}
#ifdef TRACE_ALLOCATIONS
	else if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	memset (rval, 0, size);
	return rval;
}

/**
 * mono_mempool_contains_addr:
 *
 * Determines whether ADDR is inside the memory used by the mempool.
 */
gboolean
mono_mempool_contains_addr (MonoMemPool *pool,
			    gpointer addr)
{
	MonoMemPool *p = pool;

	while (p) {
		if (addr >= (gpointer)p && addr < (gpointer)((guint8*)p + p->size))
			return TRUE;
		p = p->next;
	}

	return FALSE;
}
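
/*
 * Sketch of a debug-style check (the asserts are illustrative only):
 *
 *   char *s = mono_mempool_strdup (pool, "abc");
 *   g_assert (mono_mempool_contains_addr (pool, s));
 *   g_assert (!mono_mempool_contains_addr (pool, (gpointer)&s)); // stack address
 */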

/**
 * mono_mempool_strdup:
 *
 * Same as strdup, but allocates memory from the mempool.
 * Returns: a pointer to the newly allocated string data inside the mempool.
 */
char*
mono_mempool_strdup (MonoMemPool *pool,
		     const char *s)
{
	int l;
	char *res;

	if (s == NULL)
		return NULL;

	l = strlen (s);
	res = (char *)mono_mempool_alloc (pool, l + 1);
	memcpy (res, s, l + 1);

	return res;
}
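
/*
 * Lifetime/NULL-handling sketch (possibly_null is an illustrative caller-side
 * variable):
 *
 *   char *copy = mono_mempool_strdup (pool, possibly_null); // NULL in, NULL out
 *   // When non-NULL, copy is valid until mono_mempool_destroy (pool)
 *   // and must not be passed to g_free.
 */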

char*
mono_mempool_strdup_vprintf (MonoMemPool *pool, const char *format, va_list args)
{
	size_t buflen;
	char *buf;
	va_list args2;
	va_copy (args2, args);
	int len = vsnprintf (NULL, 0, format, args2);
	va_end (args2);

	if (len >= 0 && (buf = (char*)mono_mempool_alloc (pool, (buflen = (size_t) (len + 1)))) != NULL) {
		vsnprintf (buf, buflen, format, args);
	} else {
		buf = NULL;
	}

	return buf;
}

char*
mono_mempool_strdup_printf (MonoMemPool *pool, const char *format, ...)
{
	char *buf;
	va_list args;
	va_start (args, format);
	buf = mono_mempool_strdup_vprintf (pool, format, args);
	va_end (args);
	return buf;
}
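
/*
 * Formatting sketch: the result is sized with a measuring vsnprintf pass and
 * then formatted straight into pool memory, so no temporary heap buffer is
 * needed (name and field_count below are illustrative caller-side variables):
 *
 *   char *msg = mono_mempool_strdup_printf (pool, "class %s has %d fields",
 *                                           name, field_count);
 *   // msg lives until mono_mempool_destroy (pool); do not g_free it.
 */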

/**
 * mono_mempool_get_allocated:
 *
 * Return the amount of memory allocated for this mempool.
 */
guint32
mono_mempool_get_allocated (MonoMemPool *pool)
{
	return pool->d.allocated;
}

/**
 * mono_mempool_get_bytes_allocated:
 *
 * Return the number of bytes currently allocated for mempools.
 */
long
mono_mempool_get_bytes_allocated (void)
{
	return total_bytes_allocated;
}
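
/*
 * Accounting sketch: the per-pool and global counters can be compared before
 * and after a burst of allocations (sizes below are illustrative):
 *
 *   guint32 before = mono_mempool_get_allocated (pool);
 *   mono_mempool_alloc (pool, 16 * 1024);              // large enough to force a new block
 *   g_print ("pool grew by %u bytes, %ld bytes across all mempools\n",
 *            mono_mempool_get_allocated (pool) - before,
 *            mono_mempool_get_bytes_allocated ());
 */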