/*
 * efficient memory allocation
 *
 * MonoMemPool is for fast allocation of memory. We free
 * all memory when the pool is destroyed.
 *
 * Author: Dietmar Maurer (dietmar@ximian.com)
 *
 * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
 * Copyright 2004-2009 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin Inc. (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <glib.h>
#include <string.h>

#include "mempool-internals.h"
#include "utils/unlocked.h"

/*
 * MonoMemPool is for fast allocation of memory. We free
 * all memory when the pool is destroyed.
 */
#define MEM_ALIGN 8
#define ALIGN_SIZE(s)	(((s) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))

// Size of memory at start of mempool reserved for header
#define SIZEOF_MEM_POOL	(ALIGN_SIZE (sizeof (MonoMemPool)))

#if MONO_SMALL_CONFIG
#define MONO_MEMPOOL_PAGESIZE 4096
#define MONO_MEMPOOL_MINSIZE 256
#else
#define MONO_MEMPOOL_PAGESIZE 8192
#define MONO_MEMPOOL_MINSIZE 512
#endif

// The --with-malloc-mempools debug-build flag causes mempools to be allocated in single-element blocks, so tools like Valgrind can run better.
#if USE_MALLOC_FOR_MEMPOOLS
#define INDIVIDUAL_ALLOCATIONS
#define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE 0
#else
#define MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE MONO_MEMPOOL_PAGESIZE
#endif

#ifndef G_LIKELY
#define G_LIKELY(a) (a)
#define G_UNLIKELY(a) (a)
#endif
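
/*
 * Illustrative sketch (hypothetical check, assuming MEM_ALIGN is 8): ALIGN_SIZE
 * rounds a request up to the next multiple of MEM_ALIGN, so every pointer the
 * pool hands out stays 8-byte aligned.
 */
#if 0
static void
align_size_example (void)
{
	g_assert (ALIGN_SIZE (1)  == 8);   // the smallest request still consumes 8 bytes
	g_assert (ALIGN_SIZE (8)  == 8);   // exact multiples are unchanged
	g_assert (ALIGN_SIZE (13) == 16);  // everything else rounds up
}
#endif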
// A mempool is a linked list of memory blocks, each of which begins with this header structure.
// The initial block in the linked list is special, and tracks additional information.
struct _MonoMemPool {
	// Next block after this one in linked list
	MonoMemPool *next;
	// Size of this memory block only
	guint32 size;
	// Used in "initial block" only: Beginning of current free space in mempool (may be in some block other than the first one)
	guint8 *pos;
	// Used in "initial block" only: End of current free space in mempool (ie, the first byte following the end of usable space)
	guint8 *end;
	union {
		// Unused: Imposing floating point memory rules on _MonoMemPool's final field ensures proper alignment of whole header struct
		double pad;
		// Used in "initial block" only: Number of bytes so far allocated (whether used or not) in the whole mempool
		guint32 allocated;
	} d;
};

static gint64 total_bytes_allocated = 0;
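
/*
 * Illustrative sketch (hypothetical, assumes the default build where the head
 * block has usable storage): the first allocation from a fresh pool lands
 * immediately after the header, because pos starts SIZEOF_MEM_POOL bytes into
 * the block. Under INDIVIDUAL_ALLOCATIONS the initial block has no storage, so
 * this does not hold there.
 */
#if 0
static void
mempool_layout_sketch (void)
{
	MonoMemPool *pool = mono_mempool_new ();
	guint8 *first = (guint8 *)mono_mempool_alloc (pool, 16);
	g_assert (first == (guint8 *)pool + SIZEOF_MEM_POOL);
	mono_mempool_destroy (pool);
}
#endif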
/**
 * mono_mempool_new:
 *
 * Returns: a new memory pool.
 */
MonoMemPool *
mono_mempool_new (void)
{
	return mono_mempool_new_size (MONO_MEMPOOL_PAGESIZE);
}

/**
 * mono_mempool_new_size:
 * \param initial_size the amount of memory to initially reserve for the memory pool.
 * \returns a new memory pool with a specific initial memory reservation.
 */
MonoMemPool *
mono_mempool_new_size (int initial_size)
{
	MonoMemPool *pool;

#ifdef INDIVIDUAL_ALLOCATIONS
	// In individual allocation mode, create initial block with zero storage space.
	initial_size = SIZEOF_MEM_POOL;
#else
	if (initial_size < MONO_MEMPOOL_MINSIZE)
		initial_size = MONO_MEMPOOL_MINSIZE;
#endif

	pool = (MonoMemPool *)g_malloc (initial_size);

	pool->next = NULL;
	pool->pos = (guint8 *)pool + SIZEOF_MEM_POOL; // Start after header
	pool->end = (guint8 *)pool + initial_size;    // End at end of allocated space
	pool->d.allocated = pool->size = initial_size;
	UnlockedAdd64 (&total_bytes_allocated, initial_size);

	return pool;
}
/**
 * mono_mempool_destroy:
 * \param pool the memory pool to destroy
 *
 * Free all memory associated with this pool.
 */
void
mono_mempool_destroy (MonoMemPool *pool)
{
	MonoMemPool *p, *n;

	UnlockedSubtract64 (&total_bytes_allocated, pool->d.allocated);

	p = pool;
	while (p) {
		n = p->next;
		g_free (p);
		p = n;
	}
}
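
/*
 * Illustrative usage sketch (hypothetical caller): the typical lifecycle is
 * create, allocate any number of objects, then destroy the whole pool at once;
 * individual allocations are never freed on their own.
 */
#if 0
static void
mempool_lifecycle_sketch (void)
{
	MonoMemPool *pool = mono_mempool_new ();
	int *numbers = (int *)mono_mempool_alloc (pool, 64 * sizeof (int));
	char *name = mono_mempool_strdup (pool, "example");
	numbers [0] = 1;               // use the memory as usual
	(void) name;
	mono_mempool_destroy (pool);   // releases every block, including both objects above
}
#endif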
/**
 * mono_mempool_invalidate:
 * \param pool the memory pool to invalidate
 *
 * Fill the memory associated with this pool with the value 0x2a (42). Useful for debugging.
 */
void
mono_mempool_invalidate (MonoMemPool *pool)
{
	MonoMemPool *p, *n;

	p = pool;
	while (p) {
		n = p->next;
		memset (p, 42, p->size);
		p = n;
	}
}
/**
 * mono_mempool_stats:
 * \param pool the memory pool we need stats for
 *
 * Print a few stats about the mempool:
 * - Total memory allocated (malloced) by mem pool
 * - Number of chunks/blocks memory is allocated in
 * - How much memory is available to dispense before a new malloc must occur?
 */
void
mono_mempool_stats (MonoMemPool *pool)
{
	MonoMemPool *p;
	int count = 0;
	guint32 still_free;

	p = pool;
	while (p) {
		p = p->next;
		count++;
	}
	if (pool) {
		still_free = pool->end - pool->pos;
		g_print ("Mempool %p stats:\n", pool);
		g_print ("Total mem allocated: %d\n", pool->d.allocated);
		g_print ("Num chunks: %d\n", count);
		g_print ("Free memory: %d\n", still_free);
	}
}
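
/*
 * Illustrative usage sketch (hypothetical caller): dump the counters for a pool.
 * Output goes through g_print, so it lands on stdout by default.
 */
#if 0
static void
mempool_stats_sketch (MonoMemPool *pool)
{
	mono_mempool_stats (pool);   // chunk count, total allocated, free bytes in the head block
	g_print ("tracked: %u bytes\n", mono_mempool_get_allocated (pool));
}
#endif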
#ifdef TRACE_ALLOCATIONS
#include <execinfo.h>
#include "metadata/appdomain.h"
#include "metadata/metadata-internals.h"

static mono_mutex_t mempool_tracing_lock;
#define BACKTRACE_DEPTH 7

static void
mono_backtrace (int size)
{
	void *array [BACKTRACE_DEPTH];
	char **names;
	int i, symbols;
	static gboolean inited;

	if (!inited) {
		mono_os_mutex_init_recursive (&mempool_tracing_lock);
		inited = TRUE;
	}

	mono_os_mutex_lock (&mempool_tracing_lock);
	g_print ("Allocating %d bytes\n", size);
	symbols = backtrace (array, BACKTRACE_DEPTH);
	names = backtrace_symbols (array, symbols);
	for (i = 1; i < symbols; ++i) {
		g_print ("\t%s\n", names [i]);
	}
	g_free (names);
	mono_os_mutex_unlock (&mempool_tracing_lock);
}
#endif
/**
 * get_next_size:
 * @pool: the memory pool to use
 * @size: size of the memory entity we are trying to allocate
 *
 * A mempool is growing; give a recommended size for the next block.
 * Each block in a mempool should be about 150% bigger than the previous one,
 * or bigger if it is necessary to include the new entity.
 *
 * Returns: the recommended size.
 */
static guint
get_next_size (MonoMemPool *pool, int size)
{
	int target = pool->next ? pool->next->size : pool->size;
	size += SIZEOF_MEM_POOL;
	/* increase the size */
	target += target / 2;
	while (target < size) {
		target += target / 2;
	}
	if (target > MONO_MEMPOOL_PAGESIZE && size <= MONO_MEMPOOL_PAGESIZE)
		target = MONO_MEMPOOL_PAGESIZE;
	return target;
}
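
/*
 * Worked example (illustrative; assumes the default 8192-byte page size and a
 * pool created with mono_mempool_new_size (512)): when a 480-byte request
 * overflows the 512-byte head block, get_next_size computes
 *   target = 512                       (only one block so far)
 *   size   = 480 + SIZEOF_MEM_POOL     (roughly 520 on 64-bit)
 *   target = 512 + 512/2 = 768         (grow by ~50%)
 * 768 already covers size and does not exceed MONO_MEMPOOL_PAGESIZE, so 768 is
 * returned. Whenever the grown target would pass the page size while the
 * request itself still fits in one page, the result is clamped back to
 * MONO_MEMPOOL_PAGESIZE.
 */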
/**
 * mono_mempool_alloc:
 * \param pool the memory pool to use
 * \param size size of the memory block
 *
 * Allocates a new block of memory in \p pool .
 *
 * \returns the address of a newly allocated memory block.
 */
gpointer
(mono_mempool_alloc) (MonoMemPool *pool, guint size)
{
	gpointer rval = pool->pos; // Return value

	// Normal case: Just bump up pos pointer and we are done
	size = ALIGN_SIZE (size);
	pool->pos = (guint8 *)rval + size;

#ifdef TRACE_ALLOCATIONS
	if (pool == mono_get_corlib ()->mempool) {
		mono_backtrace (size);
	}
#endif

	// If we have just overflowed the current block, we need to back up and try again.
	if (G_UNLIKELY (pool->pos >= pool->end)) {
		pool->pos -= size; // Back out

		// For large objects, allocate the object into its own block.
		// (In individual allocation mode, the constant will be 0 and this path will always be taken)
		if (size >= MONO_MEMPOOL_PREFER_INDIVIDUAL_ALLOCATION_SIZE) {
			guint new_size = SIZEOF_MEM_POOL + size;
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = (guint8 *)np + SIZEOF_MEM_POOL;
		} else {
			// Notice: any unused memory at the end of the old head becomes simply abandoned in this case until the mempool is freed (see Bugzilla #35136)
			guint new_size = get_next_size (pool, size);
			MonoMemPool *np = (MonoMemPool *)g_malloc (new_size);

			np->next = pool->next;
			np->size = new_size;
			pool->next = np;
			pool->pos = (guint8 *)np + SIZEOF_MEM_POOL;
			pool->end = (guint8 *)np + new_size;
			pool->d.allocated += new_size;
			UnlockedAdd64 (&total_bytes_allocated, new_size);

			rval = pool->pos;
			pool->pos += size;
		}
	}

	return rval;
}
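
/*
 * Illustrative sketch (hypothetical check, assumes a fresh page-sized pool and
 * requests that fit in the head block): the fast path is only a pointer bump,
 * so consecutive small allocations are contiguous.
 */
#if 0
static void
mempool_alloc_sketch (void)
{
	MonoMemPool *pool = mono_mempool_new ();
	guint8 *a = (guint8 *)mono_mempool_alloc (pool, 10);   // rounded up to 16 by ALIGN_SIZE
	guint8 *b = (guint8 *)mono_mempool_alloc (pool, 10);
	g_assert (b == a + ALIGN_SIZE (10));                    // bump-pointer allocation
	mono_mempool_destroy (pool);
}
#endif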
/**
 * mono_mempool_alloc0:
 *
 * same as \c mono_mempool_alloc, but fills memory with zero.
 */
gpointer
(mono_mempool_alloc0) (MonoMemPool *pool, guint size)
{
	size = ALIGN_SIZE (size);
	const gpointer rval = mono_mempool_alloc (pool, size);

	memset (rval, 0, size);

	return rval;
}
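
/*
 * Illustrative usage sketch (ExampleRecord is hypothetical): mono_mempool_alloc0
 * is the usual way to get zero-initialized structures out of a pool.
 */
#if 0
typedef struct { int a; gpointer b; } ExampleRecord;

static ExampleRecord *
example_record_new (MonoMemPool *pool)
{
	// All fields start out zero/NULL without an explicit memset here.
	return (ExampleRecord *)mono_mempool_alloc0 (pool, sizeof (ExampleRecord));
}
#endif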
/**
 * mono_mempool_contains_addr:
 *
 * Determines whether \p addr is inside the memory used by the mempool.
 */
gboolean
mono_mempool_contains_addr (MonoMemPool *pool,
			    gpointer addr)
{
	MonoMemPool *p = pool;

	while (p) {
		if (addr >= (gpointer)p && addr < (gpointer)((guint8 *)p + p->size))
			return TRUE;
		p = p->next;
	}

	return FALSE;
}
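
/*
 * Illustrative sketch (hypothetical check): addresses handed out by the pool lie
 * inside one of its blocks; unrelated addresses do not.
 */
#if 0
static void
mempool_contains_addr_sketch (MonoMemPool *pool)
{
	int on_stack;
	gpointer p = mono_mempool_alloc (pool, 32);
	g_assert (mono_mempool_contains_addr (pool, p));
	g_assert (!mono_mempool_contains_addr (pool, &on_stack));
}
#endif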
/**
 * mono_mempool_strdup:
 *
 * Same as strdup, but allocates memory from the mempool.
 * Returns: a pointer to the newly allocated string data inside the mempool.
 */
char *
mono_mempool_strdup (MonoMemPool *pool,
		     const char *s)
{
	int l;
	char *res;

	if (s == NULL)
		return NULL;

	l = strlen (s);
	res = (char *)mono_mempool_alloc (pool, l + 1);
	memcpy (res, s, l + 1);

	return res;
}
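
/*
 * Illustrative usage sketch (hypothetical caller): the copy lives as long as the
 * pool, so the caller never frees it individually.
 */
#if 0
static const char *
intern_name_sketch (MonoMemPool *pool, const char *transient_name)
{
	// Safe to keep the returned pointer for the lifetime of the pool, even
	// after transient_name goes away.
	return mono_mempool_strdup (pool, transient_name);
}
#endif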
char *
mono_mempool_strdup_vprintf (MonoMemPool *pool, const char *format, va_list args)
{
	size_t buflen;
	char *buf;
	va_list args2;
	va_copy (args2, args);
	int len = vsnprintf (NULL, 0, format, args2);
	va_end (args2);

	if (len >= 0 && (buf = (char*)mono_mempool_alloc (pool, (buflen = (size_t) (len + 1)))) != NULL) {
		vsnprintf (buf, buflen, format, args);
	} else {
		buf = NULL;
	}

	return buf;
}

char *
mono_mempool_strdup_printf (MonoMemPool *pool, const char *format, ...)
{
	char *buf;
	va_list args;
	va_start (args, format);
	buf = mono_mempool_strdup_vprintf (pool, format, args);
	va_end (args);
	return buf;
}
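
/*
 * Illustrative usage sketch (hypothetical caller): build a formatted string
 * directly in the pool; the buffer is sized by the NULL/0 vsnprintf pass above.
 */
#if 0
static char *
describe_block_sketch (MonoMemPool *pool, int index, guint32 size)
{
	return mono_mempool_strdup_printf (pool, "block %d: %u bytes", index, size);
}
#endif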
/**
 * mono_mempool_get_allocated:
 *
 * Return the amount of memory allocated for this mempool.
 */
guint32
mono_mempool_get_allocated (MonoMemPool *pool)
{
	return pool->d.allocated;
}

/**
 * mono_mempool_get_bytes_allocated:
 *
 * Return the number of bytes currently allocated for mempools.
 */
long
mono_mempool_get_bytes_allocated (void)
{
	return UnlockedRead64 (&total_bytes_allocated);
}
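
/*
 * Illustrative sketch (hypothetical check, ignores concurrent pool activity in
 * other threads): the global counter moves by a pool's whole footprint when the
 * pool is created and destroyed.
 */
#if 0
static void
mempool_accounting_sketch (void)
{
	gint64 before = mono_mempool_get_bytes_allocated ();
	MonoMemPool *pool = mono_mempool_new ();
	g_assert (mono_mempool_get_bytes_allocated () == before + mono_mempool_get_allocated (pool));
	mono_mempool_destroy (pool);
	g_assert (mono_mempool_get_bytes_allocated () == before);
}
#endif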