/**
 * \file
 */

#include "config.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <glib.h>

/* For dlmalloc.h */
#define USE_DL_PREFIX 1

#include "mono-codeman.h"
#include "mono-mmap.h"
#include "mono-counters.h"
#include "dlmalloc.h"
#include <mono/metadata/profiler-private.h>
#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include <mono/utils/mono-os-mutex.h>

static uintptr_t code_memory_used = 0;
static size_t dynamic_code_alloc_count;
static size_t dynamic_code_bytes_count;
static size_t dynamic_code_frees_count;
static MonoCodeManagerCallbacks code_manager_callbacks;

/*
 * AMD64 processors maintain icache coherency only for pages which are
 * marked executable. Also, Windows DEP requires us to obtain executable memory from
 * malloc when using dynamic code managers. The system malloc can't do this, so we use a
 * slightly modified version of Doug Lea's Malloc package for this purpose:
 * http://g.oswego.edu/dl/html/malloc.html
 */

#define MIN_PAGES 16

#if defined(__ia64__) || defined(__x86_64__) || defined (_WIN64)
/*
 * We require 16 byte alignment on amd64 so the fp literals embedded in the code are
 * properly aligned for SSE2.
 */
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
#endif

/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define MIN_BSIZE 32

#ifdef __x86_64__
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#else
#define ARCH_MAP_FLAGS 0
#endif

#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)

typedef struct _CodeChunk CodeChunk;

enum {
	CODE_FLAG_MMAP,
	CODE_FLAG_MALLOC
};

struct _CodeChunk {
	char *data;
	int pos;
	int size;
	CodeChunk *next;
	unsigned int flags: 8;
	/* this number of bytes is available to resolve addresses far in memory */
	unsigned int bsize: 24;
};

struct _MonoCodeManager {
	int dynamic;
	int read_only;
	CodeChunk *current;
	CodeChunk *full;
	CodeChunk *last;
};

#define ALIGN_INT(val,alignment) (((val) + (alignment - 1)) & ~(alignment - 1))
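/*
 * ALIGN_INT rounds val up to the next multiple of alignment, which must be a
 * power of two: e.g. ALIGN_INT (13, 8) == 16 and ALIGN_INT (16, 8) == 16.
 */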

#define VALLOC_FREELIST_SIZE 16

static mono_mutex_t valloc_mutex;
static GHashTable *valloc_freelists;

static void*
codechunk_valloc (void *preferred, guint32 size)
{
	void *ptr;
	GSList *freelist;

	if (!valloc_freelists) {
		mono_os_mutex_init_recursive (&valloc_mutex);
		valloc_freelists = g_hash_table_new (NULL, NULL);
	}

	/*
	 * Keep a small freelist of memory blocks to decrease pressure on the kernel memory subsystem to avoid #3321.
	 */
	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (freelist) {
		ptr = freelist->data;
		memset (ptr, 0, size);
		freelist = g_slist_delete_link (freelist, freelist);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		ptr = mono_valloc (preferred, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
		if (!ptr && preferred)
			ptr = mono_valloc (NULL, size, MONO_PROT_RWX | ARCH_MAP_FLAGS, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
	return ptr;
}

static void
codechunk_vfree (void *ptr, guint32 size)
{
	GSList *freelist;

	mono_os_mutex_lock (&valloc_mutex);
	freelist = (GSList *) g_hash_table_lookup (valloc_freelists, GUINT_TO_POINTER (size));
	if (!freelist || g_slist_length (freelist) < VALLOC_FREELIST_SIZE) {
		freelist = g_slist_prepend (freelist, ptr);
		g_hash_table_insert (valloc_freelists, GUINT_TO_POINTER (size), freelist);
	} else {
		mono_vfree (ptr, size, MONO_MEM_ACCOUNT_CODE);
	}
	mono_os_mutex_unlock (&valloc_mutex);
}
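/*
 * Note: each size class in valloc_freelists caches at most
 * VALLOC_FREELIST_SIZE blocks; anything past that bound is handed straight
 * back to the OS, so the cache stays bounded per distinct chunk size.
 */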

static void
codechunk_cleanup (void)
{
	GHashTableIter iter;
	gpointer key, value;

	if (!valloc_freelists)
		return;
	g_hash_table_iter_init (&iter, valloc_freelists);
	while (g_hash_table_iter_next (&iter, &key, &value)) {
		GSList *freelist = (GSList *) value;
		GSList *l;

		for (l = freelist; l; l = l->next) {
			mono_vfree (l->data, GPOINTER_TO_UINT (key), MONO_MEM_ACCOUNT_CODE);
		}
		g_slist_free (freelist);
	}
	g_hash_table_destroy (valloc_freelists);
}

void
mono_code_manager_init (void)
{
	mono_counters_register ("Dynamic code allocs", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_alloc_count);
	mono_counters_register ("Dynamic code bytes", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_bytes_count);
	mono_counters_register ("Dynamic code frees", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_frees_count);
}

void
mono_code_manager_cleanup (void)
{
	codechunk_cleanup ();
}

void
mono_code_manager_install_callbacks (MonoCodeManagerCallbacks* callbacks)
{
	code_manager_callbacks = *callbacks;
}

/**
 * mono_code_manager_new:
 *
 * Creates a new code manager. A code manager can be used to allocate memory
 * suitable for storing native code that can later be executed.
 * A code manager allocates memory from the operating system in large chunks
 * (typically 64KB in size) so that many methods can be allocated inside them
 * close together, improving cache locality.
 *
 * Returns: the new code manager
 */
MonoCodeManager*
mono_code_manager_new (void)
{
	return (MonoCodeManager *) g_malloc0 (sizeof (MonoCodeManager));
}

/**
 * mono_code_manager_new_dynamic:
 *
 * Creates a new code manager suitable for holding native code of single or
 * small methods that need to be deallocated independently of other native code.
 *
 * Returns: the new code manager
 */
MonoCodeManager*
mono_code_manager_new_dynamic (void)
{
	MonoCodeManager *cman = mono_code_manager_new ();
	cman->dynamic = 1;
	return cman;
}
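
/*
 * Usage sketch (illustrative only; the emitted byte and the sizes are invented):
 *
 *	MonoCodeManager *cman = mono_code_manager_new ();
 *	guint8 *code = (guint8 *) mono_code_manager_reserve (cman, 64);
 *	guint8 *p = code;
 *	*p++ = 0xc3;                                         // e.g. an x86 'ret'
 *	mono_code_manager_commit (cman, code, 64, p - code); // give back the unused tail
 *	mono_code_manager_destroy (cman);                    // frees every chunk at once
 *
 * For code that must be freed independently of everything else (e.g. a single
 * dynamic method), create the manager with mono_code_manager_new_dynamic ()
 * and destroy that manager when the method dies.
 */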

static void
free_chunklist (CodeChunk *chunk)
{
	CodeChunk *dead;

#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
	int valgrind_unregister = 0;
	if (RUNNING_ON_VALGRIND)
		valgrind_unregister = 1;
#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
#else
#define valgrind_unregister(x)
#endif

	for (; chunk; ) {
		dead = chunk;
		mono_profiler_code_chunk_destroy ((gpointer) dead->data);
		if (code_manager_callbacks.chunk_destroy)
			code_manager_callbacks.chunk_destroy ((gpointer)dead->data);
		chunk = chunk->next;
		if (dead->flags == CODE_FLAG_MMAP) {
			codechunk_vfree (dead->data, dead->size);
			/* valgrind_unregister(dead->data); */
		} else if (dead->flags == CODE_FLAG_MALLOC) {
			dlfree (dead->data);
		}
		code_memory_used -= dead->size;
		g_free (dead);
	}
}

/**
 * mono_code_manager_destroy:
 * \param cman a code manager
 * Free all the memory associated with the code manager \p cman.
 */
void
mono_code_manager_destroy (MonoCodeManager *cman)
{
	free_chunklist (cman->full);
	free_chunklist (cman->current);
	g_free (cman);
}

/**
 * mono_code_manager_invalidate:
 * \param cman a code manager
 * Fill all the memory with an invalid native code value
 * so that any attempt to execute code allocated in the code
 * manager \p cman will fail. This is used for debugging purposes.
 */
void
mono_code_manager_invalidate (MonoCodeManager *cman)
{
	CodeChunk *chunk;

#if defined(__i386__) || defined(__x86_64__)
	int fill_value = 0xcc; /* x86 break */
#else
	int fill_value = 0x2a;
#endif

	for (chunk = cman->current; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
	for (chunk = cman->full; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
}

/**
 * mono_code_manager_set_read_only:
 * \param cman a code manager
 * Make the code manager read only, so further allocation requests cause an assert.
 */
void
mono_code_manager_set_read_only (MonoCodeManager *cman)
{
	cman->read_only = TRUE;
}

/**
 * mono_code_manager_foreach:
 * \param cman a code manager
 * \param func a callback function pointer
 * \param user_data additional data to pass to \p func
 * Invokes the callback \p func for each different chunk of memory allocated
 * in the code manager \p cman.
 */
void
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
{
	CodeChunk *chunk;
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
	for (chunk = cman->full; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
}
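
/*
 * Example callback, a sketch only: the authoritative MonoCodeManagerFunc
 * typedef lives in mono-codeman.h, but from the calls above it receives the
 * chunk's data pointer, its total size, its thunk-area size and user_data,
 * and a non-zero return value stops the iteration early.
 *
 *	static int
 *	dump_chunk (void *data, int size, int bsize, void *user_data)
 *	{
 *		g_print ("chunk %p: %d bytes, %d reserved for thunks\n", data, size, bsize);
 *		return 0; // 0 means: keep iterating
 *	}
 *
 *	mono_code_manager_foreach (cman, dump_chunk, NULL);
 */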

/* BIND_ROOM is the divisor for the chunk of code size dedicated
 * to binding branches (branches not reachable with the immediate displacement)
 * bind_size = size/BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
 */
#if defined(__ppc__) || defined(__powerpc__)
#define BIND_ROOM 4
#endif
#if defined(TARGET_ARM64)
#define BIND_ROOM 4
#endif
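/*
 * Worked example: with BIND_ROOM 4, a 64KB chunk sets aside bind_size =
 * 65536 / 4 = 16384 bytes for branch thunks; a dynamic chunk reserves twice
 * that, since a full dynamic chunk has no sibling chunks to spill thunks into.
 */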

static CodeChunk*
new_codechunk (CodeChunk *last, int dynamic, int size)
{
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize, valloc_granule;
	CodeChunk *chunk;
	void *ptr;

#ifdef FORCE_MALLOC
	flags = CODE_FLAG_MALLOC;
#endif

	pagesize = mono_pagesize ();
	valloc_granule = mono_valloc_granule ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
		if (size < minsize)
			chunk_size = minsize;
		else {
			/* Allocate MIN_ALIGN-1 more than we need so we can still */
			/* guarantee MIN_ALIGN alignment for individual allocs */
			/* from mono_code_manager_reserve_align. */
			size += MIN_ALIGN - 1;
			size &= ~(MIN_ALIGN - 1);
			chunk_size = size;
			chunk_size += valloc_granule - 1;
			chunk_size &= ~ (valloc_granule - 1);
		}
	}
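	/*
	 * Worked example, assuming a 4096-byte page and valloc granule with
	 * MIN_PAGES 16: a 100-byte request gets the 65536-byte minimum chunk,
	 * while a 100000-byte request (already a multiple of MIN_ALIGN) is
	 * rounded up to the next granule multiple, 102400 bytes.
	 */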

#ifdef BIND_ROOM
	if (dynamic)
		/* Reserve more space since there are no other chunks we might use if this one gets full */
		bsize = (chunk_size * 2) / BIND_ROOM;
	else
		bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~ (MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		if (!dynamic) {
			chunk_size += valloc_granule - 1;
			chunk_size &= ~ (valloc_granule - 1);
		}
	}
#endif

	if (flags == CODE_FLAG_MALLOC) {
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
		if (!ptr)
			return NULL;
	} else {
		/* Try to allocate code chunks next to each other to help the VM */
		ptr = NULL;
		if (last)
			ptr = codechunk_valloc ((guint8*)last->data + last->size, chunk_size);
		if (!ptr)
			ptr = codechunk_valloc (NULL, chunk_size);
		if (!ptr)
			return NULL;
	}

	if (flags == CODE_FLAG_MALLOC) {
#ifdef BIND_ROOM
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
#endif
	}

	chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			dlfree (ptr);
		else
			mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = (char *) ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;
	if (code_manager_callbacks.chunk_new)
		code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
	mono_profiler_code_chunk_new ((gpointer) chunk->data, chunk->size);

	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}

/**
 * mono_code_manager_reserve_align:
 * \param cman a code manager
 * \param size size of memory to allocate
 * \param alignment power of two alignment value
 * Allocates at least \p size bytes of memory inside the code manager \p cman.
 * \returns the pointer to the allocated memory or NULL on failure
 */
void*
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
{
	CodeChunk *chunk, *prev;
	void *ptr;
	guint32 align_mask = alignment - 1;

	g_assert (!cman->read_only);

	/* eventually allow bigger alignments, but we need to fix the dynamic alloc code
	 * to handle this first
	 */
	g_assert (alignment <= MIN_ALIGN);

	if (cman->dynamic) {
		++dynamic_code_alloc_count;
		dynamic_code_bytes_count += size;
	}

	if (!cman->current) {
		cman->current = new_codechunk (cman->last, cman->dynamic, size);
		if (!cman->current)
			return NULL;
		cman->last = cman->current;
	}

	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
			chunk->pos = ALIGN_INT (chunk->pos, alignment);
			/* Align the chunk->data we add to chunk->pos */
			/* or we can't guarantee proper alignment */
			ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
			chunk->pos = ((char*)ptr - chunk->data) + size;
			return ptr;
		}
	}
	/*
	 * no room found, move one filled chunk to cman->full
	 * to keep cman->current from growing too much
	 */
	prev = NULL;
	for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
		if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
			continue;
		if (prev) {
			prev->next = chunk->next;
		} else {
			cman->current = chunk->next;
		}
		chunk->next = cman->full;
		cman->full = chunk;
		break;
	}
	chunk = new_codechunk (cman->last, cman->dynamic, size);
	if (!chunk)
		return NULL;
	chunk->next = cman->current;
	cman->current = chunk;
	cman->last = cman->current;
	chunk->pos = ALIGN_INT (chunk->pos, alignment);
	/* Align the chunk->data we add to chunk->pos */
	/* or we can't guarantee proper alignment */
	ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
	chunk->pos = ((char*)ptr - chunk->data) + size;
	return ptr;
}
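
/*
 * Usage sketch (illustrative; emit_sse2_code is a hypothetical emitter):
 *
 *	guint8 *start = (guint8 *) mono_code_manager_reserve_align (cman, 128, 16);
 *	guint8 *end = emit_sse2_code (start);
 *	mono_code_manager_commit (cman, start, 128, end - start);
 */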

/**
 * mono_code_manager_reserve:
 * \param cman a code manager
 * \param size size of memory to allocate
 * Allocates at least \p size bytes of memory inside the code manager \p cman.
 * \returns the pointer to the allocated memory or NULL on failure
 */
void*
mono_code_manager_reserve (MonoCodeManager *cman, int size)
{
	return mono_code_manager_reserve_align (cman, size, MIN_ALIGN);
}

/**
 * mono_code_manager_commit:
 * \param cman a code manager
 * \param data the pointer returned by mono_code_manager_reserve ()
 * \param size the size requested in the call to mono_code_manager_reserve ()
 * \param newsize the new size to reserve
 * If we reserved too much room for a method and we didn't allocate
 * already from the code manager, we can get back the excess allocation
 * for later use in the code manager.
 */
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
	g_assert (newsize <= size);

	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	}
}
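
/*
 * Note that the excess is only reclaimed when data is still the most recent
 * reservation in the current chunk (the address check above); if another
 * reservation happened in between, the unused tail simply stays allocated.
 * E.g. reserving 128 bytes, emitting 90 and committing with newsize 90
 * returns the remaining 38 bytes to the chunk.
 */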

/**
 * mono_code_manager_size:
 * \param cman a code manager
 * \param used_size pointer to an integer for the result
 * This function can be used to get statistics about a code manager:
 * the integer pointed to by \p used_size will contain how much
 * memory is actually used inside the code manager \p cman.
 * \returns the amount of memory allocated in \p cman
 */
int
mono_code_manager_size (MonoCodeManager *cman, int *used_size)
{
	CodeChunk *chunk;
	guint32 size = 0;
	guint32 used = 0;
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		size += chunk->size;
		used += chunk->pos;
	}
	for (chunk = cman->full; chunk; chunk = chunk->next) {
		size += chunk->size;
		used += chunk->pos;
	}
	if (used_size)
		*used_size = used;
	return size;
}
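
/*
 * Usage sketch (illustrative):
 *
 *	int used;
 *	int total = mono_code_manager_size (cman, &used);
 *	g_print ("code manager: %d of %d bytes used\n", used, total);
 */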