12 #define USE_DL_PREFIX 1
14 #include "mono-codeman.h"
15 #include "mono-mmap.h"
16 #include "mono-counters.h"
18 #include <mono/io-layer/io-layer.h>
19 #include <mono/metadata/profiler-private.h>
20 #ifdef HAVE_VALGRIND_MEMCHECK_H
21 #include <valgrind/memcheck.h>
24 #include <mono/utils/mono-os-mutex.h>
/* Total bytes of code memory currently allocated by all code managers;
 * updated when chunks are created/destroyed, reported to the resource-limit checker. */
static uintptr_t code_memory_used = 0;
/* Statistics counters registered with mono-counters in mono_code_manager_init (). */
static size_t dynamic_code_alloc_count;
static size_t dynamic_code_bytes_count;
static size_t dynamic_code_frees_count;
/* Process-wide chunk lifecycle callbacks installed via mono_code_manager_install_callbacks ();
 * copied by value, so a zeroed struct means "no callbacks". */
static MonoCodeManagerCallbacks code_manager_callbacks;
34 * AMD64 processors maintain icache coherency only for pages which are
35 * marked executable. Also, windows DEP requires us to obtain executable memory from
36 * malloc when using dynamic code managers. The system malloc can't do this so we use a
37 * slightly modified version of Doug Lea's Malloc package for this purpose:
38 * http://g.oswego.edu/dl/html/malloc.html
43 #if defined(__ia64__) || defined(__x86_64__) || defined (_WIN64)
45 * We require 16 byte alignment on amd64 so the fp literals embedded in the code are
46 * properly aligned for SSE2.
/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
/* NOTE(review): the two ARCH_MAP_FLAGS definitions below are alternatives; the
 * surrounding #if/#else/#endif guards appear elided in this view — confirm against
 * the full file before editing. */
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#define ARCH_MAP_FLAGS 0
/* Code memory must be readable, writable and executable (see the icache/DEP note above). */
#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
/* Forward typedef; note the struct tag is historically spelled "_CodeChunck"
 * (typo kept — other code references the tag by this name). */
typedef struct _CodeChunck CodeChunk;
/* NOTE(review): the struct header and several fields of _CodeChunck are elided
 * in this view; the fields below are bitfields packed into one word. */
unsigned int flags : 8;
/* this number of bytes is available to resolve addresses far in memory */
unsigned int bsize : 24;
/* NOTE(review): the body of _MonoCodeManager is elided in this view. */
struct _MonoCodeManager {
/* Round VAL up to the next multiple of ALIGNMENT (ALIGNMENT must be a power of two).
 * Fix: fully parenthesize the ALIGNMENT argument so expression arguments
 * (e.g. a bitwise-or) expand correctly. */
#define ALIGN_INT(val,alignment) (((val) + ((alignment) - 1)) & ~((alignment) - 1))
/* Maximum number of cached blocks kept per size class in the valloc freelist. */
#define VALLOC_FREELIST_SIZE 16

/* Protects valloc_freelists; recursive, initialized lazily on first allocation. */
static mono_mutex_t valloc_mutex;
/* Maps block size (as GUINT_TO_POINTER) -> GSList of free RWX blocks of that size. */
static GHashTable *valloc_freelists;
98 codechunk_valloc (void *preferred
, guint32 size
)
103 if (!valloc_freelists
) {
104 mono_os_mutex_init_recursive (&valloc_mutex
);
105 valloc_freelists
= g_hash_table_new (NULL
, NULL
);
109 * Keep a small freelist of memory blocks to decrease pressure on the kernel memory subsystem to avoid #3321.
111 mono_os_mutex_lock (&valloc_mutex
);
112 freelist
= (GSList
*) g_hash_table_lookup (valloc_freelists
, GUINT_TO_POINTER (size
));
114 ptr
= freelist
->data
;
115 memset (ptr
, 0, size
);
116 freelist
= g_slist_delete_link (freelist
, freelist
);
117 g_hash_table_insert (valloc_freelists
, GUINT_TO_POINTER (size
), freelist
);
119 ptr
= mono_valloc (preferred
, size
, MONO_PROT_RWX
| ARCH_MAP_FLAGS
, MONO_MEM_ACCOUNT_CODE
);
120 if (!ptr
&& preferred
)
121 ptr
= mono_valloc (NULL
, size
, MONO_PROT_RWX
| ARCH_MAP_FLAGS
, MONO_MEM_ACCOUNT_CODE
);
123 mono_os_mutex_unlock (&valloc_mutex
);
128 codechunk_vfree (void *ptr
, guint32 size
)
132 mono_os_mutex_lock (&valloc_mutex
);
133 freelist
= (GSList
*) g_hash_table_lookup (valloc_freelists
, GUINT_TO_POINTER (size
));
134 if (!freelist
|| g_slist_length (freelist
) < VALLOC_FREELIST_SIZE
) {
135 freelist
= g_slist_prepend (freelist
, ptr
);
136 g_hash_table_insert (valloc_freelists
, GUINT_TO_POINTER (size
), freelist
);
138 mono_vfree (ptr
, size
, MONO_MEM_ACCOUNT_CODE
);
140 mono_os_mutex_unlock (&valloc_mutex
);
144 codechunk_cleanup (void)
149 if (!valloc_freelists
)
151 g_hash_table_iter_init (&iter
, valloc_freelists
);
152 while (g_hash_table_iter_next (&iter
, &key
, &value
)) {
153 GSList
*freelist
= (GSList
*) value
;
156 for (l
= freelist
; l
; l
= l
->next
) {
157 mono_vfree (l
->data
, GPOINTER_TO_UINT (key
), MONO_MEM_ACCOUNT_CODE
);
159 g_slist_free (freelist
);
161 g_hash_table_destroy (valloc_freelists
);
/*
 * mono_code_manager_init:
 * Register the dynamic-code statistics counters with the counters
 * subsystem so they appear in runtime diagnostics.
 * NOTE(review): the return type line and braces appear elided in this view.
 */
mono_code_manager_init (void)
	mono_counters_register ("Dynamic code allocs", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_alloc_count);
	mono_counters_register ("Dynamic code bytes", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_bytes_count);
	mono_counters_register ("Dynamic code frees", MONO_COUNTER_JIT | MONO_COUNTER_ULONG, &dynamic_code_frees_count);
/*
 * mono_code_manager_cleanup:
 * Runtime-shutdown hook: releases the codechunk valloc freelists.
 */
mono_code_manager_cleanup (void)
	codechunk_cleanup ();
179 mono_code_manager_install_callbacks (MonoCodeManagerCallbacks
* callbacks
)
181 code_manager_callbacks
= *callbacks
;
185 * mono_code_manager_new:
187 * Creates a new code manager. A code manager can be used to allocate memory
188 * suitable for storing native code that can be later executed.
189 * A code manager allocates memory from the operating system in large chunks
190 * (typically 64KB in size) so that many methods can be allocated inside them
191 * close together, improving cache locality.
193 * Returns: the new code manager
196 mono_code_manager_new (void)
198 return (MonoCodeManager
*) g_malloc0 (sizeof (MonoCodeManager
));
202 * mono_code_manager_new_dynamic:
204 * Creates a new code manager suitable for holding native code that can be
205 * used for single or small methods that need to be deallocated independently
206 * of other native code.
208 * Returns: the new code manager
211 mono_code_manager_new_dynamic (void)
213 MonoCodeManager
*cman
= mono_code_manager_new ();
/*
 * free_chunklist:
 * Walk a list of CodeChunks, notifying the profiler and any installed
 * chunk_destroy callback, then releasing each chunk's memory according to
 * how it was allocated (CODE_FLAG_MMAP vs. CODE_FLAG_MALLOC).
 * NOTE(review): the walk loop itself and the malloc-flavored free appear
 * elided in this view — confirm against the full file.
 */
free_chunklist (CodeChunk *chunk)
#if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
	/* Only tell valgrind about unmapped code when actually running under it. */
	int valgrind_unregister = 0;
	if (RUNNING_ON_VALGRIND)
		valgrind_unregister = 1;
	/* The macro deliberately shadows the flag variable of the same name above. */
#define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
#define valgrind_unregister(x)
	/* Notify listeners before the memory goes away. */
	mono_profiler_code_chunk_destroy ((gpointer) dead->data);
	if (code_manager_callbacks.chunk_destroy)
		code_manager_callbacks.chunk_destroy ((gpointer)dead->data);
	if (dead->flags == CODE_FLAG_MMAP) {
		/* mmap-ed chunks go back to the per-size freelist (or the OS). */
		codechunk_vfree (dead->data, dead->size);
		/* valgrind_unregister(dead->data); */
	} else if (dead->flags == CODE_FLAG_MALLOC) {
	/* Keep the global accounting in sync with the chunk being released. */
	code_memory_used -= dead->size;
251 * mono_code_manager_destroy:
252 * @cman: a code manager
254 * Free all the memory associated with the code manager @cman.
257 mono_code_manager_destroy (MonoCodeManager
*cman
)
259 free_chunklist (cman
->full
);
260 free_chunklist (cman
->current
);
265 * mono_code_manager_invalidate:
266 * @cman: a code manager
268 * Fill all the memory with an invalid native code value
269 * so that any attempt to execute code allocated in the code
270 * manager @cman will fail. This is used for debugging purposes.
273 mono_code_manager_invalidate (MonoCodeManager
*cman
)
277 #if defined(__i386__) || defined(__x86_64__)
278 int fill_value
= 0xcc; /* x86 break */
280 int fill_value
= 0x2a;
283 for (chunk
= cman
->current
; chunk
; chunk
= chunk
->next
)
284 memset (chunk
->data
, fill_value
, chunk
->size
);
285 for (chunk
= cman
->full
; chunk
; chunk
= chunk
->next
)
286 memset (chunk
->data
, fill_value
, chunk
->size
);
290 * mono_code_manager_set_read_only:
291 * @cman: a code manager
293 * Make the code manager read only, so further allocation requests cause an assert.
296 mono_code_manager_set_read_only (MonoCodeManager
*cman
)
298 cman
->read_only
= TRUE
;
302 * mono_code_manager_foreach:
303 * @cman: a code manager
304 * @func: a callback function pointer
305 * @user_data: additional data to pass to @func
307 * Invokes the callback @func for each different chunk of memory allocated
308 * in the code manager @cman.
311 mono_code_manager_foreach (MonoCodeManager
*cman
, MonoCodeManagerFunc func
, void *user_data
)
314 for (chunk
= cman
->current
; chunk
; chunk
= chunk
->next
) {
315 if (func (chunk
->data
, chunk
->size
, chunk
->bsize
, user_data
))
318 for (chunk
= cman
->full
; chunk
; chunk
= chunk
->next
) {
319 if (func (chunk
->data
, chunk
->size
, chunk
->bsize
, user_data
))
/* BIND_ROOM is the divisor for the chunk of code size dedicated
 * to binding branches (branches not reachable with the immediate displacement)
 * bind_size = size/BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
 */
/* NOTE(review): the per-architecture BIND_ROOM/MIN_BSIZE values guarded by
 * these #ifs appear elided in this view. */
#if defined(__ppc__) || defined(__powerpc__)
#if defined(TARGET_ARM64)
/*
 * new_codechunk:
 * Allocate and initialize a fresh CodeChunk able to hold at least SIZE bytes.
 * DYNAMIC selects dlmalloc-backed memory (independent dealloc, Windows DEP);
 * otherwise RWX pages come from codechunk_valloc, placed after LAST when
 * possible so related code stays close together.
 * NOTE(review): the return-type line, several conditionals, the NULL checks
 * and the final return appear elided in this view — confirm against the full file.
 */
new_codechunk (CodeChunk *last, int dynamic, int size)
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize, valloc_granule;
	/* dynamic managers allocate through the embedded dlmalloc */
	flags = CODE_FLAG_MALLOC;
	pagesize = mono_pagesize ();
	valloc_granule = mono_valloc_granule ();
	flags = CODE_FLAG_MALLOC;
	/* A chunk is at least MIN_PAGES pages and at least one valloc granule. */
	minsize = MAX (pagesize * MIN_PAGES, valloc_granule);
	chunk_size = minsize;
	/* Allocate MIN_ALIGN-1 more than we need so we can still */
	/* guarantee MIN_ALIGN alignment for individual allocs */
	/* from mono_code_manager_reserve_align. */
	size += MIN_ALIGN - 1;
	size &= ~(MIN_ALIGN - 1);
	/* Round the chunk up to the allocation granularity. */
	chunk_size += valloc_granule - 1;
	chunk_size &= ~ (valloc_granule - 1);
	/* Reserve more space since there are no other chunks we might use if this one gets full */
	bsize = (chunk_size * 2) / BIND_ROOM;
	bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
	/* Keep the thunk area MIN_ALIGN-aligned. */
	bsize += MIN_ALIGN -1;
	bsize &= ~ (MIN_ALIGN - 1);
	/* Grow the chunk if the request plus thunk area would not fit. */
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		chunk_size += valloc_granule - 1;
		chunk_size &= ~ (valloc_granule - 1);
	if (flags == CODE_FLAG_MALLOC) {
		/* Over-allocate so a MIN_ALIGN-aligned start is always available. */
		ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
	/* Try to allocate code chunks next to each other to help the VM */
	ptr = codechunk_valloc ((guint8 *)last->data + last->size, chunk_size);
	ptr = codechunk_valloc (NULL, chunk_size);
	if (flags == CODE_FLAG_MALLOC) {
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
	chunk = (CodeChunk *) g_malloc (sizeof (CodeChunk));
	/* On CodeChunk allocation failure, release the backing memory. */
	if (flags == CODE_FLAG_MALLOC)
	mono_vfree (ptr, chunk_size, MONO_MEM_ACCOUNT_CODE);
	chunk->size = chunk_size;
	chunk->data = (char *) ptr;
	chunk->flags = flags;
	chunk->bsize = bsize;
	/* Tell the installed callbacks and the profiler about the new chunk. */
	if (code_manager_callbacks.chunk_new)
		code_manager_callbacks.chunk_new ((gpointer)chunk->data, chunk->size);
	mono_profiler_code_chunk_new((gpointer) chunk->data, chunk->size);
	code_memory_used += chunk_size;
	mono_runtime_resource_check_limit (MONO_RESOURCE_JIT_CODE, code_memory_used);
	/*printf ("code chunk at: %p\n", ptr);*/
/**
 * mono_code_manager_reserve_align:
 * @cman: a code manager
 * @size: size of memory to allocate
 * @alignment: power of two alignment value
 *
 * Allocates at least @size bytes of memory inside the code manager @cman.
 *
 * Returns: the pointer to the allocated memory or #NULL on failure
 */
mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
	CodeChunk *chunk, *prev;
	guint32 align_mask = alignment - 1;
	/* Allocating from a read-only manager is a caller bug. */
	g_assert (!cman->read_only);
	/* eventually allow bigger alignments, but we need to fix the dynamic alloc code to
	 * handle them (NOTE(review): comment terminator elided in this view). */
	g_assert (alignment <= MIN_ALIGN);
	/* Update the statistics counters registered in mono_code_manager_init (). */
	++dynamic_code_alloc_count;
	dynamic_code_bytes_count += size;
	/* First allocation: create the initial chunk. */
	if (!cman->current) {
		cman->current = new_codechunk (cman->last, cman->dynamic, size);
		cman->last = cman->current;
	/* Fast path: find an existing chunk with enough aligned room. */
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
			chunk->pos = ALIGN_INT (chunk->pos, alignment);
			/* Align the chunk->data we add to chunk->pos */
			/* or we can't guarantee proper alignment */
			ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
			chunk->pos = ((char*)ptr - chunk->data) + size;
	/*
	 * no room found, move one filled chunk to cman->full
	 * to keep cman->current from growing too much
	 */
	for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
		if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
		/* Unlink the filled chunk from the current list... */
		prev->next = chunk->next;
		cman->current = chunk->next;
		/* ...and push it onto the full list. */
		chunk->next = cman->full;
	/* Slow path: allocate a brand new chunk for this request. */
	chunk = new_codechunk (cman->last, cman->dynamic, size);
	chunk->next = cman->current;
	cman->current = chunk;
	cman->last = cman->current;
	chunk->pos = ALIGN_INT (chunk->pos, alignment);
	/* Align the chunk->data we add to chunk->pos */
	/* or we can't guarantee proper alignment */
	ptr = (void*)((((uintptr_t)chunk->data + align_mask) & ~(uintptr_t)align_mask) + chunk->pos);
	chunk->pos = ((char*)ptr - chunk->data) + size;
513 * mono_code_manager_reserve:
514 * @cman: a code manager
515 * @size: size of memory to allocate
517 * Allocates at least @size bytes of memory inside the code manager @cman.
519 * Returns: the pointer to the allocated memory or #NULL on failure
522 mono_code_manager_reserve (MonoCodeManager
*cman
, int size
)
524 return mono_code_manager_reserve_align (cman
, size
, MIN_ALIGN
);
528 * mono_code_manager_commit:
529 * @cman: a code manager
530 * @data: the pointer returned by mono_code_manager_reserve ()
531 * @size: the size requested in the call to mono_code_manager_reserve ()
532 * @newsize: the new size to reserve
534 * If we reserved too much room for a method and we didn't allocate
535 * already from the code manager, we can get back the excess allocation
536 * for later use in the code manager.
539 mono_code_manager_commit (MonoCodeManager
*cman
, void *data
, int size
, int newsize
)
541 g_assert (newsize
<= size
);
543 if (cman
->current
&& (size
!= newsize
) && (data
== cman
->current
->data
+ cman
->current
->pos
- size
)) {
544 cman
->current
->pos
-= size
- newsize
;
/**
 * mono_code_manager_size:
 * @cman: a code manager
 * @used_size: pointer to an integer for the result
 *
 * This function can be used to get statistics about a code manager:
 * the integer pointed to by @used_size will contain how much
 * memory is actually used inside the code managed @cman.
 *
 * Returns: the amount of memory allocated in @cman
 */
mono_code_manager_size (MonoCodeManager *cman, int *used_size)
	/* NOTE(review): the accumulation inside these loops and the function's
	 * return appear elided in this view — confirm against the full file. */
	for (chunk = cman->current; chunk; chunk = chunk->next) {
	for (chunk = cman->full; chunk; chunk = chunk->next) {