2010-04-07 Rodrigo Kumpera <rkumpera@novell.com>
[mono.git] / mono / utils / mono-codeman.c
blobfcef18e0f7dcddf886182188ce19566072cb2976
1 #include "config.h"
3 #ifdef HAVE_UNISTD_H
4 #include <unistd.h>
5 #endif
6 #include <stdlib.h>
7 #include <string.h>
8 #include <assert.h>
9 #include <glib.h>
11 /* For dlmalloc.h */
12 #define USE_DL_PREFIX 1
14 #include "mono-codeman.h"
15 #include "mono-mmap.h"
16 #include "dlmalloc.h"
17 #include <mono/metadata/class-internals.h>
18 #include <mono/metadata/profiler-private.h>
19 #ifdef HAVE_VALGRIND_MEMCHECK_H
20 #include <valgrind/memcheck.h>
21 #endif
/*
 * AMD64 processors maintain icache coherency only for pages which are
 * marked executable. Also, windows DEP requires us to obtain executable memory from
 * malloc when using dynamic code managers. The system malloc can't do this so we use a
 * slightly modified version of Doug Lea's Malloc package for this purpose:
 * http://g.oswego.edu/dl/html/malloc.html
 */
/* Minimum number of OS pages to request for a non-dynamic code chunk. */
#define MIN_PAGES 16

#if defined(__ia64__) || defined(__x86_64__)
/*
 * We require 16 byte alignment on amd64 so the fp literals embedded in the code are
 * properly aligned for SSE2.
 */
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
#endif

/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
/* Minimum size of the branch-binding (thunk) area reserved in a chunk. */
#define MIN_BSIZE 32

#ifdef __x86_64__
/* On amd64, request memory reachable with 32-bit addresses (MONO_MMAP_32BIT). */
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#else
#define ARCH_MAP_FLAGS 0
#endif

/* Protection flags used for executable code memory. */
#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
/* A CodeChunk is one contiguous slab of memory used to hold native code. */
typedef struct _CodeChunck CodeChunk;

/* How a chunk's backing memory was obtained (stored in CodeChunk.flags). */
enum {
	CODE_FLAG_MMAP,
	CODE_FLAG_MALLOC
};

/* NOTE(review): the struct tag is historically misspelled ("Chunck");
 * it is kept unchanged since other code may name the tag directly. */
struct _CodeChunck {
	char *data;		/* base address of the chunk's memory */
	int pos;		/* offset of the first free byte */
	int size;		/* total size of the chunk in bytes */
	CodeChunk *next;	/* next chunk in the manager's list */
	unsigned int flags: 8;	/* CODE_FLAG_MMAP or CODE_FLAG_MALLOC */
	/* this number of bytes is available to resolve addresses far in memory */
	unsigned int bsize: 24;
};
72 struct _MonoCodeManager {
73 int dynamic;
74 int read_only;
75 CodeChunk *current;
76 CodeChunk *full;
/*
 * Round @val up to the next multiple of @alignment (must be a power of two).
 * Fix: @alignment is now fully parenthesized in the expansion; the previous
 * form `(alignment - 1)` mis-evaluated when an expression was passed in.
 */
#define ALIGN_INT(val,alignment) (((val) + ((alignment) - 1)) & ~((alignment) - 1))
81 /**
82 * mono_code_manager_new:
84 * Creates a new code manager. A code manager can be used to allocate memory
85 * suitable for storing native code that can be later executed.
86 * A code manager allocates memory from the operating system in large chunks
87 * (typically 64KB in size) so that many methods can be allocated inside them
88 * close together, improving cache locality.
90 * Returns: the new code manager
92 MonoCodeManager*
93 mono_code_manager_new (void)
95 MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
96 if (!cman)
97 return NULL;
98 cman->current = NULL;
99 cman->full = NULL;
100 cman->dynamic = 0;
101 cman->read_only = 0;
102 return cman;
106 * mono_code_manager_new_dynamic:
108 * Creates a new code manager suitable for holding native code that can be
109 * used for single or small methods that need to be deallocated independently
110 * of other native code.
112 * Returns: the new code manager
114 MonoCodeManager*
115 mono_code_manager_new_dynamic (void)
117 MonoCodeManager *cman = mono_code_manager_new ();
118 cman->dynamic = 1;
119 return cman;
123 static void
124 free_chunklist (CodeChunk *chunk)
126 CodeChunk *dead;
128 #if defined(HAVE_VALGRIND_MEMCHECK_H) && defined (VALGRIND_JIT_UNREGISTER_MAP)
129 int valgrind_unregister = 0;
130 if (RUNNING_ON_VALGRIND)
131 valgrind_unregister = 1;
132 #define valgrind_unregister(x) do { if (valgrind_unregister) { VALGRIND_JIT_UNREGISTER_MAP(NULL,x); } } while (0)
133 #else
134 #define valgrind_unregister(x)
135 #endif
137 for (; chunk; ) {
138 dead = chunk;
139 mono_profiler_code_chunk_destroy ((gpointer) dead->data);
140 chunk = chunk->next;
141 if (dead->flags == CODE_FLAG_MMAP) {
142 mono_vfree (dead->data, dead->size);
143 /* valgrind_unregister(dead->data); */
144 } else if (dead->flags == CODE_FLAG_MALLOC) {
145 dlfree (dead->data);
147 free (dead);
152 * mono_code_manager_destroy:
153 * @cman: a code manager
155 * Free all the memory associated with the code manager @cman.
157 void
158 mono_code_manager_destroy (MonoCodeManager *cman)
160 free_chunklist (cman->full);
161 free_chunklist (cman->current);
162 free (cman);
166 * mono_code_manager_invalidate:
167 * @cman: a code manager
169 * Fill all the memory with an invalid native code value
170 * so that any attempt to execute code allocated in the code
171 * manager @cman will fail. This is used for debugging purposes.
173 void
174 mono_code_manager_invalidate (MonoCodeManager *cman)
176 CodeChunk *chunk;
178 #if defined(__i386__) || defined(__x86_64__)
179 int fill_value = 0xcc; /* x86 break */
180 #else
181 int fill_value = 0x2a;
182 #endif
184 for (chunk = cman->current; chunk; chunk = chunk->next)
185 memset (chunk->data, fill_value, chunk->size);
186 for (chunk = cman->full; chunk; chunk = chunk->next)
187 memset (chunk->data, fill_value, chunk->size);
191 * mono_code_manager_set_read_only:
192 * @cman: a code manager
194 * Make the code manager read only, so further allocation requests cause an assert.
196 void
197 mono_code_manager_set_read_only (MonoCodeManager *cman)
199 cman->read_only = TRUE;
203 * mono_code_manager_foreach:
204 * @cman: a code manager
205 * @func: a callback function pointer
206 * @user_data: additional data to pass to @func
208 * Invokes the callback @func for each different chunk of memory allocated
209 * in the code manager @cman.
211 void
212 mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
214 CodeChunk *chunk;
215 for (chunk = cman->current; chunk; chunk = chunk->next) {
216 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
217 return;
219 for (chunk = cman->full; chunk; chunk = chunk->next) {
220 if (func (chunk->data, chunk->size, chunk->bsize, user_data))
221 return;
/* BIND_ROOM is the divisor for the chunk of code size dedicated
 * to binding branches (branches not reachable with the immediate displacement)
 * bind_size = size/BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
 */
/* Reserve 1/4 of each chunk for branch thunks on PowerPC. */
#if defined(__ppc__) || defined(__powerpc__)
#define BIND_ROOM 4
#endif
/* Reserve 1/8 of each chunk for branch thunks on ARM. */
#if defined(__arm__)
#define BIND_ROOM 8
#endif
237 static CodeChunk*
238 new_codechunk (int dynamic, int size)
240 int minsize, flags = CODE_FLAG_MMAP;
241 int chunk_size, bsize = 0;
242 int pagesize;
243 CodeChunk *chunk;
244 void *ptr;
246 #ifdef FORCE_MALLOC
247 flags = CODE_FLAG_MALLOC;
248 #endif
250 pagesize = mono_pagesize ();
252 if (dynamic) {
253 chunk_size = size;
254 flags = CODE_FLAG_MALLOC;
255 } else {
256 minsize = pagesize * MIN_PAGES;
257 if (size < minsize)
258 chunk_size = minsize;
259 else {
260 chunk_size = size;
261 chunk_size += pagesize - 1;
262 chunk_size &= ~ (pagesize - 1);
265 #ifdef BIND_ROOM
266 bsize = chunk_size / BIND_ROOM;
267 if (bsize < MIN_BSIZE)
268 bsize = MIN_BSIZE;
269 bsize += MIN_ALIGN -1;
270 bsize &= ~ (MIN_ALIGN - 1);
271 if (chunk_size - size < bsize) {
272 chunk_size = size + bsize;
273 chunk_size += pagesize - 1;
274 chunk_size &= ~ (pagesize - 1);
276 #endif
278 if (flags == CODE_FLAG_MALLOC) {
279 ptr = dlmemalign (MIN_ALIGN, chunk_size + MIN_ALIGN - 1);
280 if (!ptr)
281 return NULL;
282 } else {
283 ptr = mono_valloc (NULL, chunk_size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
284 if (!ptr)
285 return NULL;
288 if (flags == CODE_FLAG_MALLOC) {
289 #ifdef BIND_ROOM
290 /* Make sure the thunks area is zeroed */
291 memset (ptr, 0, bsize);
292 #endif
295 chunk = malloc (sizeof (CodeChunk));
296 if (!chunk) {
297 if (flags == CODE_FLAG_MALLOC)
298 dlfree (ptr);
299 else
300 mono_vfree (ptr, chunk_size);
301 return NULL;
303 chunk->next = NULL;
304 chunk->size = chunk_size;
305 chunk->data = ptr;
306 chunk->flags = flags;
307 chunk->pos = bsize;
308 chunk->bsize = bsize;
309 mono_profiler_code_chunk_new((gpointer) chunk->data, chunk->size);
311 /*printf ("code chunk at: %p\n", ptr);*/
312 return chunk;
316 * mono_code_manager_reserve:
317 * @cman: a code manager
318 * @size: size of memory to allocate
319 * @alignment: power of two alignment value
321 * Allocates at least @size bytes of memory inside the code manager @cman.
323 * Returns: the pointer to the allocated memory or #NULL on failure
325 void*
326 mono_code_manager_reserve_align (MonoCodeManager *cman, int size, int alignment)
328 CodeChunk *chunk, *prev;
329 void *ptr;
331 g_assert (!cman->read_only);
333 /* eventually allow bigger alignments, but we need to fix the dynamic alloc code to
334 * handle this before
336 g_assert (alignment <= MIN_ALIGN);
338 if (cman->dynamic) {
339 ++mono_stats.dynamic_code_alloc_count;
340 mono_stats.dynamic_code_bytes_count += size;
343 if (!cman->current) {
344 cman->current = new_codechunk (cman->dynamic, size);
345 if (!cman->current)
346 return NULL;
349 for (chunk = cman->current; chunk; chunk = chunk->next) {
350 if (ALIGN_INT (chunk->pos, alignment) + size <= chunk->size) {
351 chunk->pos = ALIGN_INT (chunk->pos, alignment);
352 ptr = chunk->data + chunk->pos;
353 chunk->pos += size;
354 return ptr;
358 * no room found, move one filled chunk to cman->full
359 * to keep cman->current from growing too much
361 prev = NULL;
362 for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
363 if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
364 continue;
365 if (prev) {
366 prev->next = chunk->next;
367 } else {
368 cman->current = chunk->next;
370 chunk->next = cman->full;
371 cman->full = chunk;
372 break;
374 chunk = new_codechunk (cman->dynamic, size);
375 if (!chunk)
376 return NULL;
377 chunk->next = cman->current;
378 cman->current = chunk;
379 chunk->pos = ALIGN_INT (chunk->pos, alignment);
380 ptr = chunk->data + chunk->pos;
381 chunk->pos += size;
382 return ptr;
386 * mono_code_manager_reserve:
387 * @cman: a code manager
388 * @size: size of memory to allocate
390 * Allocates at least @size bytes of memory inside the code manager @cman.
392 * Returns: the pointer to the allocated memory or #NULL on failure
394 void*
395 mono_code_manager_reserve (MonoCodeManager *cman, int size)
397 return mono_code_manager_reserve_align (cman, size, MIN_ALIGN);
401 * mono_code_manager_commit:
402 * @cman: a code manager
403 * @data: the pointer returned by mono_code_manager_reserve ()
404 * @size: the size requested in the call to mono_code_manager_reserve ()
405 * @newsize: the new size to reserve
407 * If we reserved too much room for a method and we didn't allocate
408 * already from the code manager, we can get back the excess allocation
409 * for later use in the code manager.
411 void
412 mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
414 g_assert (newsize <= size);
416 if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
417 cman->current->pos -= size - newsize;
422 * mono_code_manager_size:
423 * @cman: a code manager
424 * @used_size: pointer to an integer for the result
426 * This function can be used to get statistics about a code manager:
427 * the integer pointed to by @used_size will contain how much
428 * memory is actually used inside the code managed @cman.
430 * Returns: the amount of memory allocated in @cman
433 mono_code_manager_size (MonoCodeManager *cman, int *used_size)
435 CodeChunk *chunk;
436 guint32 size = 0;
437 guint32 used = 0;
438 for (chunk = cman->current; chunk; chunk = chunk->next) {
439 size += chunk->size;
440 used += chunk->pos;
442 for (chunk = cman->full; chunk; chunk = chunk->next) {
443 size += chunk->size;
444 used += chunk->pos;
446 if (used_size)
447 *used_size = used;
448 return size;