[mono-project.git] / mono / utils / mono-codeman.c
blob 7897c3bc41362e9b16cea8e553e43d3322f2f48a
#include "config.h"
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <glib.h>

#include "mono-codeman.h"
#include "mono-mmap.h"

#define MIN_PAGES 16

#if defined(__ia64__) || defined(__x86_64__)
/*
 * We require 16 byte alignment on amd64 so the fp literals embedded in the code are
 * properly aligned for SSE2.
 */
#define MIN_ALIGN 16
#else
#define MIN_ALIGN 8
#endif

/* if a chunk has less than this amount of free space it's considered full */
#define MAX_WASTAGE 32
#define MIN_BSIZE 32

#ifdef __x86_64__
#define ARCH_MAP_FLAGS MONO_MMAP_32BIT
#else
#define ARCH_MAP_FLAGS 0
#endif

#define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)

typedef struct _CodeChunck CodeChunk;

enum {
	CODE_FLAG_MMAP,
	CODE_FLAG_MALLOC
};

struct _CodeChunck {
	char *data;
	int pos;
	int size;
	CodeChunk *next;
	unsigned int flags: 8;
	/* this number of bytes is available to resolve addresses far in memory */
	unsigned int bsize: 24;
};
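
/*
 * Illustrative sketch (not part of the original file): layout of a chunk's
 * data buffer. The first bsize bytes are set aside for branch thunks (see
 * BIND_ROOM below); pos starts right after them and grows toward size as
 * code is handed out by mono_code_manager_reserve ().
 *
 *   data            data + bsize            data + pos          data + size
 *    |  thunk area  |     emitted code       |      free space      |
 */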

struct _MonoCodeManager {
	int dynamic;
	CodeChunk *current;
	CodeChunk *full;
};

MonoCodeManager*
mono_code_manager_new (void)
{
	MonoCodeManager *cman = malloc (sizeof (MonoCodeManager));
	if (!cman)
		return NULL;
	cman->current = NULL;
	cman->full = NULL;
	cman->dynamic = 0;
	return cman;
}

MonoCodeManager*
mono_code_manager_new_dynamic (void)
{
	MonoCodeManager *cman = mono_code_manager_new ();
	cman->dynamic = 1;
	return cman;
}

static void
free_chunklist (CodeChunk *chunk)
{
	CodeChunk *dead;
	for (; chunk; ) {
		dead = chunk;
		chunk = chunk->next;
		if (dead->flags == CODE_FLAG_MMAP) {
			mono_vfree (dead->data, dead->size);
		} else if (dead->flags == CODE_FLAG_MALLOC) {
			free (dead->data);
		}
		free (dead);
	}
}

void
mono_code_manager_destroy (MonoCodeManager *cman)
{
	free_chunklist (cman->full);
	free_chunklist (cman->current);
	free (cman);
}

/* fill all the memory with the 0x2a (42) value */
void
mono_code_manager_invalidate (MonoCodeManager *cman)
{
	CodeChunk *chunk;

#if defined(__i386__) || defined(__x86_64__)
	int fill_value = 0xcc; /* x86 break */
#else
	int fill_value = 0x2a;
#endif

	for (chunk = cman->current; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
	for (chunk = cman->full; chunk; chunk = chunk->next)
		memset (chunk->data, fill_value, chunk->size);
}

void
mono_code_manager_foreach (MonoCodeManager *cman, MonoCodeManagerFunc func, void *user_data)
{
	CodeChunk *chunk;
	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
	for (chunk = cman->full; chunk; chunk = chunk->next) {
		if (func (chunk->data, chunk->size, chunk->bsize, user_data))
			return;
	}
}
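
/*
 * Minimal usage sketch for mono_code_manager_foreach () (illustrative only,
 * not part of the original file). It assumes MonoCodeManagerFunc matches the
 * calls above: (data, size, bsize, user_data), with a non-zero return value
 * stopping the walk. The callback name and the total accumulator are
 * hypothetical.
 */
#if 0
static int
sum_chunk_sizes (void *data, int size, int bsize, void *user_data)
{
	int *total = user_data;
	*total += size;	/* count the whole chunk, thunk area included */
	return 0;	/* keep walking the remaining chunks */
}

/* int total = 0; mono_code_manager_foreach (cman, sum_chunk_sizes, &total); */
#endif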

/* BIND_ROOM is the divisor for the chunk of code size dedicated
 * to binding branches (branches not reachable with the immediate displacement)
 * bind_size = size/BIND_ROOM;
 * we should reduce it and make MIN_PAGES bigger for such systems
 */
#if defined(__ppc__) || defined(__powerpc__)
#define BIND_ROOM 4
#endif
#if defined(__arm__)
#define BIND_ROOM 8
#endif
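
/*
 * Worked example (illustrative only): with BIND_ROOM 8 on ARM and a 64 KB
 * chunk, new_codechunk () below sets aside 65536 / 8 = 8192 bytes of thunk
 * space at the start of the chunk, and pos starts at that offset.
 */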

static CodeChunk*
new_codechunk (int dynamic, int size)
{
	int minsize, flags = CODE_FLAG_MMAP;
	int chunk_size, bsize = 0;
	int pagesize;
	CodeChunk *chunk;
	void *ptr;

#ifdef FORCE_MALLOC
	flags = CODE_FLAG_MALLOC;
#endif

	pagesize = mono_pagesize ();

	if (dynamic) {
		chunk_size = size;
		flags = CODE_FLAG_MALLOC;
	} else {
		minsize = pagesize * MIN_PAGES;
		if (size < minsize)
			chunk_size = minsize;
		else {
			chunk_size = size;
			chunk_size += pagesize - 1;
			chunk_size &= ~ (pagesize - 1);
		}
	}
#ifdef BIND_ROOM
	bsize = chunk_size / BIND_ROOM;
	if (bsize < MIN_BSIZE)
		bsize = MIN_BSIZE;
	bsize += MIN_ALIGN - 1;
	bsize &= ~ (MIN_ALIGN - 1);
	if (chunk_size - size < bsize) {
		chunk_size = size + bsize;
		chunk_size += pagesize - 1;
		chunk_size &= ~ (pagesize - 1);
	}
#endif

	/* does it make sense to use the mmap-like API? */
	if (flags == CODE_FLAG_MALLOC) {
		ptr = malloc (chunk_size);
		if (!ptr)
			return NULL;
	} else {
		ptr = mono_valloc (NULL, chunk_size, MONO_PROT_RWX | ARCH_MAP_FLAGS);
		if (!ptr)
			return NULL;
	}

	if (flags == CODE_FLAG_MALLOC) {
		/*
		 * AMD64 processors maintain icache coherency only for pages which are
		 * marked executable.
		 */
		{
			char *page_start = (char *) (((gssize) (ptr)) & ~ (pagesize - 1));
			int pages = ((char*)ptr + chunk_size - page_start + pagesize - 1) / pagesize;
			int err = mono_mprotect (page_start, pages * pagesize, MONO_PROT_RWX);
			assert (!err);
		}

#ifdef BIND_ROOM
		/* Make sure the thunks area is zeroed */
		memset (ptr, 0, bsize);
#endif
	}

	chunk = malloc (sizeof (CodeChunk));
	if (!chunk) {
		if (flags == CODE_FLAG_MALLOC)
			free (ptr);
		else
			mono_vfree (ptr, chunk_size);
		return NULL;
	}
	chunk->next = NULL;
	chunk->size = chunk_size;
	chunk->data = ptr;
	chunk->flags = flags;
	chunk->pos = bsize;
	chunk->bsize = bsize;

	/*printf ("code chunk at: %p\n", ptr);*/
	return chunk;
}
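
/*
 * Worked example (illustrative only): on x86, where BIND_ROOM is not defined,
 * a non-dynamic request of 70000 bytes with 4096-byte pages exceeds the
 * MIN_PAGES minimum (16 * 4096 = 65536) and is rounded up to the next page
 * boundary: 73728 bytes (18 pages).
 */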

void*
mono_code_manager_reserve (MonoCodeManager *cman, int size)
{
	CodeChunk *chunk, *prev;
	void *ptr;

	size += MIN_ALIGN;
	size &= ~ (MIN_ALIGN - 1);

	if (!cman->current) {
		cman->current = new_codechunk (cman->dynamic, size);
		if (!cman->current)
			return NULL;
	}

	for (chunk = cman->current; chunk; chunk = chunk->next) {
		if (chunk->pos + size <= chunk->size) {
			ptr = chunk->data + chunk->pos;
			chunk->pos += size;
			return ptr;
		}
	}
	/*
	 * no room found, move one filled chunk to cman->full
	 * to keep cman->current from growing too much
	 */
	prev = NULL;
	for (chunk = cman->current; chunk; prev = chunk, chunk = chunk->next) {
		if (chunk->pos + MIN_ALIGN * 4 <= chunk->size)
			continue;
		if (prev) {
			prev->next = chunk->next;
		} else {
			cman->current = chunk->next;
		}
		chunk->next = cman->full;
		cman->full = chunk;
		break;
	}
	chunk = new_codechunk (cman->dynamic, size);
	if (!chunk)
		return NULL;
	chunk->next = cman->current;
	cman->current = chunk;
	ptr = chunk->data + chunk->pos;
	chunk->pos += size;
	return ptr;
}
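
/*
 * Note (illustrative): the rounding in mono_code_manager_reserve () adds
 * MIN_ALIGN before masking, so on amd64 (MIN_ALIGN 16) a 10-byte request
 * becomes 16 bytes and an already aligned 16-byte request becomes 32 bytes.
 */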

/*
 * if we reserved too much room for a method and we didn't allocate
 * already from the code manager, we can get back the excess allocation.
 */
void
mono_code_manager_commit (MonoCodeManager *cman, void *data, int size, int newsize)
{
	newsize += MIN_ALIGN;
	newsize &= ~ (MIN_ALIGN - 1);
	size += MIN_ALIGN;
	size &= ~ (MIN_ALIGN - 1);

	if (cman->current && (size != newsize) && (data == cman->current->data + cman->current->pos - size)) {
		cman->current->pos -= size - newsize;
	}
}
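
/*
 * Usage sketch for the reserve/commit pair (illustrative only, not part of
 * the original file): a JIT caller reserves a generous upper bound, emits
 * code into it, then commits the size actually used so the unused tail can
 * be handed out again. MAX_CODE_SIZE and emit_code () are hypothetical.
 */
#if 0
static void
codeman_usage_example (void)
{
	MonoCodeManager *cman = mono_code_manager_new ();
	guint8 *start, *code;

	start = code = mono_code_manager_reserve (cman, MAX_CODE_SIZE);
	code = emit_code (code);	/* returns the first byte past the emitted code */
	mono_code_manager_commit (cman, start, MAX_CODE_SIZE, code - start);

	mono_code_manager_destroy (cman);
}
#endif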