8 #include "mono-codeman.h"
13 #if defined(__ia64__) || defined(__x86_64__)
15 * We require 16 byte alignment on amd64 so the fp literals embedded in the code are
16 * properly aligned for SSE2.
23 /* if a chunk has less than this amount of free space it's considered full */
24 #define MAX_WASTAGE 32
28 #define ARCH_MAP_FLAGS MONO_MMAP_32BIT
30 #define ARCH_MAP_FLAGS 0
33 #define MONO_PROT_RWX (MONO_MMAP_READ|MONO_MMAP_WRITE|MONO_MMAP_EXEC)
/* NOTE(review): extraction fragment — the body of struct _CodeChunck (original
 * lines ~37-50) is only partially visible here. "_CodeChunck" appears to be a
 * deliberate (misspelled) struct tag; do not rename the typedef side without
 * confirming the tag spelling at the struct definition. */
35 typedef struct _CodeChunck CodeChunk
;
/* allocation kind of this chunk: compared against CODE_FLAG_MMAP /
 * CODE_FLAG_MALLOC in free_chunklist () and new_codechunk () below */
47 unsigned int flags
: 8;
48 /* this number of bytes is available to resolve addresses far in memory */
/* bsize: size of the branch-bind/thunk area reserved at the start of the
 * chunk's data (computed from chunk_size / BIND_ROOM in new_codechunk) */
49 unsigned int bsize
: 24;
/* Code manager state. NOTE(review): the member list (original lines ~53-57)
 * is not visible in this fragment; members used elsewhere in this file are
 * ->full (retired chunk list), ->current (active chunk list) and ->dynamic. */
52 struct _MonoCodeManager
{
/* Allocates a new MonoCodeManager with malloc ().
 * NOTE(review): fragment — the return-value check and field initialization
 * (original lines ~62-68) are not visible here; ownership presumably passes
 * to the caller, who frees via mono_code_manager_destroy () — confirm. */
59 mono_code_manager_new (void)
61 MonoCodeManager
*cman
= malloc (sizeof (MonoCodeManager
));
/* Creates a code manager flagged for dynamic methods: delegates to
 * mono_code_manager_new () and then presumably sets cman->dynamic
 * (the assignment, original lines ~74-76, is not visible here). */
71 mono_code_manager_new_dynamic (void)
73 MonoCodeManager
*cman
= mono_code_manager_new ();
/* Releases every chunk in a CodeChunk list, freeing each chunk's data
 * according to how it was allocated (chunk->flags).
 * NOTE(review): fragment — the list-walk loop and the "dead" local's
 * assignment (original lines ~81-95) are not visible here. */
80 free_chunklist (CodeChunk
*chunk
)
/* mmap-backed data is returned through the mono_vfree () wrapper */
86 if (dead
->flags
== CODE_FLAG_MMAP
) {
87 mono_vfree (dead
->data
, dead
->size
);
/* malloc-backed data is presumably released with free () — the branch
 * body (original line ~89) is not visible; confirm */
88 } else if (dead
->flags
== CODE_FLAG_MALLOC
) {
/* Destroys a code manager: frees both chunk lists (retired and active).
 * NOTE(review): fragment — the free of cman itself (original line ~100)
 * is not visible here; confirm it is not leaked. */
96 mono_code_manager_destroy (MonoCodeManager
*cman
)
98 free_chunklist (cman
->full
);
99 free_chunklist (cman
->current
);
103 /* fill all the memory with the 0x2a (42) value */
/* Overwrites all managed code memory (both the active and the retired chunk
 * lists) with a poison value, so any stale jump into freed code is caught. */
105 mono_code_manager_invalidate (MonoCodeManager
*cman
)
/* on x86/amd64 use 0xcc (the int3 breakpoint opcode) so execution of
 * invalidated code traps immediately; other architectures get 0x2a */
109 #if defined(__i386__) || defined(__x86_64__)
110 int fill_value
= 0xcc; /* x86 break */
/* NOTE(review): the #else / #endif lines (original 111, 113-114) are not
 * visible in this fragment */
112 int fill_value
= 0x2a;
/* poison the active chunks ... */
115 for (chunk
= cman
->current
; chunk
; chunk
= chunk
->next
)
116 memset (chunk
->data
, fill_value
, chunk
->size
);
/* ... and the full (retired) chunks */
117 for (chunk
= cman
->full
; chunk
; chunk
= chunk
->next
)
118 memset (chunk
->data
, fill_value
, chunk
->size
);
/* Invokes func (data, size, bsize, user_data) for every chunk in both the
 * active (current) and retired (full) lists. A non-zero return from func
 * presumably stops the iteration early — the statement guarded by each if
 * (original lines ~127 and ~131) is not visible in this fragment; confirm. */
122 mono_code_manager_foreach (MonoCodeManager
*cman
, MonoCodeManagerFunc func
, void *user_data
)
125 for (chunk
= cman
->current
; chunk
; chunk
= chunk
->next
) {
126 if (func (chunk
->data
, chunk
->size
, chunk
->bsize
, user_data
))
129 for (chunk
= cman
->full
; chunk
; chunk
= chunk
->next
) {
130 if (func (chunk
->data
, chunk
->size
, chunk
->bsize
, user_data
))
135 /* BIND_ROOM is the divisor for the chunk of code size dedicated
136 * to binding branches (branches not reachable with the immediate displacement)
137 * bind_size = size/BIND_ROOM;
138 * we should reduce it and make MIN_PAGES bigger for such systems
140 #if defined(__ppc__) || defined(__powerpc__)
/* Allocates a new CodeChunk of at least `size` usable bytes.
 * dynamic: non-zero for dynamic-method managers (forces malloc-backed memory).
 * NOTE(review): heavily fragmented — the conditions guarding several of the
 * assignments below (original lines 152-165, 167-175, 178, 185-187, 191-198,
 * 200-225, 227-231) are not visible here. */
148 new_codechunk (int dynamic
, int size
)
/* default to mmap-backed memory; downgraded to malloc below in some cases */
150 int minsize
, flags
= CODE_FLAG_MMAP
;
151 int chunk_size
, bsize
= 0;
157 flags
= CODE_FLAG_MALLOC
;
160 pagesize
= mono_pagesize ();
164 flags
= CODE_FLAG_MALLOC
;
/* never allocate less than MIN_PAGES worth of address space */
166 minsize
= pagesize
* MIN_PAGES
;
168 chunk_size
= minsize
;
/* round the chunk size up to a whole number of pages
 * (assumes pagesize is a power of two) */
171 chunk_size
+= pagesize
- 1;
172 chunk_size
&= ~ (pagesize
- 1);
/* reserve a fraction of the chunk (1/BIND_ROOM) for branch thunks */
176 bsize
= chunk_size
/ BIND_ROOM
;
177 if (bsize
< MIN_BSIZE
)
/* align the thunk area size to MIN_ALIGN */
179 bsize
+= MIN_ALIGN
-1;
180 bsize
&= ~ (MIN_ALIGN
- 1);
/* if the thunk area ate into the requested size, grow the chunk and
 * re-round it to a page multiple */
181 if (chunk_size
- size
< bsize
) {
182 chunk_size
= size
+ bsize
;
183 chunk_size
+= pagesize
- 1;
184 chunk_size
&= ~ (pagesize
- 1);
188 /* does it make sense to use the mmap-like API? */
189 if (flags
== CODE_FLAG_MALLOC
) {
/* NOTE(review): malloc result appears unchecked in the visible lines —
 * the failure path (original lines ~191-193) is not visible; confirm */
190 ptr
= malloc (chunk_size
);
/* mmap-backed path: request RWX pages (plus low-2GB placement where
 * ARCH_MAP_FLAGS is MONO_MMAP_32BIT, see the #defines at the top) */
194 ptr
= mono_valloc (NULL
, chunk_size
, MONO_PROT_RWX
| ARCH_MAP_FLAGS
);
199 if (flags
== CODE_FLAG_MALLOC
) {
201 * AMD64 processors maintain icache coherency only for pages which are
/* malloc gave us ordinary RW pages: mprotect the whole page range
 * covering the allocation to RWX so the code is executable */
205 char *page_start
= (char *) (((gssize
) (ptr
)) & ~ (pagesize
- 1));
206 int pages
= ((char*)ptr
+ chunk_size
- page_start
+ pagesize
- 1) / pagesize
;
207 int err
= mono_mprotect (page_start
, pages
* pagesize
, MONO_PROT_RWX
);
212 /* Make sure the thunks area is zeroed */
213 memset (ptr
, 0, bsize
);
/* allocate the chunk descriptor itself; on failure the code memory is
 * released (the mmap branch is visible at original line 222) */
217 chunk
= malloc (sizeof (CodeChunk
));
219 if (flags
== CODE_FLAG_MALLOC
)
222 mono_vfree (ptr
, chunk_size
);
226 chunk
->size
= chunk_size
;
228 chunk
->flags
= flags
;
230 chunk
->bsize
= bsize
;
232 /*printf ("code chunk at: %p\n", ptr);*/
/* Reserves `size` bytes of code memory from the manager and returns a
 * pointer to it (the returned ptr / chunk->pos bump, original lines
 * ~254-255 and ~281-282, is not fully visible here).
 * NOTE(review): fragment — original line ~242, presumably the
 * "size += MIN_ALIGN - 1;" half of the round-up pair, is not visible;
 * confirm before treating line 243 alone as a round-down. */
237 mono_code_manager_reserve (MonoCodeManager
*cman
, int size
)
239 CodeChunk
*chunk
, *prev
;
/* align the request to MIN_ALIGN (assumes MIN_ALIGN is a power of two) */
243 size
&= ~ (MIN_ALIGN
- 1);
/* lazily create the first chunk */
245 if (!cman
->current
) {
246 cman
->current
= new_codechunk (cman
->dynamic
, size
);
/* first-fit scan of the active chunk list */
251 for (chunk
= cman
->current
; chunk
; chunk
= chunk
->next
) {
252 if (chunk
->pos
+ size
<= chunk
->size
) {
253 ptr
= chunk
->data
+ chunk
->pos
;
259 * no room found, move one filled chunk to cman->full
260 * to keep cman->current from growing too much
/* find a chunk with less than MIN_ALIGN*4 bytes left and unlink it from
 * current (prev tracks the node before chunk for the unlink) */
263 for (chunk
= cman
->current
; chunk
; prev
= chunk
, chunk
= chunk
->next
) {
264 if (chunk
->pos
+ MIN_ALIGN
* 4 <= chunk
->size
)
267 prev
->next
= chunk
->next
;
269 cman
->current
= chunk
->next
;
/* push the retired chunk onto the full list */
271 chunk
->next
= cman
->full
;
/* allocate a fresh chunk and make it the head of the current list */
275 chunk
= new_codechunk (cman
->dynamic
, size
);
278 chunk
->next
= cman
->current
;
279 cman
->current
= chunk
;
280 ptr
= chunk
->data
+ chunk
->pos
;
286 * if we reserved too much room for a method and we didn't allocate
287 * already from the code manager, we can get back the excess allocation.
/* Shrinks the most recent reservation from `size` down to `newsize` bytes,
 * but only when `data` is still the last reservation in the current chunk
 * (checked against cman->current->pos below).
 * NOTE(review): fragment — original line ~294 (presumably the
 * "size += MIN_ALIGN;" pairing with line 295) and everything after original
 * line 298 run past this extract; the function is truncated here. */
290 mono_code_manager_commit (MonoCodeManager
*cman
, void *data
, int size
, int newsize
)
/* round newsize up to the next MIN_ALIGN boundary */
292 newsize
+= MIN_ALIGN
;
293 newsize
&= ~ (MIN_ALIGN
- 1);
295 size
&= ~ (MIN_ALIGN
- 1);
/* only roll back if data is exactly the tail allocation of the current
 * chunk and the size actually changed */
297 if (cman
->current
&& (size
!= newsize
) && (data
== cman
->current
->data
+ cman
->current
->pos
- size
)) {
298 cman
->current
->pos
-= size
- newsize
;