/*
 * tramp-x86.c: JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/gc-internal.h>
#include <mono/arch/x86/x86-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-x86.h"

static guint8* nullified_class_init_trampoline;

/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4;
	MonoDomain *domain = mono_domain_get ();

	start = code = mono_domain_code_reserve (domain, 16);

	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
	x86_jump_code (code, addr);
	g_assert ((code - start) < 16);

	nacl_domain_code_validate (domain, &start, 16, &code);

	return start;
}

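/*
 * For reference, the stub emitted above is just two instructions (a sketch,
 * assuming the default this_pos of 4, i.e. 'this' sitting in the first stack
 * slot above the return address):
 *
 *	add dword [esp+4], sizeof (MonoObject)	; skip the box header
 *	jmp addr				; tail call the method body
 */
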
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

	buf_len = NACL_SIZE (10, 32);

	start = code = mono_domain_code_reserve (domain, buf_len);

	x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	x86_jump_code (code, addr);
	g_assert ((code - start) <= buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);

	return start;
}

gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_offset;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_offset = mono_x86_get_this_arg_offset (NULL, mono_method_signature (m));

	/* Set imt arg */
	x86_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load this */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, this_offset + 4, 4);
	/* Load vtable address */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
	x86_jump_membase (code, X86_EAX, vt_offset);

	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);

	mono_arch_flush_icache (start, code - start);

	return start;
}

void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
#if defined(__default_codegen__)
	guint8 *code;
	guint8 buf [8];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));

	code = buf + 8;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;
	orig_code -= 6;
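
	/*
	 * Illustrative displacement math (hypothetical addresses, not from a
	 * real run): a direct call ends 5 bytes after its 0xe8 opcode, so with
	 * the opcode at orig_code + 1 == 0x1000 and a new target addr == 0x2000,
	 * the rel32 written below is
	 * addr - (orig_code + 1) - 5 = 0x2000 - 0x1005 = 0xffb.
	 */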
	if (code [1] == 0xe8) {
		if (can_write) {
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);

			/* Tell valgrind to recompile the patched code */
			VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
		}
	} else if (code [1] == 0xe9) {
		/* A PLT entry: jmp <DISP> */
		if (can_write)
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
				code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
#elif defined(__native_client__)
	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);

	/* 0xe8 = call <DISP>, 0xe9 = jump <DISP> */
	if ((orig_code [-5] == 0xe8) || orig_code [-6] == 0xe9) {
		int ret;
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf [sizeof (gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof (gint32), buf, sizeof (gint32));
		g_assert (ret == 0);
	} else {
		printf ("Invalid trampoline sequence %p: %02x %02x %02x %02x %02x\n", orig_code, orig_code [-5], orig_code [-4], orig_code [-3], orig_code [-2], orig_code [-1]);
		g_assert_not_reached ();
	}
#endif
}

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	guint32 offset;

	/* Patch the jump table entry used by the plt entry */

#if defined(__native_client_codegen__) || defined(__native_client__)
	/* for both compiler and runtime      */
	/* A PLT entry:                       */
	/*        mov <DISP>(%ebx), %ecx      */
	/*        and 0xffffffe0, %ecx        */
	/*        jmp *%ecx                   */
	g_assert (code [0] == 0x8b);
	g_assert (code [1] == 0x8b);

	offset = *(guint32*)(code + 2);
#elif defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%ebx) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0xa3);

	offset = *(guint32*)(code + 2);
#endif  /* __native_client_codegen__ */

	if (!got)
		got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
	*(guint8**)((guint8*)got + offset) = addr;
}

static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
	const int kBufSize = NACL_SIZE (8, 16);
	guint8 buf [64];
	guint8 reg = 0;
	gint32 disp = 0;

	mono_breakpoint_clean_code (NULL, code, kBufSize, buf, kBufSize);
	code = buf + 8;

	*displacement = 0;
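
	/*
	 * The default-codegen pattern below decodes "call *disp32(reg)":
	 * opcode 0xff with ModRM mod == 2 ((code [1] >> 6) == 2, i.e. a 32-bit
	 * displacement) and the /2 opcode extension in the middle bits
	 * ((code [1] & 0x18) == 0x10). For example, ff 90 10 00 00 00 is
	 * "call *0x10(%eax)".
	 */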
	if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
		reg = code [1] & 0x07;
		disp = *((gint32*)(code + 2));
#if defined(__native_client_codegen__) || defined(__native_client__)
	} else if ((code [1] == 0x83) && (code [2] == 0xe1) && (code [4] == 0xff) &&
			(code [5] == 0xd1) && (code [-5] == 0x8b)) {
		disp = *((gint32*)(code - 3));
		reg = code [-4] & 0x07;
	} else if ((code [-2] == 0x8b) && (code [1] == 0x83) && (code [4] == 0xff)) {
		reg = code [-1] & 0x07;
		disp = (signed char)code [0];
#endif
	} else {
		g_assert_not_reached ();
		return NULL;
	}

	*displacement = disp;
	return (gpointer)regs [reg];
}

static gpointer*
get_vcall_slot_addr (guint8* code, mgreg_t *regs)
{
	gpointer vt;
	int displacement;

	vt = get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)((char*)vt + displacement);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 6, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 5;
	if (code [0] == 0xe8) {
#if defined(__default_codegen__)
		if (!mono_running_on_valgrind ()) {
			guint32 ops;
			/*
			 * Thread safe code patching using the algorithm from the paper
			 * 'Practicing JUDO: Java Under Dynamic Optimizations'
			 */
			/*
			 * First atomically change the first 2 bytes of the call to a
			 * spinning jump.
			 */
			ops = 0xfeeb;
			InterlockedExchange ((gint32*)code, ops);

			/* Then change the other bytes to a nop */
			code [2] = 0x90;
			code [3] = 0x90;
			code [4] = 0x90;

			/* Then atomically change the first 4 bytes to a nop as well */
			ops = 0x90909090;
			InterlockedExchange ((gint32*)code, ops);
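
			/*
			 * Byte-level view of the patch: the 5-byte call e8 xx xx xx xx
			 * becomes eb fe 00 00 xx after the first exchange (eb fe is
			 * "jmp -2", a self loop that parks any thread arriving
			 * mid-patch), then eb fe 90 90 90 once the tail is nopped, and
			 * finally 90 90 90 90 90 after the second exchange.
			 */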
			/* FIXME: the calltree skin trips on the self modifying code above */

			/* Tell valgrind to recompile the patched code */
			//VALGRIND_DISCARD_TRANSLATIONS (code, 8);
		}
#elif defined(__native_client_codegen__)
		mono_arch_patch_callsite (code, code + 5, nullified_class_init_trampoline);
#endif
	} else if (code [0] == 0x90 || code [0] == 0xeb) {
		/* Already changed by another thread */
		;
	} else if ((code [-1] == 0xff) && (x86_modrm_reg (code [0]) == 0x2)) {
		/* call *<OFFSET>(<REG>) -> Call made from AOT code */
		gpointer *vtable_slot;

		vtable_slot = get_vcall_slot_addr (code + 5, regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
				code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp;
	int pushed_args, pushed_args_caller_saved;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (gpointer)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
	 */

	/* Put all registers into an array on the stack
	 * If this code is changed, make sure to update the offset value in
	 * mono_arch_get_this_arg_from_call () in mini-x86.c.
	 */
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_ESI);
	x86_push_reg (code, X86_EBP);
	x86_push_reg (code, X86_ESP);
	x86_push_reg (code, X86_EBX);
	x86_push_reg (code, X86_EDX);
	x86_push_reg (code, X86_ECX);
	x86_push_reg (code, X86_EAX);
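	/*
	 * The push order above (EDI first, EAX last) leaves the saved registers
	 * at ESP ordered EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI, i.e. saved
	 * register i sits at ESP + i*4 under the X86_* numbering from
	 * x86-codegen.h (X86_EAX == 0 ... X86_EDI == 7), which is how the
	 * caller-saved restore code below indexes the array.
	 */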

	pushed_args_caller_saved = pushed_args = 8;

	/* Align stack on apple */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);

	pushed_args ++;

	/* save LMF begin */

	/* save the IP (caller ip) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (code, 0);
	else
		x86_push_membase (code, X86_ESP, (pushed_args + 1) * sizeof (gpointer));

	pushed_args++;

	x86_push_reg (code, X86_EBP);
	x86_push_reg (code, X86_ESI);
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_EBX);

	pushed_args += 4;

	/* save ESP */
	x86_push_reg (code, X86_ESP);
	/* Adjust ESP so it points to the previous frame */
	x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, (pushed_args + 2) * 4);

	pushed_args ++;

	/* save method info */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
		x86_push_membase (code, X86_ESP, pushed_args * sizeof (gpointer));
	else
		x86_push_imm (code, 0);

	pushed_args++;

	/* On apple, the stack is correctly aligned to 16 bytes because pushed_args is
	 * 16 and there is the extra trampoline arg + the return ip pushed by call
	 * FIXME: Note that if an exception happens while some args are pushed
	 * on the stack, the stack will be misaligned.
	 */
	g_assert (pushed_args == 16);

	/* get the address of lmf for the current thread */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_get_lmf_addr);
	}
	/* push lmf */
	x86_push_reg (code, X86_EAX);
	/* push *lmf (previous_lmf) */
	x86_push_membase (code, X86_EAX, 0);
	/* Signal to mono_arch_find_jit_info () that this is a trampoline frame */
	x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 1);
	/* *(lmf) = ESP */
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
	/* save LMF end */

	pushed_args += 2;

	/* starting the call sequence */

	/* FIXME: Push the trampoline address */
	x86_push_imm (code, 0);

	pushed_args++;

	/* push the method info */
	x86_push_membase (code, X86_ESP, pushed_args * sizeof (gpointer));

	pushed_args++;

	/* push the return address onto the stack */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (code, 0);
	else
		x86_push_membase (code, X86_ESP, (pushed_args + 1) * sizeof (gpointer));

	pushed_args++;

	/* push the address of the register array */
	x86_lea_membase (code, X86_EAX, X86_ESP, (pushed_args - 8) * sizeof (gpointer));
	x86_push_reg (code, X86_EAX);

	pushed_args++;

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	/*x86_mov_reg_reg (buf, X86_EDX, X86_ESP, 4);
	x86_alu_reg_imm (buf, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (buf, X86_CMP, X86_EDX, 0);
	x86_branch_disp (buf, X86_CC_Z, 3, FALSE);
	x86_breakpoint (buf);*/
#endif

	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, ((pushed_args + 2) * 4));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		x86_call_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		x86_call_code (code, tramp);
	}

	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4*4);

	pushed_args -= 4;

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/* Align the stack on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, 3 * 4);
	x86_push_reg (code, X86_EAX);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	x86_pop_reg (code, X86_EAX);
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3 * 4);

	/* restore LMF start */

	/* ebx = previous_lmf */
	x86_pop_reg (code, X86_EBX);
	pushed_args--;
	x86_alu_reg_imm (code, X86_SUB, X86_EBX, 1);

	/* edi = lmf */
	x86_pop_reg (code, X86_EDI);
	pushed_args--;

	/* *(lmf) = previous_lmf */
	x86_mov_membase_reg (code, X86_EDI, 0, X86_EBX, 4);

	/* discard method info */
	x86_pop_reg (code, X86_ESI);
	pushed_args--;

	/* discard ESP */
	x86_pop_reg (code, X86_ESI);
	pushed_args--;

	/* restore caller saved regs */
	x86_pop_reg (code, X86_EBX);
	x86_pop_reg (code, X86_EDI);
	x86_pop_reg (code, X86_ESI);
	x86_pop_reg (code, X86_EBP);

	pushed_args -= 4;

	/* discard save IP */
	x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
	pushed_args--;

	/* restore LMF end */

	if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/*
		 * Overwrite the method ptr with the address we need to jump to,
		 * to free %eax.
		 */
		x86_mov_membase_reg (code, X86_ESP, pushed_args * sizeof (gpointer), X86_EAX, 4);
	}

	/* Restore caller saved registers */
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_ECX) * 4, 4);
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EDX) * 4, 4);
	if ((tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT) || (tramp_type == MONO_TRAMPOLINE_AOT_PLT))
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EAX) * 4, 4);

	if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Pop saved reg array + stack align */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 9 * 4);
		pushed_args -= 9;
		g_assert (pushed_args == 0);
	} else {
		/* Pop saved reg array + stack align + method ptr */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 10 * 4);
		pushed_args -= 10;

		/* We've popped one more stack item than we've pushed (the
		   method ptr argument), so we must end up at -1. */
		g_assert (pushed_args == -1);
	}

	x86_ret (code);

	nacl_global_codeman_validate (&buf, 256, &code);
	g_assert ((code - buf) <= 256);

	if (info)
		*info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (NULL);
	}

	return buf;
}

gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
	guint8 *code, *buf;
	int tramp_size = NACL_SIZE (16, kNaClAlignment);

	code = buf = mono_global_codeman_reserve (tramp_size);
	x86_ret (code);

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "nullified_class_init_trampoline");

	return buf;
}

#define TRAMPOLINE_SIZE 10

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;

	tramp = mono_get_trampoline_code (tramp_type);

	code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, NACL_SIZE (4, kNaClAlignment));
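
	/*
	 * The emitted stub is exactly TRAMPOLINE_SIZE bytes with the default
	 * codegen: "push $arg1" (0x68 + imm32, 5 bytes) followed by
	 * "jmp <tramp>" (0xe9 + rel32, 5 bytes).
	 */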
	x86_push_imm (buf, arg1);
	x86_jump_code (buf, tramp);
	g_assert ((buf - code) <= TRAMPOLINE_SIZE);

	nacl_domain_code_validate (domain, &code, NACL_SIZE (4, kNaClAlignment), &buf);

	mono_arch_flush_icache (code, buf - code);

	if (code_len)
		*code_len = buf - code;

	return code;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
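
	/*
	 * The loop above converts the flat slot number into a (depth, index)
	 * pair: each rgctx level holds size - 1 payload slots because the last
	 * entry of every array is the pointer to the next, larger array.
	 */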

#if defined(__default_codegen__)
	tramp_size = (aot ? 64 : 36) + 6 * depth;
#elif defined(__native_client_codegen__)
	tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
		6 * (depth + kNaClAlignment);
#endif

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	if (!mrgctx) {
		/* load rgctx ptr from vtable */
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
		/* is the rgctx ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		else
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (gpointer) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		x86_jump_reg (code, X86_EAX);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		x86_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	x86_test_membase_imm (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask);
	jump = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	x86_ret (code);

	x86_patch (jump, code);

	/* Push the vtable so the stack is the same as in a specific trampoline */
	x86_push_reg (code, MONO_ARCH_VTABLE_REG);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_generic_class_init");
		x86_jump_reg (code, X86_EAX);
	} else {
		tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT);

		/* jump to the actual trampoline */
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);
#ifdef __native_client_codegen__
	g_assert (code - buf <= kNaClAlignment);
#endif

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG
/*
 * The code produced by this trampoline is equivalent to this:
 *
 * if (obj) {
 * 	if (obj->synchronisation) {
 * 		if (obj->synchronisation->owner == 0) {
 * 			if (cmpxch (&obj->synchronisation->owner, TID, 0) == 0)
 * 				return;
 * 		}
 * 		if (obj->synchronisation->owner == TID) {
 * 			++obj->synchronisation->nest;
 * 			return;
 * 		}
 * 	}
 * }
 * return full_monitor_enter ();
 */
gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_ENTER);
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_other_owner, *jump_cmpxchg_failed, *jump_tid, *jump_sync_thin_hash = NULL;
	int tramp_size;
	int owner_offset, nest_offset, dummy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = NACL_SIZE (96, 128);

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in EAX */
		/* is obj null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronisation to ECX */
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			x86_test_reg_imm (code, X86_ECX, 0x01);
			jump_sync_thin_hash = code;
			x86_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			x86_alu_reg_imm (code, X86_AND, X86_ECX, ~0x3);
		}

		/* is synchronisation null? */
		x86_test_reg_reg (code, X86_ECX, X86_ECX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into EDX */
		code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
		/* load TID into EDX */
		x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);

		/* is synchronisation->owner null? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, owner_offset, 0);
		/* if not, jump to next case */
		jump_tid = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* free up register EAX, needed for the zero */
		x86_push_reg (code, X86_EAX);
		/* zero EAX */
		x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
		/* compare and exchange */
		x86_prefix (code, X86_LOCK_PREFIX);
		x86_cmpxchg_membase_reg (code, X86_ECX, owner_offset, X86_EDX);
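		/*
		 * cmpxchg compares EAX with the owner field: since EAX was just
		 * zeroed, the exchange succeeds (storing the TID from EDX and
		 * setting ZF) only when the lock is unowned.
		 */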
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, pop and return */
		x86_pop_reg (code, X86_EAX);
		x86_ret (code);

		/* next case: synchronisation->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronisation->owner == TID? */
		x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		x86_inc_membase (code, X86_ECX, nest_offset);
		/* return */
		x86_ret (code);

		/* push obj */
		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_other_owner, code);
		x86_push_reg (code, X86_EAX);
		/* jump to the actual trampoline */
		x86_patch (jump_cmpxchg_failed, code);
		if (aot) {
			/* We are calling the generic trampoline directly, the argument is pushed
			 * on the stack just like a specific trampoline.
			 */
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_enter");
			x86_jump_reg (code, X86_EAX);
		} else {
			x86_jump_code (code, tramp);
		}
	} else {
		/* push obj and jump to the actual trampoline */
		x86_push_reg (code, X86_EAX);
		if (aot) {
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_enter");
			x86_jump_reg (code, X86_EAX);
		} else {
			x86_jump_code (code, tramp);
		}
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_enter_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_EXIT);
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_sync_thin_hash = NULL;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = NACL_SIZE (96, 128);

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in EAX */
		/* is obj null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronisation to ECX */
		x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			x86_test_reg_imm (code, X86_ECX, 0x01);
			jump_sync_thin_hash = code;
			x86_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			x86_alu_reg_imm (code, X86_AND, X86_ECX, ~0x3);
		}

		/* is synchronisation null? */
		x86_test_reg_reg (code, X86_ECX, X86_ECX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronisation is not null */
		/* load MonoInternalThread* into EDX */
		code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
		/* load TID into EDX */
		x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);
		/* is synchronisation->owner == TID? */
		x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
		/* if no, jump to actual trampoline */
		jump_not_owned = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronisation->owner == TID */
		/* is synchronisation->nest == 1? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, nest_offset, 1);
		/* if not, jump to next case */
		jump_next = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronisation->entry_count zero? */
		x86_alu_membase_imm (code, X86_CMP, X86_ECX, entry_count_offset, 0);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		x86_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, set synchronisation->owner to null and return */
		x86_mov_membase_imm (code, X86_ECX, owner_offset, 0, 4);
		x86_ret (code);

		/* next case: synchronisation->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronisation->nest and return */
		x86_dec_membase (code, X86_ECX, nest_offset);
		x86_ret (code);

		/* push obj and jump to the actual trampoline */
		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);
	}

	/* push obj and jump to the actual trampoline */
	x86_push_reg (code, X86_EAX);
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_exit");
		x86_jump_reg (code, X86_EAX);
	} else {
		x86_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_exit_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#else

gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

#endif

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	x86_push_imm (code, func_arg);
	x86_call_code (code, (guint8*)func);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (void)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	/*
	 * This trampoline restores the call chain of the handler block then
	 * jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_x86_emit_tls_get (code, X86_EAX, mono_get_jit_tls_offset ());
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 4);

		/* Fix stack alignment */
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x8);
		x86_push_reg (code, X86_EAX);
		x86_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x8);
		x86_push_reg (code, X86_ESP);
		x86_push_imm (code, tramp);
		x86_jump_code (code, handler_block_trampoline_helper);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "handler_block_trampoline");

	return buf;
}

guint8*
mono_arch_get_call_target (guint8 *code)
{
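	/*
	 * A direct call ends at `code': the 0xe8 opcode sits 5 bytes back and
	 * the rel32 displacement 4 bytes back. The displacement is relative to
	 * the next instruction, i.e. to `code' itself, so the target is simply
	 * code + disp.
	 */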
	if (code [-5] == 0xe8) {
		guint32 disp = *(guint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}

guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
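	/*
	 * The info offset is stored right after the PLT entry's jump sequence:
	 * 6 bytes for the default "jmp *<DISP>(%ebx)" (ff a3 + disp32), 12 for
	 * the longer Native Client sequence checked in
	 * mono_arch_patch_plt_entry () above.
	 */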
	return *(guint32*)(plt_entry + NACL_SIZE (6, 12));
}