/*
 * tramp-x86.c: JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
13 #include <mono/metadata/abi-details.h>
14 #include <mono/metadata/appdomain.h>
15 #include <mono/metadata/metadata-internals.h>
16 #include <mono/metadata/marshal.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/metadata/mono-debug.h>
19 #include <mono/metadata/mono-debug-debugger.h>
20 #include <mono/metadata/profiler-private.h>
21 #include <mono/metadata/gc-internals.h>
22 #include <mono/arch/x86/x86-codegen.h>
24 #include <mono/utils/memcheck.h>
28 #include "debugger-agent.h"
29 #include "jit-icalls.h"
/*
 * Round VAL up to the next multiple of ALIGN.
 * ALIGN must be a power of two; the computation is done in 64-bit to avoid
 * overflow on large values.
 * FIX: parenthesize the 'val' argument — '(guint64)val' binds the cast to the
 * first token only, so ALIGN_TO (a + b, 8) would expand incorrectly.
 */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
34 * mono_arch_get_unbox_trampoline:
36 * @addr: pointer to native code for @m
38 * when value type methods are called through the vtable we need to unbox the
39 * this argument. This method returns a pointer to a trampoline which does
40 * unboxing before calling the method
43 mono_arch_get_unbox_trampoline (MonoMethod
*m
, gpointer addr
)
46 int this_pos
= 4, size
= 16;
47 MonoDomain
*domain
= mono_domain_get ();
50 start
= code
= mono_domain_code_reserve (domain
, size
);
52 unwind_ops
= mono_arch_get_cie_program ();
54 x86_alu_membase_imm (code
, X86_ADD
, X86_ESP
, this_pos
, sizeof (MonoObject
));
55 x86_jump_code (code
, addr
);
56 g_assert ((code
- start
) < size
);
58 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE
, m
);
60 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
66 mono_arch_get_static_rgctx_trampoline (MonoMethod
*m
, MonoMethodRuntimeGenericContext
*mrgctx
, gpointer addr
)
72 MonoDomain
*domain
= mono_domain_get ();
76 start
= code
= mono_domain_code_reserve (domain
, buf_len
);
78 unwind_ops
= mono_arch_get_cie_program ();
80 x86_mov_reg_imm (code
, MONO_ARCH_RGCTX_REG
, mrgctx
);
81 x86_jump_code (code
, addr
);
82 g_assert ((code
- start
) <= buf_len
);
84 mono_arch_flush_icache (start
, code
- start
);
85 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
);
87 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
93 mono_arch_patch_callsite (guint8
*method_start
, guint8
*orig_code
, guint8
*addr
)
97 gboolean can_write
= mono_breakpoint_clean_code (method_start
, orig_code
, 8, buf
, sizeof (buf
));
101 /* go to the start of the call instruction
103 * address_byte = (m << 6) | (o << 3) | reg
104 * call opcode: 0xff address_byte displacement
110 if (code
[1] == 0xe8) {
112 InterlockedExchange ((gint32
*)(orig_code
+ 2), (guint
)addr
- ((guint
)orig_code
+ 1) - 5);
114 /* Tell valgrind to recompile the patched code */
115 VALGRIND_DISCARD_TRANSLATIONS (orig_code
+ 2, 4);
117 } else if (code
[1] == 0xe9) {
118 /* A PLT entry: jmp <DISP> */
120 InterlockedExchange ((gint32
*)(orig_code
+ 2), (guint
)addr
- ((guint
)orig_code
+ 1) - 5);
122 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code
[0], code
[1], code
[2], code
[3],
123 code
[4], code
[5], code
[6]);
124 g_assert_not_reached ();
129 mono_arch_patch_plt_entry (guint8
*code
, gpointer
*got
, mgreg_t
*regs
, guint8
*addr
)
133 /* Patch the jump table entry used by the plt entry */
135 /* A PLT entry: jmp *<DISP>(%ebx) */
136 g_assert (code
[0] == 0xff);
137 g_assert (code
[1] == 0xa3);
139 offset
= *(guint32
*)(code
+ 2);
141 got
= (gpointer
*)(gsize
) regs
[MONO_ARCH_GOT_REG
];
142 *(guint8
**)((guint8
*)got
+ offset
) = addr
;
146 get_vcall_slot (guint8
*code
, mgreg_t
*regs
, int *displacement
)
148 const int kBufSize
= 8;
153 mono_breakpoint_clean_code (NULL
, code
, kBufSize
, buf
, kBufSize
);
158 if ((code
[0] == 0xff) && ((code
[1] & 0x18) == 0x10) && ((code
[1] >> 6) == 2)) {
159 reg
= code
[1] & 0x07;
160 disp
= *((gint32
*)(code
+ 2));
162 g_assert_not_reached ();
166 *displacement
= disp
;
167 return (gpointer
)regs
[reg
];
171 get_vcall_slot_addr (guint8
* code
, mgreg_t
*regs
)
175 vt
= get_vcall_slot (code
, regs
, &displacement
);
178 return (gpointer
*)((char*)vt
+ displacement
);
182 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type
, MonoTrampInfo
**info
, gboolean aot
)
185 guint8
*buf
, *code
, *tramp
, *br_ex_check
;
186 GSList
*unwind_ops
= NULL
;
187 MonoJumpInfo
*ji
= NULL
;
188 int i
, offset
, frame_size
, regarray_offset
, lmf_offset
, caller_ip_offset
, arg_offset
;
189 int cfa_offset
; /* cfa = cfa_reg + cfa_offset */
191 code
= buf
= mono_global_codeman_reserve (256);
193 /* Note that there is a single argument to the trampoline
194 * and it is stored at: esp + pushed_args * sizeof (gpointer)
195 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
198 /* Compute frame offsets relative to the frame pointer %ebp */
199 arg_offset
= sizeof (mgreg_t
);
200 caller_ip_offset
= 2 * sizeof (mgreg_t
);
202 offset
+= sizeof (MonoLMF
);
203 lmf_offset
= -offset
;
204 offset
+= X86_NREG
* sizeof (mgreg_t
);
205 regarray_offset
= -offset
;
207 offset
+= 4 * sizeof (mgreg_t
);
208 frame_size
= ALIGN_TO (offset
, MONO_ARCH_FRAME_ALIGNMENT
);
210 /* ret addr and arg are on the stack */
211 cfa_offset
= 2 * sizeof (mgreg_t
);
212 mono_add_unwind_op_def_cfa (unwind_ops
, code
, buf
, X86_ESP
, cfa_offset
);
213 // IP saved at CFA - 4
214 mono_add_unwind_op_offset (unwind_ops
, code
, buf
, X86_NREG
, -4);
217 x86_push_reg (code
, X86_EBP
);
218 cfa_offset
+= sizeof (mgreg_t
);
219 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
220 mono_add_unwind_op_offset (unwind_ops
, code
, buf
, X86_EBP
, -cfa_offset
);
222 x86_mov_reg_reg (code
, X86_EBP
, X86_ESP
, sizeof (mgreg_t
));
223 mono_add_unwind_op_def_cfa_reg (unwind_ops
, code
, buf
, X86_EBP
);
225 /* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
226 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, frame_size
+ sizeof (mgreg_t
));
228 /* Save all registers */
229 for (i
= X86_EAX
; i
<= X86_EDI
; ++i
) {
233 /* Save original ebp */
234 /* EAX is already saved */
235 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, 0, sizeof (mgreg_t
));
237 } else if (i
== X86_ESP
) {
238 /* Save original esp */
239 /* EAX is already saved */
240 x86_mov_reg_reg (code
, X86_EAX
, X86_EBP
, sizeof (mgreg_t
));
241 /* Saved ebp + trampoline arg + return addr */
242 x86_alu_reg_imm (code
, X86_ADD
, X86_EAX
, 3 * sizeof (mgreg_t
));
245 x86_mov_membase_reg (code
, X86_EBP
, regarray_offset
+ (i
* sizeof (mgreg_t
)), reg
, sizeof (mgreg_t
));
250 if (tramp_type
== MONO_TRAMPOLINE_JUMP
) {
251 x86_mov_membase_imm (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, eip
), 0, sizeof (mgreg_t
));
253 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, caller_ip_offset
, sizeof (mgreg_t
));
254 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, eip
), X86_EAX
, sizeof (mgreg_t
));
257 if ((tramp_type
== MONO_TRAMPOLINE_JIT
) || (tramp_type
== MONO_TRAMPOLINE_JUMP
)) {
258 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, arg_offset
, sizeof (mgreg_t
));
259 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, method
), X86_EAX
, sizeof (mgreg_t
));
261 x86_mov_membase_imm (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, method
), 0, sizeof (mgreg_t
));
264 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
+ (X86_ESP
* sizeof (mgreg_t
)), sizeof (mgreg_t
));
265 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, esp
), X86_EAX
, sizeof (mgreg_t
));
266 /* callee save registers */
267 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
+ (X86_EBX
* sizeof (mgreg_t
)), sizeof (mgreg_t
));
268 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ebx
), X86_EAX
, sizeof (mgreg_t
));
269 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
+ (X86_EDI
* sizeof (mgreg_t
)), sizeof (mgreg_t
));
270 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, edi
), X86_EAX
, sizeof (mgreg_t
));
271 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
+ (X86_ESI
* sizeof (mgreg_t
)), sizeof (mgreg_t
));
272 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, esi
), X86_EAX
, sizeof (mgreg_t
));
273 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
+ (X86_EBP
* sizeof (mgreg_t
)), sizeof (mgreg_t
));
274 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, ebp
), X86_EAX
, sizeof (mgreg_t
));
277 /* get the address of lmf for the current thread */
279 code
= mono_arch_emit_load_aotconst (buf
, code
, &ji
, MONO_PATCH_INFO_JIT_ICALL_ADDR
, "mono_get_lmf_addr");
280 x86_call_reg (code
, X86_EAX
);
282 x86_call_code (code
, mono_get_lmf_addr
);
284 /* lmf->lmf_addr = lmf_addr (%eax) */
285 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
), X86_EAX
, sizeof (mgreg_t
));
286 /* lmf->previous_lmf = *(lmf_addr) */
287 x86_mov_reg_membase (code
, X86_ECX
, X86_EAX
, 0, sizeof (mgreg_t
));
288 /* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
289 x86_alu_reg_imm (code
, X86_ADD
, X86_ECX
, 1);
290 x86_mov_membase_reg (code
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
), X86_ECX
, sizeof (mgreg_t
));
291 /* *lmf_addr = lmf */
292 x86_lea_membase (code
, X86_ECX
, X86_EBP
, lmf_offset
);
293 x86_mov_membase_reg (code
, X86_EAX
, 0, X86_ECX
, sizeof (mgreg_t
));
295 /* Call trampoline function */
296 /* Arg 1 - registers */
297 x86_lea_membase (code
, X86_EAX
, X86_EBP
, regarray_offset
);
298 x86_mov_membase_reg (code
, X86_ESP
, (0 * sizeof (mgreg_t
)), X86_EAX
, sizeof (mgreg_t
));
299 /* Arg2 - calling code */
300 if (tramp_type
== MONO_TRAMPOLINE_JUMP
) {
301 x86_mov_membase_imm (code
, X86_ESP
, (1 * sizeof (mgreg_t
)), 0, sizeof (mgreg_t
));
303 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, caller_ip_offset
, sizeof (mgreg_t
));
304 x86_mov_membase_reg (code
, X86_ESP
, (1 * sizeof (mgreg_t
)), X86_EAX
, sizeof (mgreg_t
));
306 /* Arg3 - trampoline argument */
307 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, arg_offset
, sizeof (mgreg_t
));
308 x86_mov_membase_reg (code
, X86_ESP
, (2 * sizeof (mgreg_t
)), X86_EAX
, sizeof (mgreg_t
));
309 /* Arg4 - trampoline address */
311 x86_mov_membase_imm (code
, X86_ESP
, (3 * sizeof (mgreg_t
)), 0, sizeof (mgreg_t
));
314 /* check the stack is aligned after the ret ip is pushed */
316 x86_mov_reg_reg (code, X86_EDX, X86_ESP, 4);
317 x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
318 x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
319 x86_branch_disp (code, X86_CC_Z, 3, FALSE);
320 x86_breakpoint (code);
325 char *icall_name
= g_strdup_printf ("trampoline_func_%d", tramp_type
);
326 code
= mono_arch_emit_load_aotconst (buf
, code
, &ji
, MONO_PATCH_INFO_JIT_ICALL_ADDR
, icall_name
);
327 x86_call_reg (code
, X86_EAX
);
329 tramp
= (guint8
*)mono_get_trampoline_func (tramp_type
);
330 x86_call_code (code
, tramp
);
334 * Overwrite the trampoline argument with the address we need to jump to,
337 x86_mov_membase_reg (code
, X86_EBP
, arg_offset
, X86_EAX
, 4);
340 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, lmf_addr
), sizeof (mgreg_t
));
341 x86_mov_reg_membase (code
, X86_ECX
, X86_EBP
, lmf_offset
+ G_STRUCT_OFFSET (MonoLMF
, previous_lmf
), sizeof (mgreg_t
));
342 x86_alu_reg_imm (code
, X86_SUB
, X86_ECX
, 1);
343 x86_mov_membase_reg (code
, X86_EAX
, 0, X86_ECX
, sizeof (mgreg_t
));
345 /* Check for interruptions */
347 code
= mono_arch_emit_load_aotconst (buf
, code
, &ji
, MONO_PATCH_INFO_JIT_ICALL_ADDR
, "mono_thread_force_interruption_checkpoint_noraise");
348 x86_call_reg (code
, X86_EAX
);
350 x86_call_code (code
, (guint8
*)mono_thread_force_interruption_checkpoint_noraise
);
353 x86_test_reg_reg (code
, X86_EAX
, X86_EAX
);
355 x86_branch8 (code
, X86_CC_Z
, -1, 1);
359 * We have an exception we want to throw in the caller's frame, so pop
360 * the trampoline frame and throw from the caller.
364 * The exception is in eax.
365 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
366 * stack to look the same.
367 * The stack contains the ret addr, and the trampoline argument, the throw trampoline
368 * expects it to contain the ret addr and the exception. It also needs to be aligned
369 * after the exception is pushed.
372 x86_push_reg (code
, X86_EAX
);
373 /* Push the exception */
374 x86_push_reg (code
, X86_EAX
);
375 //x86_breakpoint (code);
376 /* Push the original return value */
377 x86_push_membase (code
, X86_ESP
, 3 * 4);
379 * EH is initialized after trampolines, so get the address of the variable
380 * which contains throw_exception, and load it from there.
383 /* Not really a jit icall */
384 code
= mono_arch_emit_load_aotconst (buf
, code
, &ji
, MONO_PATCH_INFO_JIT_ICALL_ADDR
, "throw_exception_addr");
386 x86_mov_reg_imm (code
, X86_ECX
, (guint8
*)mono_get_throw_exception_addr ());
388 x86_mov_reg_membase (code
, X86_ECX
, X86_ECX
, 0, sizeof(gpointer
));
389 x86_jump_reg (code
, X86_ECX
);
392 mono_x86_patch (br_ex_check
, code
);
394 /* Restore registers */
395 for (i
= X86_EAX
; i
<= X86_EDI
; ++i
) {
396 if (i
== X86_ESP
|| i
== X86_EBP
)
398 if (i
== X86_EAX
&& !((tramp_type
== MONO_TRAMPOLINE_RESTORE_STACK_PROT
) || (tramp_type
== MONO_TRAMPOLINE_AOT_PLT
)))
400 x86_mov_reg_membase (code
, i
, X86_EBP
, regarray_offset
+ (i
* 4), 4);
405 cfa_offset
-= sizeof (mgreg_t
);
406 mono_add_unwind_op_def_cfa (unwind_ops
, code
, buf
, X86_ESP
, cfa_offset
);
407 mono_add_unwind_op_same_value (unwind_ops
, code
, buf
, X86_EBP
);
409 if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type
)) {
410 /* Load the value returned by the trampoline */
411 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, 0, 4);
412 /* The trampoline returns normally, pop the trampoline argument */
413 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 4);
414 cfa_offset
-= sizeof (mgreg_t
);
415 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
418 /* The trampoline argument is at the top of the stack, and it contains the address we need to branch to */
419 if (tramp_type
== MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD
) {
420 x86_pop_reg (code
, X86_EAX
);
421 cfa_offset
-= sizeof (mgreg_t
);
422 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
423 x86_alu_reg_imm (code
, X86_ADD
, X86_ESP
, 0x8);
424 x86_jump_reg (code
, X86_EAX
);
430 g_assert ((code
- buf
) <= 256);
431 mono_profiler_code_buffer_new (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_HELPER
, NULL
);
433 tramp_name
= mono_get_generic_trampoline_name (tramp_type
);
434 *info
= mono_tramp_info_create (tramp_name
, buf
, code
- buf
, ji
, unwind_ops
);
440 #define TRAMPOLINE_SIZE 10
443 mono_arch_create_specific_trampoline (gpointer arg1
, MonoTrampolineType tramp_type
, MonoDomain
*domain
, guint32
*code_len
)
445 guint8
*code
, *buf
, *tramp
;
447 tramp
= mono_get_trampoline_code (tramp_type
);
449 code
= buf
= mono_domain_code_reserve_align (domain
, TRAMPOLINE_SIZE
, 4);
451 x86_push_imm (buf
, arg1
);
452 x86_jump_code (buf
, tramp
);
453 g_assert ((buf
- code
) <= TRAMPOLINE_SIZE
);
455 mono_arch_flush_icache (code
, buf
- code
);
456 mono_profiler_code_buffer_new (code
, buf
- code
, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
, mono_get_generic_trampoline_simple_name (tramp_type
));
459 *code_len
= buf
- code
;
465 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot
, MonoTrampInfo
**info
, gboolean aot
)
469 guint8
**rgctx_null_jumps
;
474 MonoJumpInfo
*ji
= NULL
;
475 GSList
*unwind_ops
= NULL
;
477 unwind_ops
= mono_arch_get_cie_program ();
479 mrgctx
= MONO_RGCTX_SLOT_IS_MRGCTX (slot
);
480 index
= MONO_RGCTX_SLOT_INDEX (slot
);
482 index
+= MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT
/ sizeof (gpointer
);
483 for (depth
= 0; ; ++depth
) {
484 int size
= mono_class_rgctx_get_array_size (depth
, mrgctx
);
486 if (index
< size
- 1)
491 tramp_size
= (aot
? 64 : 36) + 6 * depth
;
493 code
= buf
= mono_global_codeman_reserve (tramp_size
);
495 rgctx_null_jumps
= g_malloc (sizeof (guint8
*) * (depth
+ 2));
497 /* load vtable/mrgctx ptr */
498 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, 4, 4);
500 /* load rgctx ptr from vtable */
501 x86_mov_reg_membase (code
, X86_EAX
, X86_EAX
, MONO_STRUCT_OFFSET (MonoVTable
, runtime_generic_context
), 4);
502 /* is the rgctx ptr null? */
503 x86_test_reg_reg (code
, X86_EAX
, X86_EAX
);
504 /* if yes, jump to actual trampoline */
505 rgctx_null_jumps
[0] = code
;
506 x86_branch8 (code
, X86_CC_Z
, -1, 1);
509 for (i
= 0; i
< depth
; ++i
) {
510 /* load ptr to next array */
511 if (mrgctx
&& i
== 0)
512 x86_mov_reg_membase (code
, X86_EAX
, X86_EAX
, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT
, 4);
514 x86_mov_reg_membase (code
, X86_EAX
, X86_EAX
, 0, 4);
515 /* is the ptr null? */
516 x86_test_reg_reg (code
, X86_EAX
, X86_EAX
);
517 /* if yes, jump to actual trampoline */
518 rgctx_null_jumps
[i
+ 1] = code
;
519 x86_branch8 (code
, X86_CC_Z
, -1, 1);
523 x86_mov_reg_membase (code
, X86_EAX
, X86_EAX
, sizeof (gpointer
) * (index
+ 1), 4);
524 /* is the slot null? */
525 x86_test_reg_reg (code
, X86_EAX
, X86_EAX
);
526 /* if yes, jump to actual trampoline */
527 rgctx_null_jumps
[depth
+ 1] = code
;
528 x86_branch8 (code
, X86_CC_Z
, -1, 1);
529 /* otherwise return */
532 for (i
= mrgctx
? 1 : 0; i
<= depth
+ 1; ++i
)
533 x86_patch (rgctx_null_jumps
[i
], code
);
535 g_free (rgctx_null_jumps
);
537 x86_mov_reg_membase (code
, MONO_ARCH_VTABLE_REG
, X86_ESP
, 4, 4);
540 code
= mono_arch_emit_load_aotconst (buf
, code
, &ji
, MONO_PATCH_INFO_JIT_ICALL_ADDR
, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot
));
541 x86_jump_reg (code
, X86_EAX
);
543 tramp
= mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot
), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH
, mono_get_root_domain (), NULL
);
545 /* jump to the actual trampoline */
546 x86_jump_code (code
, tramp
);
549 mono_arch_flush_icache (buf
, code
- buf
);
550 mono_profiler_code_buffer_new (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
);
552 g_assert (code
- buf
<= tramp_size
);
554 char *name
= mono_get_rgctx_fetch_trampoline_name (slot
);
555 *info
= mono_tramp_info_create (name
, buf
, code
- buf
, ji
, unwind_ops
);
562 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
564 * This is a general variant of the rgctx fetch trampolines. It receives a pointer to gpointer[2] in the rgctx reg. The first entry contains the slot, the second
565 * the trampoline to call if the slot is not filled.
568 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo
**info
, gboolean aot
)
572 MonoJumpInfo
*ji
= NULL
;
573 GSList
*unwind_ops
= NULL
;
577 unwind_ops
= mono_arch_get_cie_program ();
581 code
= buf
= mono_global_codeman_reserve (tramp_size
);
583 // FIXME: Currently, we always go to the slow path.
585 /* Load trampoline addr */
586 x86_mov_reg_membase (code
, X86_EAX
, MONO_ARCH_RGCTX_REG
, 4, 4);
587 /* Load mrgctx/vtable */
588 x86_mov_reg_membase (code
, MONO_ARCH_VTABLE_REG
, X86_ESP
, 4, 4);
590 x86_jump_reg (code
, X86_EAX
);
592 mono_arch_flush_icache (buf
, code
- buf
);
593 mono_profiler_code_buffer_new (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
);
595 g_assert (code
- buf
<= tramp_size
);
597 *info
= mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf
, code
- buf
, ji
, unwind_ops
);
603 mono_arch_invalidate_method (MonoJitInfo
*ji
, void *func
, gpointer func_arg
)
605 /* FIXME: This is not thread safe */
606 guint8
*code
= ji
->code_start
;
608 x86_push_imm (code
, func_arg
);
609 x86_call_code (code
, (guint8
*)func
);
613 handler_block_trampoline_helper (void)
615 MonoJitTlsData
*jit_tls
= mono_native_tls_get_value (mono_jit_tls_id
);
616 return jit_tls
->handler_block_return_address
;
620 mono_arch_create_handler_block_trampoline (MonoTrampInfo
**info
, gboolean aot
)
622 guint8
*tramp
= mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD
);
625 MonoJumpInfo
*ji
= NULL
;
627 GSList
*unwind_ops
= NULL
;
631 code
= buf
= mono_global_codeman_reserve (tramp_size
);
633 unwind_ops
= mono_arch_get_cie_program ();
634 cfa_offset
= sizeof (mgreg_t
);
636 This trampoline restore the call chain of the handler block then jumps into the code that deals with it.
640 * We are in a method frame after the call emitted by OP_CALL_HANDLER.
643 if (mono_get_jit_tls_offset () != -1) {
644 code
= mono_x86_emit_tls_get (code
, X86_EAX
, mono_get_jit_tls_offset ());
645 x86_mov_reg_membase (code
, X86_EAX
, X86_EAX
, MONO_STRUCT_OFFSET (MonoJitTlsData
, handler_block_return_address
), 4);
647 /*Slow path uses a c helper*/
648 x86_call_code (code
, handler_block_trampoline_helper
);
650 /* Simulate a call */
651 /*Fix stack alignment*/
652 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, 0x4);
653 cfa_offset
+= sizeof (mgreg_t
);
654 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
656 /* This is the address the trampoline will return to */
657 x86_push_reg (code
, X86_EAX
);
658 cfa_offset
+= sizeof (mgreg_t
);
659 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
661 /* Dummy trampoline argument, since we call the generic trampoline directly */
662 x86_push_imm (code
, 0);
663 cfa_offset
+= sizeof (mgreg_t
);
664 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
665 x86_jump_code (code
, tramp
);
667 mono_arch_flush_icache (buf
, code
- buf
);
668 mono_profiler_code_buffer_new (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_HELPER
, NULL
);
669 g_assert (code
- buf
<= tramp_size
);
671 *info
= mono_tramp_info_create ("handler_block_trampoline", buf
, code
- buf
, ji
, unwind_ops
);
677 mono_arch_get_call_target (guint8
*code
)
679 if (code
[-5] == 0xe8) {
680 gint32 disp
= *(gint32
*)(code
- 4);
681 guint8
*target
= code
+ disp
;
690 mono_arch_get_plt_info_offset (guint8
*plt_entry
, mgreg_t
*regs
, guint8
*code
)
692 return *(guint32
*)(plt_entry
+ 6);
696 * mono_arch_get_gsharedvt_arg_trampoline:
698 * Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
701 mono_arch_get_gsharedvt_arg_trampoline (MonoDomain
*domain
, gpointer arg
, gpointer addr
)
703 guint8
*code
, *start
;
710 start
= code
= mono_domain_code_reserve (domain
, buf_len
);
712 unwind_ops
= mono_arch_get_cie_program ();
714 x86_mov_reg_imm (code
, X86_EAX
, arg
);
715 x86_jump_code (code
, addr
);
716 g_assert ((code
- start
) <= buf_len
);
718 mono_arch_flush_icache (start
, code
- start
);
719 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
);
721 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
727 * mono_arch_create_sdb_trampoline:
729 * Return a trampoline which captures the current context, passes it to
730 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
731 * then restores the (potentially changed) context.
734 mono_arch_create_sdb_trampoline (gboolean single_step
, MonoTrampInfo
**info
, gboolean aot
)
736 int tramp_size
= 256;
737 int framesize
, ctx_offset
, cfa_offset
;
739 GSList
*unwind_ops
= NULL
;
740 MonoJumpInfo
*ji
= NULL
;
742 code
= buf
= mono_global_codeman_reserve (tramp_size
);
747 framesize
+= sizeof (mgreg_t
);
749 framesize
= ALIGN_TO (framesize
, 8);
750 ctx_offset
= framesize
;
751 framesize
+= sizeof (MonoContext
);
753 framesize
= ALIGN_TO (framesize
, MONO_ARCH_FRAME_ALIGNMENT
);
757 mono_add_unwind_op_def_cfa (unwind_ops
, code
, buf
, X86_ESP
, 4);
758 // IP saved at CFA - 4
759 mono_add_unwind_op_offset (unwind_ops
, code
, buf
, X86_NREG
, -cfa_offset
);
761 x86_push_reg (code
, X86_EBP
);
762 cfa_offset
+= sizeof(mgreg_t
);
763 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, buf
, cfa_offset
);
764 mono_add_unwind_op_offset (unwind_ops
, code
, buf
, X86_EBP
, - cfa_offset
);
766 x86_mov_reg_reg (code
, X86_EBP
, X86_ESP
, sizeof(mgreg_t
));
767 mono_add_unwind_op_def_cfa_reg (unwind_ops
, code
, buf
, X86_EBP
);
768 /* The + 8 makes the stack aligned */
769 x86_alu_reg_imm (code
, X86_SUB
, X86_ESP
, framesize
+ 8);
771 /* Initialize a MonoContext structure on the stack */
772 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, eax
), X86_EAX
, sizeof (mgreg_t
));
773 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ebx
), X86_EBX
, sizeof (mgreg_t
));
774 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ecx
), X86_ECX
, sizeof (mgreg_t
));
775 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, edx
), X86_EDX
, sizeof (mgreg_t
));
776 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, 0, sizeof (mgreg_t
));
777 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ebp
), X86_EAX
, sizeof (mgreg_t
));
778 x86_mov_reg_reg (code
, X86_EAX
, X86_EBP
, sizeof (mgreg_t
));
779 x86_alu_reg_imm (code
, X86_ADD
, X86_EAX
, cfa_offset
);
780 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, esp
), X86_ESP
, sizeof (mgreg_t
));
781 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, esi
), X86_ESI
, sizeof (mgreg_t
));
782 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, edi
), X86_EDI
, sizeof (mgreg_t
));
783 x86_mov_reg_membase (code
, X86_EAX
, X86_EBP
, 4, sizeof (mgreg_t
));
784 x86_mov_membase_reg (code
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, eip
), X86_EAX
, sizeof (mgreg_t
));
786 /* Call the single step/breakpoint function in sdb */
787 x86_lea_membase (code
, X86_EAX
, X86_ESP
, ctx_offset
);
788 x86_mov_membase_reg (code
, X86_ESP
, 0, X86_EAX
, sizeof (mgreg_t
));
791 x86_breakpoint (code
);
794 x86_call_code (code
, debugger_agent_single_step_from_context
);
796 x86_call_code (code
, debugger_agent_breakpoint_from_context
);
799 /* Restore registers from ctx */
800 /* Overwrite the saved ebp */
801 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ebp
), sizeof (mgreg_t
));
802 x86_mov_membase_reg (code
, X86_EBP
, 0, X86_EAX
, sizeof (mgreg_t
));
803 /* Overwrite saved eip */
804 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, eip
), sizeof (mgreg_t
));
805 x86_mov_membase_reg (code
, X86_EBP
, 4, X86_EAX
, sizeof (mgreg_t
));
806 x86_mov_reg_membase (code
, X86_EAX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, eax
), sizeof (mgreg_t
));
807 x86_mov_reg_membase (code
, X86_EBX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ebx
), sizeof (mgreg_t
));
808 x86_mov_reg_membase (code
, X86_ECX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, ecx
), sizeof (mgreg_t
));
809 x86_mov_reg_membase (code
, X86_EDX
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, edx
), sizeof (mgreg_t
));
810 x86_mov_reg_membase (code
, X86_ESI
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, esi
), sizeof (mgreg_t
));
811 x86_mov_reg_membase (code
, X86_EDI
, X86_ESP
, ctx_offset
+ G_STRUCT_OFFSET (MonoContext
, edi
), sizeof (mgreg_t
));
814 cfa_offset
-= sizeof (mgreg_t
);
815 mono_add_unwind_op_def_cfa (unwind_ops
, code
, buf
, X86_ESP
, cfa_offset
);
818 mono_arch_flush_icache (code
, code
- buf
);
819 g_assert (code
- buf
<= tramp_size
);
821 const char *tramp_name
= single_step
? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
822 *info
= mono_tramp_info_create (tramp_name
, buf
, code
- buf
, ji
, unwind_ops
);