/**
 * \file
 * JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "mini-runtime.h"
#include "debugger-agent.h"

#ifndef DISABLE_INTERPRETER
#include "interp/interp.h"
#endif
#include "mono/utils/mono-tls-inline.h"

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
#ifndef DISABLE_JIT

/**
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	const int size = 20;

	MonoDomain *domain = mono_domain_get ();

	const int this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_domain_code_reserve (domain, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, MONO_ABI_SIZEOF (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
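
/*
 * Sketch of the generated unbox trampoline, assuming the SysV convention where
 * mono_arch_get_this_arg_reg () yields %rdi:
 *
 *   add $sizeof(MonoObject), %rdi   ; make `this' point past the object header
 *   mov $addr, %rax
 *   jmp *%rax
 */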
/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to ARG, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
	amd64_jump_code (code, addr);
	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
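
/*
 * The generated code is just two instructions (sketch; MONO_ARCH_RGCTX_REG is
 * %r10 on amd64):
 *
 *   mov $arg, %r10
 *   jmp <addr>        ; rel32 jump when ADDR is reachable, via a register otherwise
 */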
#endif /* !DISABLE_JIT */

#ifdef _WIN64
// Workaround lack of Valgrind support for 64-bit Windows
#undef VALGRIND_DISCARD_TRANSLATIONS
#define VALGRIND_DISCARD_TRANSLATIONS(...)
#endif
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
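/*
 * The call sequences recognized below, as byte layouts relative to ORIG_CODE
 * (the address right after the call):
 *
 *   49 bb <imm64>      mov $target, %r11     ; starts at ORIG_CODE - 13
 *   41 ff d3           call *%r11
 *
 *   e8 <rel32>         call target           ; starts at ORIG_CODE - 5
 *
 *   41 ff 15 <disp32>  call *<disp>(%rip)    ; starts at ORIG_CODE - 7, GOT call
 */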
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	gboolean can_write;
	/*
	 * Since method_start is retrieved from the function return address (below the current
	 * call/jmp to patch), there is a case where the last instruction of a method is the
	 * call itself (due to OP_NOT_REACHED), directly followed by a different method. In
	 * that case orig_code points into the next method, and method_start will also point
	 * into the next method, not the method containing the call to patch. For this
	 * specific case, fall back to using a method_start of NULL.
	 */
	can_write = mono_breakpoint_clean_code (method_start != orig_code ? method_start : NULL, orig_code, 14, buf, sizeof (buf));

	guint8 *code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				g_assert ((guint64)(orig_code - 11) % 8 == 0);
				mono_atomic_xchg_ptr ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
			}
			if (can_write) {
				mono_atomic_xchg_i32 ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			mono_atomic_xchg_ptr (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
}
#ifndef DISABLE_JIT
guint8*
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;

	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	return thunk_start;
}
#endif /* !DISABLE_JIT */
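
/*
 * Layout of the thunks created above: 6 bytes of code followed by the 64-bit
 * target, which the RIP-relative jump reads from right after itself:
 *
 *   ff 25 00 00 00 00   jmp *0(%rip)
 *   <addr, 8 bytes>
 */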
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	mono_atomic_xchg_ptr (plt_jump_table_entry, addr);
}
#ifndef DISABLE_JIT
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}
guint8 *
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const int kMaxCodeSize = 630;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof (target_mgreg_t);
	rax_offset = -offset;

	offset += sizeof (target_mgreg_t);
	ex_offset = -offset;

	offset += sizeof (target_mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof (target_mgreg_t);
	tramp_offset = -offset;

	offset += sizeof (target_mgreg_t);
	arg_offset = -offset;

	offset += sizeof (target_mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (target_mgreg_t);
#endif

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof (target_mgreg_t);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof (target_mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof (target_mgreg_t));

	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof (target_mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_RAX, sizeof (target_mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (target_mgreg_t));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof (target_mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof (target_mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (target_mgreg_t)));
	}
	for (i = 0; i < AMD64_XMM_NREG; ++i)
		if (AMD64_IS_ARGUMENT_XREG (i))
#if defined(MONO_HAVE_SIMD_REG)
			amd64_movdqu_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)), i);
#else
			amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)), i);
#endif

	/* Check that the stack is aligned */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);

	/* Obtain the trampoline argument which is encoded in the instruction stream */
	if (aot) {
		/* Load the GOT offset */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
		/*
		 * r11 points to a call *<offset>(%rip) instruction, load the
		 * pc-relative offset from the instruction itself.
		 */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
		/* 7 is the length of the call, 8 is the offset to the next got slot */
		amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (target_mgreg_t), sizeof (target_mgreg_t));
		/* Compute the address of the GOT slot */
		amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof (target_mgreg_t));
		/* Load the value */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
		amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
		amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
		br [0] = code;
		x86_branch8 (code, X86_CC_NE, 6, FALSE);
		/* 32 bit immediate */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
		br [1] = code;
		x86_jump8 (code, 10);
		/* 64 bit immediate */
		mono_amd64_patch (br [0], code);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
		mono_amd64_patch (br [1], code);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (target_mgreg_t));

	/* Save LMF begin */

	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof (target_mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof (target_mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof (target_mgreg_t));
	/* Save previous_lmf */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof (target_mgreg_t));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof (target_mgreg_t));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof (target_mgreg_t));

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof (target_mgreg_t));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof (target_mgreg_t));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof (target_mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof (target_mgreg_t));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof (target_mgreg_t));

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof (target_mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);

	/* Normal case */
	mono_amd64_patch (br_ex_check, code);

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	for (i = 0; i < AMD64_XMM_NREG; ++i)
		if (AMD64_IS_ARGUMENT_XREG (i))
#if defined(MONO_HAVE_SIMD_REG)
			amd64_movdqu_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)));
#else
			amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)));
#endif

	/* Restore stack */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof (target_mgreg_t), sizeof (target_mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof (target_mgreg_t));
	}

	g_assertf ((code - buf) <= kMaxCodeSize, "%d %d", (int)(code - buf), kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
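
/*
 * Rough layout of the frame built by the generic trampoline, following the
 * offset computation above (8-byte target_mgreg_t slots, offsets relative to
 * %rbp):
 *
 *   [rbp +  0]  caller's rbp, pushed by the prologue  (rbp_offset)
 *   [rbp -  8]  saved result used by the final jump   (rax_offset)
 *   [rbp - 16]  exception slot                        (ex_offset)
 *   [rbp - 24]  early R11 save slot                   (r11_save_offset)
 *   [rbp - 32]  trampoline address                    (tramp_offset)
 *   [rbp - 40]  trampoline argument                   (arg_offset)
 *   [rbp - 48]  trampoline call result                (res_offset)
 *   below that  MonoContext (ctx_offset), then MonoLMFTramp (lmf_offset),
 *   rounded up to MONO_ARCH_FRAME_ALIGNMENT.
 */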
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
	}

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));

	return buf;
}
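
/*
 * Layout of a specific trampoline (sketch):
 *
 *   e8 <rel32>             call <generic trampoline>   ; or mov $tramp, %r11; call *%r11
 *   04 or 08               width of the embedded argument
 *   <arg1, 4 or 8 bytes>
 *
 * The generic trampoline uses the return address pushed by the call to locate
 * and decode the argument from the instruction stream.
 */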
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	const int tramp_size = 64 + 8 * depth;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof (target_mgreg_t));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof (target_mgreg_t));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof (target_mgreg_t));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (target_mgreg_t) * (index + 1), sizeof (target_mgreg_t));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	if (MONO_ARCH_VTABLE_REG != AMD64_ARG_REG1) {
		/* move the rgctx pointer to the VTABLE register */
		amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	}

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
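
/*
 * Sketch of the fast path emitted above for a non-mrgctx slot at depth 1
 * (ARG_REG1 is %rdi under SysV):
 *
 *   mov <runtime_generic_context>(%rdi), %rax   ; rgctx ptr from the vtable
 *   test %rax, %rax ; jz slow
 *   mov 0(%rax), %rax                           ; descend into the next array
 *   test %rax, %rax ; jz slow
 *   mov <(index + 1) * 8>(%rax), %rax           ; load the slot itself
 *   test %rax, %rax ; jz slow
 *   ret
 * slow:
 *   jump to the MONO_TRAMPOLINE_RGCTX_LAZY_FETCH trampoline
 */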
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	g_assert (aot);
	tramp_size = 64;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* This receives a <slot, trampoline> in the rgctx arg reg. */
	/* Load trampoline addr */
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	/* Jump to the trampoline */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
#endif /* !DISABLE_JIT */
/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE, if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}
#ifndef DISABLE_JIT

/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * mono_debugger_agent_single_step_from_context ()/mono_debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 512;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (target_mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_single_step_from_context));
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_breakpoint_from_context));
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (target_mgreg_t), AMD64_R11, sizeof (target_mgreg_t));

#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
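
/*
 * Note on the context captured by the sdb trampoline: inside the trampoline,
 * [rbp + 0] holds the interrupted frame's rbp, [rbp + 8] the return address
 * (the interrupted rip), and rbp + 16 is the interrupted rsp, which is why
 * those three MonoContext slots are filled from rbp rather than from the
 * live registers.
 */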
/*
 * mono_arch_get_interp_to_native_trampoline:
 *
 * A trampoline that handles the transition from the interpreter into the
 * native world. It requires a descriptor (CallContext) to be set up, so the
 * trampoline can translate the arguments into the native calling convention.
 */
gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	guint8 *label_start_copy, *label_exit_copy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i, cfa_offset, off_methodargs, off_targetaddr;

	buf_len = 512;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);

	/* allocate space for saving the target addr and the call context */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 2 * sizeof (target_mgreg_t));

	/* save CallContext* onto stack */
	off_methodargs = -8;
	amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (target_mgreg_t));

	/* save target address on stack */
	off_targetaddr = -2 * 8;
	amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (target_mgreg_t));

	/* load pointer to CallContext* into R11 */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (target_mgreg_t));

	/* allocate the stack space necessary for the call */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack_size), sizeof (target_mgreg_t));
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);

	/* copy stack from the CallContext, reg1 = dest, reg2 = source */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack), sizeof (target_mgreg_t));

	label_start_copy = code;
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	label_exit_copy = code;
	amd64_branch8 (code, X86_CC_Z, 0, FALSE);
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_ARG_REG2, 0, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_ARG_REG1, 0, AMD64_ARG_REG3, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG2, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, sizeof (target_mgreg_t));
	amd64_jump_code (code, label_start_copy);
	x86_patch (label_exit_copy, code);

	/* set all general purpose registers from CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));

	/* set all floating registers from CallContext */
	for (i = 0; i < FLOAT_PARAM_REGS; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* load target addr */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (target_mgreg_t));

	/* call into native function */
	amd64_call_reg (code, AMD64_R11);

	/* save all return general purpose registers in the CallContext */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (target_mgreg_t));
	for (i = 0; i < RETURN_REGS; i++)
		amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), return_regs [i], sizeof (target_mgreg_t));

	/* save all return floating registers in the CallContext */
	for (i = 0; i < FLOAT_RETURN_REGS; i++)
		amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);

#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
#else
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
#endif
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	*info = mono_tramp_info_create ("interp_to_native_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}
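
/*
 * The copy loop emitted above is a word-sized memcpy of ccontext->stack_size
 * bytes from ccontext->stack onto the newly reserved stack area, roughly:
 *
 *   while (rax) { *dest++ = *src++; rax -= sizeof (target_mgreg_t); }
 *
 * with dest in ARG_REG1 (initially rsp) and src in ARG_REG2.
 */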
gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i, framesize, cfa_offset, ctx_offset;

	buf_len = 512;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (target_mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += MONO_ABI_SIZEOF (CallContext);
	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* save all general purpose registers into the CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), param_regs [i], sizeof (target_mgreg_t));

	/* save all floating registers into the CallContext */
	for (i = 0; i < FLOAT_PARAM_REGS; i++)
		amd64_sse_movsd_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);

	/* set the stack pointer to the value at call site */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, 2 * sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, stack), AMD64_R11, sizeof (target_mgreg_t));

	/* call interp_entry with the ccontext and rmethod as arguments */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
	if (ctx_offset != 0)
		amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, ctx_offset);
	amd64_mov_reg_membase (code, AMD64_ARG_REG2, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, arg), sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, addr), sizeof (target_mgreg_t));
	amd64_call_reg (code, AMD64_R11);

	/* load the return values from the context */
	for (i = 0; i < RETURN_REGS; i++)
		amd64_mov_reg_membase (code, return_regs [i], AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));

	for (i = 0; i < FLOAT_RETURN_REGS; i++)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* reset stack and return */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
#else
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
#endif
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	*info = mono_tramp_info_create ("native_to_interp_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}

#endif /* !DISABLE_JIT */
#ifdef DISABLE_JIT
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

guint8 *
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	g_assert_not_reached ();
}

guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */