/*
 * JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini-amd64.h"
#include "mini-runtime.h"
#include "debugger-agent.h"

#ifndef DISABLE_INTERPRETER
#include "interp/interp.h"
#endif
#include "mono/utils/mono-tls-inline.h"
#ifdef MONO_ARCH_CODE_EXEC_ONLY
#include "aot-runtime.h"
guint8* mono_aot_arch_get_plt_entry_exec_only (gpointer amodule_info, host_mgreg_t *regs, guint8 *code, guint8 *plt);
guint32 mono_arch_get_plt_info_offset_exec_only (gpointer amodule_info, guint8 *plt_entry, host_mgreg_t *regs, guint8 *code, MonoAotResolvePltInfoOffset resolver, gpointer amodule);
void mono_arch_patch_plt_entry_exec_only (gpointer amodule_info, guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr);
#endif
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
#ifndef DISABLE_JIT
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does the
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	const int size = 20;
	MonoDomain *domain = mono_domain_get ();
	MonoMemoryManager *mem_manager = m_method_get_mem_manager (domain, m);

	const int this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, MONO_ABI_SIZEOF (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
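/*
 * For reference, the unbox trampoline emitted above is just three instructions
 * (an illustrative sketch; the exact register depends on mono_arch_get_this_arg_reg):
 *
 *   add <this_reg>, MONO_ABI_SIZEOF (MonoObject)   ; skip the object header so `this` points at the value
 *   mov rax, <addr>                                  ; absolute address of the method's native code
 *   jmp rax
 */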
/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to ARG, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len;
	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, buf_len + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
	amd64_jump_code (code, addr);
	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
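/*
 * Sketch of the emitted static rgctx trampoline (for orientation only):
 *
 *   mov <MONO_ARCH_RGCTX_REG>, <arg>   ; materialize the rgctx argument
 *   jmp <addr>                          ; tail-call the real code
 */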
#endif /* !DISABLE_JIT */

// Workaround lack of Valgrind support for 64-bit Windows
#undef VALGRIND_DISCARD_TRANSLATIONS
#define VALGRIND_DISCARD_TRANSLATIONS(...)
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	guint8 *code;

	// Since method_start is retrieved from the function return address (below the current call/jmp to patch) there is a case when
	// the last instruction of a function is the call (due to OP_NOT_REACHED) instruction and it is then directly followed by a
	// different method. In that case the current orig_code points into the next method and method_start will also point into the
	// next method, not the method including the call to patch. For this specific case, fall back to using a method_start of NULL.
	mono_breakpoint_clean_code (method_start != orig_code ? method_start : NULL, orig_code, 14, buf, sizeof (buf));
	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			g_assert ((guint64)(orig_code - 11) % 8 == 0);
			mono_atomic_xchg_ptr ((gpointer*)(orig_code - 11), addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;
				MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (mono_domain_get ());

				thunk_start = thunk_code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
			}
			mono_atomic_xchg_i32 ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		mono_atomic_xchg_ptr (got_entry, addr);
		VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
	}
}
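/*
 * The call-site shapes handled above, sketched for orientation (byte offsets are
 * relative to orig_code, which points just past the call):
 *
 *   49 bb <imm64>; 41 ff d3     mov r11, <imm64> / call r11    -> swap the 8-byte immediate at orig_code - 11
 *   e8 <rel32>                  call <rel32>                    -> swap the 4-byte displacement at orig_code - 4
 *   41 ff 15 <rel32>            call *<OFFSET>(%rip)            -> swap the GOT entry the displacement points at
 */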
gpointer
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;
	// FIXME: Has to be an argument
	MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (domain);

	thunk_start = thunk_code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;
	addr = thunk_start;
	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	return addr;
}
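/*
 * Thunk layout emitted above (a sketch):
 *
 *   ff 25 00 00 00 00    jmp *0(%rip)    ; indirect jump through the quad that immediately follows
 *   <addr, 8 bytes>                       ; absolute target
 *
 * so a near call that cannot reach `addr` directly can reach the thunk instead.
 */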
#ifndef DISABLE_JIT
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const int kMaxCodeSize = 630;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof (target_mgreg_t);
	rax_offset = -offset;

	offset += sizeof (target_mgreg_t);

	offset += sizeof (target_mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof (target_mgreg_t);
	tramp_offset = -offset;

	offset += sizeof (target_mgreg_t);
	arg_offset = -offset;

	offset += sizeof (target_mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (target_mgreg_t);
#endif

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof (target_mgreg_t);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof (target_mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof (target_mgreg_t));

	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof (target_mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_RAX, sizeof (target_mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (target_mgreg_t));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof (target_mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset + framesize, sizeof (target_mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (target_mgreg_t)));
	}
	for (i = 0; i < AMD64_XMM_NREG; ++i)
		if (AMD64_IS_ARGUMENT_XREG (i))
#if defined(MONO_HAVE_SIMD_REG)
			amd64_movdqu_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)), i);
#else
			amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)), i);
#endif
	/* Check that the stack is aligned */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);

	/* Obtain the trampoline argument which is encoded in the instruction stream */
	if (aot) {
		/*
		 * tramp_index = (tramp_addr - specific_trampolines) / tramp_size
		 * arg = mscorlib_amodule->got [specific_trampolines_got_offsets_base + (tramp_index * 2) + 1]
		 */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES, NULL);
		/* Trampoline addr */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
		/* Trampoline offset */
		amd64_alu_reg_reg (code, X86_SUB, AMD64_RAX, AMD64_R11);
		/* Trampoline index */
		amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 3);
		/* Every trampoline uses 2 got slots */
		amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 1);
		/* pointer size */
		amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3);
		/* Address of block of got slots */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE, NULL);
		/* Address of got slots belonging to this trampoline */
		amd64_alu_reg_reg (code, X86_ADD, AMD64_RAX, AMD64_R11);
		/* The second slot contains the argument */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
		amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
		amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
		br [0] = code;
		x86_branch8 (code, X86_CC_NE, 6, FALSE);
		/* 32 bit immediate */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
		br [1] = code;
		x86_jump8 (code, 10);
		/* 64 bit immediate */
		mono_amd64_patch (br [0], code);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
		mono_amd64_patch (br [1], code);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (target_mgreg_t));
	/* Save sp into the LMF */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof (target_mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof (target_mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof (target_mgreg_t));
	/* Save previous_lmf */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof (target_mgreg_t));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof (target_mgreg_t));
	/* Set the new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof (target_mgreg_t));

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof (target_mgreg_t));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof (target_mgreg_t));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof (target_mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof (target_mgreg_t));
	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof (target_mgreg_t));

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);

	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof (target_mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);
	/* Normal case */
	mono_amd64_patch (br_ex_check, code);

	/* Restore argument registers, r10 (imt method/rgxtx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX || i == AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	for (i = 0; i < AMD64_XMM_NREG; ++i)
		if (AMD64_IS_ARGUMENT_XREG (i))
#if defined(MONO_HAVE_SIMD_REG)
			amd64_movdqu_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)));
#else
			amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)));
#endif

	/* Restore stack */
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof (target_mgreg_t), sizeof (target_mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof (target_mgreg_t));
	}
	g_assertf ((code - buf) <= kMaxCodeSize, "%d %d", (int)(code - buf), kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_mem_manager_code_reserve_align (mem_manager, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_mem_manager_code_reserve_align (mem_manager, size, 1);
	}

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));

	return buf;
}
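/*
 * Layout of the specific trampoline emitted above (a sketch of the common
 * near-call case; this is also what the generic trampoline decodes when it
 * loads the byte at tramp_addr + 5):
 *
 *   e8 <rel32>          call <generic trampoline>
 *   04 <imm32>          argument fits in 32 bits, or
 *   08 <imm64>          argument needs a full 64-bit immediate
 */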
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	const int tramp_size = 64 + 8 * depth;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof (target_mgreg_t));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof (target_mgreg_t));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof (target_mgreg_t));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (target_mgreg_t) * (index + 1), sizeof (target_mgreg_t));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	if (MONO_ARCH_VTABLE_REG != AMD64_ARG_REG1) {
		/* move the rgctx pointer to the VTABLE register */
		amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	}

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (mono_get_root_domain ());
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mem_manager, NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
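/*
 * What the emitted fast path does, roughly (a sketch; the number of chained
 * loads depends on the depth/index computed above):
 *
 *   rax = vtable->runtime_generic_context     ; or the mrgctx itself
 *   rax = rax-><next array>                   ; repeated `depth` times, bailing out on NULL
 *   rax = rax [index + 1]                     ; the slot itself
 *   ret                                       ; if non-NULL, rax holds the fetched value
 *
 * Any NULL along the way falls through to the slow-path RGCTX_LAZY_FETCH trampoline.
 */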
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	tramp_size = 64;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* This receives a <slot, trampoline> in the rgctx arg reg. */
	/* Load trampoline addr */
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	/* Jump to the trampoline */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
#endif /* !DISABLE_JIT */
/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE, if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
#ifdef MONO_ARCH_CODE_EXEC_ONLY
/* Keep in sync with aot-compiler.c, arch_emit_plt_entry. */
#define PLT_ENTRY_OFFSET_REG AMD64_RAX
/* If PLT_ENTRY_OFFSET_REG is R8 - R15, increase mov instruction size by 1 due to use of REX. */
#define PLT_MOV_REG_IMM8_SIZE (1 + sizeof (guint8))
#define PLT_MOV_REG_IMM16_SIZE (2 + sizeof (guint16))
#define PLT_MOV_REG_IMM32_SIZE (1 + sizeof (guint32))
#define PLT_JMP_INST_SIZE 6
static guchar
aot_arch_get_plt_entry_size (MonoAotFileInfo *info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
{
	if (info->plt_size <= 0xFF)
		return PLT_MOV_REG_IMM8_SIZE + PLT_JMP_INST_SIZE;
	else if (info->plt_size <= 0xFFFF)
		return PLT_MOV_REG_IMM16_SIZE + PLT_JMP_INST_SIZE;
	else
		return PLT_MOV_REG_IMM32_SIZE + PLT_JMP_INST_SIZE;
}

static guint32
aot_arch_get_plt_entry_index (MonoAotFileInfo *info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
{
	if (info->plt_size <= 0xFF)
		return regs [PLT_ENTRY_OFFSET_REG] & 0xFF;
	else if (info->plt_size <= 0xFFFF)
		return regs [PLT_ENTRY_OFFSET_REG] & 0xFFFF;
	else
		return regs [PLT_ENTRY_OFFSET_REG] & 0xFFFFFFFF;
}
guint8*
mono_aot_arch_get_plt_entry_exec_only (gpointer amodule_info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
{
	guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, plt);
	guchar plt_entry_size = aot_arch_get_plt_entry_size ((MonoAotFileInfo *)amodule_info, regs, code, plt);

	/* First PLT slot is never emitted into table, take that into account */
	/* when calculating corresponding PLT entry. */
	plt_entry_index--;
	return plt + ((gsize)plt_entry_index * (gsize)plt_entry_size);
}

guint32
mono_arch_get_plt_info_offset_exec_only (gpointer amodule_info, guint8 *plt_entry, host_mgreg_t *regs, guint8 *code, MonoAotResolvePltInfoOffset resolver, gpointer amodule)
{
	guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, NULL);

	/* First PLT slot is never emitted into table, take that into account */
	/* when calculating offset. */
	plt_entry_index--;
	return resolver (amodule, plt_entry_index);
}

void
mono_arch_patch_plt_entry_exec_only (gpointer amodule_info, guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	/* Same calculation of GOT offset as done in aot-compiler.c, emit_plt and used as jmp DISP. */
	guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, NULL);
	gpointer *plt_jump_table_entry = ((gpointer *)(got + ((MonoAotFileInfo *)amodule_info)->plt_got_offset_base) + plt_entry_index);
	mono_atomic_xchg_ptr (plt_jump_table_entry, addr);
}
#endif /* MONO_ARCH_CODE_EXEC_ONLY */
/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	mono_atomic_xchg_ptr (plt_jump_table_entry, addr);
}
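/*
 * PLT entry layout assumed by the two functions above (a sketch):
 *
 *   ff 25 <DISP32>       jmp *<DISP>(%rip)    ; jumps through a jump-table slot
 *   <plt info offset>                          ; 4 bytes read by mono_arch_get_plt_info_offset at +6
 *
 * Patching a PLT entry therefore just swaps the jump-table slot the DISP points at.
 */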
#ifndef DISABLE_JIT
/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * mono_debugger_agent_single_step_from_context ()/mono_debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 512;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (target_mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_single_step_from_context));
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_breakpoint_from_context));
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (target_mgreg_t), AMD64_R11, sizeof (target_mgreg_t));

	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * mono_arch_get_interp_to_native_trampoline:
 *
 * A trampoline that handles the transition from the interpreter into the native
 * world. It requires setting up a descriptor (CallContext) so the trampoline
 * can translate the arguments into the native calling convention.
 */
gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	guint8 *label_start_copy, *label_exit_copy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i, cfa_offset, off_methodargs, off_targetaddr;

	buf_len = 512;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);

	/* allocate space for saving the target addr and the call context */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 2 * sizeof (target_mgreg_t));

	/* save CallContext* onto stack */
	off_methodargs = - 8;
	amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (target_mgreg_t));

	/* save target address on stack */
	off_targetaddr = - 2 * 8;
	amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (target_mgreg_t));

	/* load pointer to CallContext* into R11 */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (target_mgreg_t));

	/* allocate the stack space necessary for the call */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack_size), sizeof (target_mgreg_t));
	amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);

	/* copy stack from the CallContext, reg1 = dest, reg2 = source */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack), sizeof (target_mgreg_t));

	label_start_copy = code;
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	label_exit_copy = code;
	amd64_branch8 (code, X86_CC_Z, 0, FALSE);
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_ARG_REG2, 0, sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_ARG_REG1, 0, AMD64_ARG_REG3, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG2, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, sizeof (target_mgreg_t));
	amd64_jump_code (code, label_start_copy);
	x86_patch (label_exit_copy, code);

	/* set all general purpose registers from CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));

	/* set all floating registers from CallContext */
	for (i = 0; i < FLOAT_PARAM_REGS; ++i)
		amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* load target addr */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (target_mgreg_t));

	/* call into native function */
	amd64_call_reg (code, AMD64_R11);

	/* save all return general purpose registers in the CallContext */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (target_mgreg_t));
	for (i = 0; i < RETURN_REGS; i++)
		amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), return_regs [i], sizeof (target_mgreg_t));

	/* save all return floating registers in the CallContext */
	for (i = 0; i < FLOAT_RETURN_REGS; i++)
		amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);

#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
#else
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
#endif
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	*info = mono_tramp_info_create ("interp_to_native_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}
gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i, framesize, cfa_offset, ctx_offset;

	buf_len = 512;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (target_mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += MONO_ABI_SIZEOF (CallContext);
	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);

	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* save all general purpose registers into the CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), param_regs [i], sizeof (target_mgreg_t));

	/* save all floating registers into the CallContext */
	for (i = 0; i < FLOAT_PARAM_REGS; i++)
		amd64_sse_movsd_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);

	/* set the stack pointer to the value at call site */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, 2 * sizeof (target_mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, stack), AMD64_R11, sizeof (target_mgreg_t));

	/* call interp_entry with the ccontext and rmethod as arguments */
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
	if (ctx_offset != 0)
		amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, ctx_offset);
	amd64_mov_reg_membase (code, AMD64_ARG_REG2, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, arg), sizeof (target_mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, addr), sizeof (target_mgreg_t));
	amd64_call_reg (code, AMD64_R11);

	/* load the return values from the context */
	for (i = 0; i < RETURN_REGS; i++)
		amd64_mov_reg_membase (code, return_regs [i], AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));

	for (i = 0; i < FLOAT_RETURN_REGS; i++)
		amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* reset stack and return */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
#else
	amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
#endif
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);

	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	*info = mono_tramp_info_create ("native_to_interp_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}
#endif /* !DISABLE_JIT */
#ifdef DISABLE_JIT
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	g_assert_not_reached ();
}

guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */