/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini-amd64.h"
#include "debugger-agent.h"

#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

/*
 * mono_arch_get_unbox_trampoline:
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int this_reg, size = 20;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
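
/*
 * A sketch of the code emitted above (the register holding the "this"
 * argument comes from mono_arch_get_this_arg_reg (), the exact encoding from
 * amd64-codegen.h):
 *
 *     add <this_reg>, sizeof (MonoObject)   ; skip the object header so "this"
 *                                           ; points at the unboxed value
 *     mov rax, <addr>                       ; 64 bit immediate
 *     jmp rax
 */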

/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
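
/*
 * The emitted code is just (a sketch; the actual register is whatever
 * MONO_ARCH_RGCTX_REG names):
 *
 *     mov <rgctx_reg>, <mrgctx>   ; 64 bit immediate
 *     jmp <addr>                  ; rel32 when the displacement fits, otherwise
 *                                 ; amd64_jump_code () emits an indirect jump
 */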

// Workaround lack of Valgrind support for 64-bit Windows
#define VALGRIND_DISCARD_TRANSLATIONS(...)

/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	guint8 *code;
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
			}
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	} else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));

		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
}
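
/*
 * For reference, the callsite shapes recognized above (offsets are relative to
 * ORIG_CODE, the pc right after the call):
 *
 *     49 bb <imm64> / call reg   mov r11, imm64 followed by an indirect call;
 *                                the 8 byte immediate at orig_code - 11 is swapped.
 *     e8 <rel32>                 direct call; the 4 byte displacement at
 *                                orig_code - 4 is swapped, going through a thunk
 *                                when ADDR is out of 32 bit displacement range.
 *     41 ff 15 <rel32>           call *<OFFSET>(%rip); the GOT entry the
 *                                displacement points to is swapped instead.
 */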

gpointer
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;

	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	return thunk_start;
}
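
/*
 * The thunk emitted above is position independent (a sketch; the exact
 * encoding comes from amd64_jump_membase ()):
 *
 *     ff 25 00 00 00 00    jmp *0(%rip)   ; jump through the quadword below
 *     <8 byte target>                     ; absolute address of ADDR
 */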

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
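
/*
 * Layout of an amd64 PLT entry as assumed above and by
 * mono_arch_get_plt_info_offset () below:
 *
 *     ff 25 <disp32>    jmp *<DISP>(%rip)   ; jump through the PLT jump table
 *     <guint32>         PLT info offset read back at plt_entry + 6
 *
 * The jump table slot itself lives at code + 6 + disp, which is how
 * plt_jump_table_entry is computed.
 */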

static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}

guchar *
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = 630;

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);

	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof(mgreg_t);
	rax_offset = -offset;

	offset += sizeof(mgreg_t);
	ex_offset = -offset;

	offset += sizeof(mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof(mgreg_t);
	tramp_offset = -offset;

	offset += sizeof(gpointer);
	arg_offset = -offset;

	offset += sizeof(mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (mgreg_t);

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
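
	/*
	 * The resulting frame layout, relative to the new rbp (a sketch of the
	 * offsets computed above, all of them at or below rbp):
	 *
	 *     rbp_offset         caller's rbp (pushed in the prologue below)
	 *     rax_offset         scratch slot used to keep the result alive across the epilogue
	 *     r11_save_offset    R11 spilled by the code written at r11_save_code
	 *     tramp_offset       address of the specific trampoline which called us
	 *     arg_offset         trampoline argument decoded from the instruction stream
	 *     res_offset         value returned by the C trampoline function
	 *     ctx_offset         a MonoContext holding the full register state
	 *     lmf_offset         a MonoLMFTramp linked into the LMF chain
	 */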

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));

	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
	}
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);

	/* Check that the stack is aligned */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		/* The address of stack_unaligned () cannot be embedded in AOT code, so just crash */
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);

	if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next got slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		/* The argument was passed in the first argument register, reload it from the register save area */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}

	/* Save LMF begin */

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8 *)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8 *)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	amd64_leave (code);
	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8 *)mono_get_throw_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);

	/* Normal case */
	mono_amd64_patch (br_ex_check, code);

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));

	/* Restore stack */
	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the saved result into rax and return to the caller */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}

	g_assert ((code - buf) <= kMaxCodeSize);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	char *tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
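
/*
 * To summarize the control flow (a sketch): a JITted callsite calls a specific
 * trampoline created by mono_arch_create_specific_trampoline () below, which in
 * turn calls this generic trampoline. The generic code saves the full register
 * state, decodes the trampoline argument from the instruction stream, calls the
 * C function returned by mono_get_trampoline_func (), and finally either returns
 * or tail-jumps to the code address that function produced.
 */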

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
	}

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));

	return buf;
}
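
/*
 * The specific trampoline is a call followed by inline data (a sketch, matching
 * the decoding logic in the generic trampoline above, which compares the byte
 * at offset 5 from the call against 4 to pick the immediate size):
 *
 *     e8 <rel32>            call <generic trampoline>
 *     <size byte> <imm>     1 byte size marker followed by a 32 or 64 bit argument
 */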

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;
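
	/*
	 * The code emitted below walks the rgctx without taking locks: starting
	 * from the (M)RGCTX pointer in the first argument register it follows
	 * "depth" next-array pointers and loads the requested slot. Any NULL
	 * found along the way (including a NULL slot) falls through to the lazy
	 * fetch trampoline, which fills the slot and returns its value.
	 */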

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8 *) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}

gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* This receives a <slot, trampoline> in the rgctx arg reg. */
	/* Load trampoline addr */
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
	/* Jump to the trampoline */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	/*
	 * This trampoline restores the call chain of the handler block then jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 24);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}

/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE, if it exists.
 */
guint8 *
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}

/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}

/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8 *
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	framesize = 0;

	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (mgreg_t);

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
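
	/*
	 * Frame layout built below (a sketch): a scratch area the callee may use
	 * to spill its argument registers, followed at ctx_offset by a MonoContext
	 * that is filled in from the captured register state, passed to the
	 * debugger agent, and then read back to restore the thread.
	 */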

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));

	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}