/**
 * \file
 * JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *   Johan Lorensson (lateralusx.github@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "debugger-agent.h"

#ifdef ENABLE_INTERPRETER
#include "interp/interp.h"
#endif
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
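/*
 * For reference: ALIGN_TO (20, 16) == 32, i.e. values are rounded up to the
 * next multiple of the alignment, and IS_REX () matches the amd64 REX prefix
 * bytes 0x40-0x4f, e.g. IS_REX (0x49) holds for the 49 bb `mov r11, imm64'
 * encoding checked in mono_arch_patch_callsite () below.
 */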
#ifndef DISABLE_JIT
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * when value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int this_reg, size = 20;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_domain_code_reserve (domain, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
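/*
 * For reference, the trampoline emitted above amounts to three instructions
 * (assuming `this' arrives in %rdi, which is what mono_arch_get_this_arg_reg ()
 * returns for the SysV amd64 ABI):
 *
 *   add rdi, sizeof (MonoObject)   ; unbox: skip the object header
 *   mov rax, <addr>
 *   jmp rax
 */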
/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to ARG, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
#endif /* !DISABLE_JIT */
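/*
 * For reference, the static rgctx trampoline created above is just the pair
 * (MONO_ARCH_RGCTX_REG is %r10 on amd64):
 *
 *   mov r10, <arg>
 *   jmp <addr>
 */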
#ifdef _WIN64
// Workaround lack of Valgrind support for 64-bit Windows
#define VALGRIND_DISCARD_TRANSLATIONS(...)
#endif
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
			}
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
}
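/*
 * The three callsite shapes recognized by mono_arch_patch_callsite (), with
 * orig_code pointing just past the call:
 *
 *   orig_code - 13:  49 bb <imm64>     mov r11, <imm64>
 *                    41 ff d3          call r11
 *
 *   orig_code - 5:   e8 <rel32>        call <disp>
 *
 *   orig_code - 7:   41 ff 15 <rel32>  call *<OFFSET>(%rip)
 *
 * In the first case the imm64 at orig_code - 11 is swapped; in the second,
 * the rel32 (possibly redirected through a thunk); in the third, the GOT
 * entry itself.
 */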
#ifndef DISABLE_JIT
guint8*
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;

	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	return thunk_start;
}
#endif /* !DISABLE_JIT */
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
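/*
 * Layout of an amd64 PLT entry, as asserted above and as read by
 * mono_arch_get_plt_info_offset () further down:
 *
 *   code + 0:  ff 25 <disp32>   jmp *<DISP>(%rip)
 *   code + 6:  <guint32>        PLT info offset
 *
 * The jump table slot patched here therefore lives at code + 6 + disp.
 */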
#ifndef DISABLE_JIT
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	char *tramp_name;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = 630;

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof(mgreg_t);
	rax_offset = -offset;

	offset += sizeof(mgreg_t);
	ex_offset = -offset;

	offset += sizeof(mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof(mgreg_t);
	tramp_offset = -offset;

	offset += sizeof(gpointer);
	arg_offset = -offset;

	offset += sizeof(mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (mgreg_t);
#endif

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
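	/*
	 * Sketch of the resulting frame layout, relative to %rbp (all slots below
	 * rbp; exact offsets depend on sizeof (MonoContext)/sizeof (MonoLMFTramp)):
	 *
	 *   rbp_offset        original rax
	 *   rax_offset        saved result to return to / jump through
	 *   ex_offset         exception slot
	 *   r11_save_offset   r11 spill (written before the frame exists)
	 *   tramp_offset      address of the specific trampoline
	 *   arg_offset        argument decoded from the instruction stream
	 *   res_offset        result of the trampoline function
	 *   ctx_offset        MonoContext (gregs/fregs saved below)
	 *   lmf_offset        MonoLMFTramp
	 */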
	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
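	/*
	 * Note: the return address popped into %r11 above points just past the
	 * call inside the specific trampoline, so subtracting the call length
	 * (7 for the AOT `call *<offset>(%rip)' form, 5 for the direct call)
	 * yields the address of the specific trampoline itself.
	 */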
	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
	}
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);

	/* Check that the stack is aligned */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		/* Crash by dereferencing a NULL pointer, keeping the AOT image position independent */
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
	if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next got slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value from the GOT slot */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}
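	/*
	 * The non-AOT decoding above matches the instruction stream emitted by
	 * mono_arch_create_specific_trampoline () later in this file:
	 *
	 *   tramp + 0:  e8 <rel32>           call <generic trampoline>
	 *   tramp + 5:  04 or 08             size marker of the encoded argument
	 *   tramp + 6:  <imm32> or <imm64>   the argument itself
	 */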
	/* Save LMF begin */

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);

	/* Normal case */
	mono_amd64_patch (br_ex_check, code);

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));

	/* Restore stack */
#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}

	g_assert ((code - buf) <= kMaxCodeSize);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
	g_free (tramp_name);

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
	}

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));

	return buf;
}
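/*
 * A minimal sketch of what the code above emits for a 32-bit argument:
 *
 *   e8 <rel32>   call <generic trampoline for tramp_type>
 *   04           argument size marker
 *   <imm32>      arg1
 *
 * The generic trampoline never returns through the call; it decodes the
 * marker and the argument via the return address pushed by the call.
 */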
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
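/*
 * Worked example of the slot decoding above, for a non-MRGCTX slot and
 * hypothetical per-level array sizes of 4/8/16 (the real values come from
 * mono_class_rgctx_get_array_size ()): slot 9 gives depth 1, index 6, since
 * level 0 holds size - 1 = 3 payload entries (entry 0 of each array links to
 * the next one, hence the `index + 1' in the fetch above).
 */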
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	g_assert (aot);
	tramp_size = 64;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* This receives a <slot, trampoline> in the rgctx arg reg. */
	/* Load trampoline addr */
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
	/* Jump to the trampoline */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	if (info)
		*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
#endif /* !DISABLE_JIT */
gpointer
mono_amd64_handler_block_trampoline_helper (void)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
	return jit_tls->handler_block_return_address;
}
#ifndef DISABLE_JIT
gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));

	unwind_ops = mono_arch_get_cie_program ();

	/*
	 * This trampoline restores the call chain of the handler block then jumps into the code that deals with it.
	 * We get here from the ret emitted by CEE_ENDFINALLY.
	 * The stack is misaligned.
	 */
	/* Align the stack before the call to mono_amd64_handler_block_trampoline_helper() */
#ifdef TARGET_WIN32
	/* Also make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8 + 4 * 8);
#else
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
#endif
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_handler_block_trampoline_helper");
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, mono_amd64_handler_block_trampoline_helper);
		amd64_call_reg (code, AMD64_RAX);
	}
	/* Undo stack alignment */
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8 + 4 * 8);
#else
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
#endif
	/* Save the result to the stack */
	amd64_push_reg (code, AMD64_RAX);
#ifdef TARGET_WIN32
	/* Make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 4 * 8);
#endif
	if (aot) {
		char *name = g_strdup_printf ("trampoline_func_%d", MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, name);
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_R11, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, mono_get_trampoline_func (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD));
	}
	/* The stack is aligned */
	amd64_call_reg (code, AMD64_RAX);
#ifdef TARGET_WIN32
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 4 * 8);
#endif
	/* Load return address */
	amd64_pop_reg (code, AMD64_RAX);
	/* The stack is misaligned, that's what the code we branch to expects */
	amd64_jump_reg (code, AMD64_RAX);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));

	*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
#endif /* !DISABLE_JIT */
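/*
 * Control flow, for reference: the `ret' emitted for CEE_ENDFINALLY lands in
 * the trampoline above with a misaligned stack; the helper fetches the real
 * return address from jit_tls, the HANDLER_BLOCK_GUARD trampoline function
 * runs, and the final indirect jump resumes at that address.
 */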
/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
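/*
 * The only shape handled above is a direct call, with CODE pointing just
 * past it:
 *
 *   code - 5:  e8 <rel32>   call <target>
 *
 * so the target is code + disp; anything else yields NULL.
 */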
/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}
#ifndef DISABLE_JIT
/*
 * mono_arch_create_sdb_trampoline:
 *
 *   Return a trampoline which captures the current context, passes it to
 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 512;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));

#ifdef TARGET_WIN32
	amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
	amd64_pop_reg (code, AMD64_RBP);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
#else
	amd64_leave (code);
#endif
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);
	g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * mono_arch_get_enter_icall_trampoline:
 *
 *   A trampoline that handles the transition from interpreter into native
 * world. It requires to set up a descriptor (InterpMethodArguments), so the
 * trampoline can translate the arguments into the native calling convention.
 *
 * See also `build_args_from_sig ()` in interp.c.
 */
gpointer
mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
{
#ifdef ENABLE_INTERPRETER
	const int gregs_num = INTERP_ICALL_TRAMP_IARGS;
	const int fregs_num = INTERP_ICALL_TRAMP_FARGS;
	guint8 *start = NULL, *code, *label_gexits [gregs_num], *label_fexits [fregs_num], *label_leave_tramp [3], *label_is_float_ret;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	static int farg_regs [] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2};
	int buf_len, i, framesize = 0, off_rbp, off_methodargs, off_targetaddr;

	buf_len = 512 + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0);
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len);

	off_rbp = -framesize;

	framesize += sizeof (mgreg_t);
	off_methodargs = -framesize;

	framesize += sizeof (mgreg_t);
	off_targetaddr = -framesize;

	framesize += (gregs_num - PARAM_REGS) * sizeof (mgreg_t);

	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));

	/* save InterpMethodArguments* onto stack */
	amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (mgreg_t));

	/* save target address on stack */
	amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (mgreg_t));

	/* load pointer to InterpMethodArguments* into R11 */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, 8);

	/* move flen into RAX */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, flen), sizeof (mgreg_t));
	/* load pointer to fargs into R11 */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, fargs), sizeof (mgreg_t));

	for (i = 0; i < fregs_num; ++i) {
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		label_fexits [i] = code;
		x86_branch8 (code, X86_CC_Z, 0, FALSE);

		amd64_sse_movsd_reg_membase (code, farg_regs [i], AMD64_R11, i * sizeof (double));
		amd64_dec_reg_size (code, AMD64_RAX, 1);
	}

	for (i = 0; i < fregs_num; i++)
		x86_patch (label_fexits [i], code);

	/* load pointer to InterpMethodArguments* into R11 */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (mgreg_t));
	/* move ilen into RAX */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, ilen), sizeof (mgreg_t));

	int stack_offset = 0;
	for (i = 0; i < gregs_num; i++) {
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		label_gexits [i] = code;
		x86_branch32 (code, X86_CC_Z, 0, FALSE);

		/* load pointer to InterpMethodArguments* into R11 */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
		/* load pointer to iargs into R11 */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, iargs), sizeof (mgreg_t));

		if (i < PARAM_REGS) {
			amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RSP, stack_offset, AMD64_R11, sizeof (mgreg_t));
			stack_offset += sizeof (mgreg_t);
		}
		amd64_dec_reg_size (code, AMD64_RAX, 1);
	}

	for (i = 0; i < gregs_num; i++)
		x86_patch (label_gexits [i], code);

	/* load target addr */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (mgreg_t));

	/* call into native function */
	amd64_call_reg (code, AMD64_R11);

	/* load InterpMethodArguments */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));

	/* load is_float_ret */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, is_float_ret), sizeof (mgreg_t));

	/* check if a float return value is expected */
	amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);

	label_is_float_ret = code;
	x86_branch8 (code, X86_CC_NZ, 0, FALSE);

	/* greg return */
	/* load InterpMethodArguments */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
	/* load retval */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, retval), sizeof (mgreg_t));

	amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
	label_leave_tramp [0] = code;
	x86_branch8 (code, X86_CC_Z, 0, FALSE);

	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RAX, sizeof (mgreg_t));

	label_leave_tramp [1] = code;
	x86_jump8 (code, 0);

	/* freg return */
	x86_patch (label_is_float_ret, code);
	/* load InterpMethodArguments */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
	/* load retval */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (InterpMethodArguments, retval), sizeof (mgreg_t));

	amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
	label_leave_tramp [2] = code;
	x86_branch8 (code, X86_CC_Z, 0, FALSE);

	amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);

	for (i = 0; i < 3; i++)
		x86_patch (label_leave_tramp [i], code);

	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));
	amd64_pop_reg (code, AMD64_RBP);
	amd64_ret (code);

	g_assert (code - start < buf_len);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);

	if (info)
		*info = mono_tramp_info_create ("enter_icall_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* ENABLE_INTERPRETER */
}
#endif /* !DISABLE_JIT */
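/*
 * A minimal usage sketch of the enter_icall trampoline above (illustrative
 * only; the descriptor is really filled in by build_args_from_sig () in
 * interp.c, and the variable names here are hypothetical):
 *
 *   InterpMethodArguments margs = { 0 };
 *   margs.ilen = 2;               // two integer arguments ...
 *   margs.iargs = iargs;          // ... taken from this array
 *   margs.flen = 0;               // no float arguments
 *   margs.retval = &result;       // where to store the native return value
 *   margs.is_float_ret = FALSE;
 *   ((void (*) (gpointer, InterpMethodArguments *)) tramp) (target_addr, &margs);
 */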
#ifdef DISABLE_JIT
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	g_assert_not_reached ();
}

guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}
#endif /* DISABLE_JIT */