1 /**
2 * \file
3 * JIT trampoline code for amd64
5 * Authors:
6 * Dietmar Maurer (dietmar@ximian.com)
7 * Zoltan Varga (vargaz@gmail.com)
8 * Johan Lorensson (lateralusx.github@gmail.com)
10 * (C) 2001 Ximian, Inc.
11 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
12 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
13 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <config.h>
17 #include <glib.h>
19 #include <mono/metadata/abi-details.h>
20 #include <mono/metadata/appdomain.h>
21 #include <mono/metadata/marshal.h>
22 #include <mono/metadata/tabledefs.h>
23 #include <mono/metadata/profiler-private.h>
24 #include <mono/metadata/gc-internals.h>
25 #include <mono/arch/amd64/amd64-codegen.h>
27 #include <mono/utils/memcheck.h>
29 #include "mini.h"
30 #include "mini-amd64.h"
31 #include "mini-runtime.h"
32 #include "debugger-agent.h"
34 #ifndef DISABLE_INTERPRETER
35 #include "interp/interp.h"
36 #endif
37 #include "mono/utils/mono-tls-inline.h"
39 #ifdef MONO_ARCH_CODE_EXEC_ONLY
40 #include "aot-runtime.h"
41 guint8* mono_aot_arch_get_plt_entry_exec_only (gpointer amodule_info, host_mgreg_t *regs, guint8 *code, guint8 *plt);
42 guint32 mono_arch_get_plt_info_offset_exec_only (gpointer amodule_info, guint8 *plt_entry, host_mgreg_t *regs, guint8 *code, MonoAotResolvePltInfoOffset resolver, gpointer amodule);
43 void mono_arch_patch_plt_entry_exec_only (gpointer amodule_info, guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr);
44 #endif
46 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
48 #ifndef DISABLE_JIT
50 * mono_arch_get_unbox_trampoline:
51 * @m: method pointer
52 * @addr: pointer to native code for @m
54 * When value type methods are called through the vtable, we need to unbox the
55 * 'this' argument. This method returns a pointer to a trampoline which does
56 * the unboxing before calling the method.
58 gpointer
59 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
61 guint8 *code, *start;
62 GSList *unwind_ops;
63 const int size = 20;
64 MonoDomain *domain = mono_domain_get ();
65 MonoMemoryManager *mem_manager = m_method_get_mem_manager (domain, m);
67 const int this_reg = mono_arch_get_this_arg_reg (NULL);
69 start = code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
71 unwind_ops = mono_arch_get_cie_program ();
73 amd64_alu_reg_imm (code, X86_ADD, this_reg, MONO_ABI_SIZEOF (MonoObject));
74 /* FIXME: Optimize this */
75 amd64_mov_reg_imm (code, AMD64_RAX, addr);
76 amd64_jump_reg (code, AMD64_RAX);
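/*
 * For reference, the emitted sequence is roughly:
 *     add  <this_reg>, sizeof (MonoObject)   ; step over the object header
 *     mov  rax, <addr>
 *     jmp  rax
 * so the callee sees 'this' pointing at the unboxed value type data
 * (this_reg is the first argument register of the native ABI).
 */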
77 g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);
78 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
80 mono_arch_flush_icache (start, code - start);
81 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));
83 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
85 return start;
89 * mono_arch_get_static_rgctx_trampoline:
91 * Create a trampoline which sets RGCTX_REG to ARG, then jumps to ADDR.
93 gpointer
94 mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
96 guint8 *code, *start;
97 GSList *unwind_ops;
98 int buf_len;
99 MonoDomain *domain = mono_domain_get ();
101 #ifdef MONO_ARCH_NOMAP32BIT
102 buf_len = 32;
103 #else
104 /* AOTed code could still have a non-32 bit address */
105 if ((((guint64)addr) >> 32) == 0)
106 buf_len = 16;
107 else
108 buf_len = 30;
109 #endif
111 start = code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, buf_len + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
113 unwind_ops = mono_arch_get_cie_program ();
115 amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
116 amd64_jump_code (code, addr);
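/*
 * For reference, the generated code is just:
 *     mov  MONO_ARCH_RGCTX_REG, <arg>   ; r10 on this port
 *     jmp  <addr>
 * The two buffer sizes above simply cover the encodings needed when ADDR
 * does or does not fit in 32 bits.
 */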
117 g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);
118 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
120 mono_arch_flush_icache (start, code - start);
121 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
123 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
125 return start;
127 #endif /* !DISABLE_JIT */
129 #ifdef _WIN64
130 // Workaround lack of Valgrind support for 64-bit Windows
131 #undef VALGRIND_DISCARD_TRANSLATIONS
132 #define VALGRIND_DISCARD_TRANSLATIONS(...)
133 #endif
136 * mono_arch_patch_callsite:
138 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
139 * points to the pc right after the call.
141 void
142 mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
144 guint8 *code;
145 guint8 buf [16];
147 // Since method_start is retrieved from the function return address (below the current call/jmp to patch), there is a case
148 // where the last instruction of a function is the call to patch (due to OP_NOT_REACHED) and it is directly followed by a
149 // different method. In that case orig_code points into the next method and method_start will also point into the
150 // next method, not the method containing the call to patch. For this specific case, fall back to a method_start of NULL.
151 mono_breakpoint_clean_code (method_start != orig_code ? method_start : NULL, orig_code, 14, buf, sizeof (buf));
153 code = buf + 14;
155 /* mov 64-bit imm into r11 (followed by call reg?) or direct call */
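/*
 * The call-site shapes recognised below (orig_code points just past the call)
 * are roughly:
 *     49 bb <imm64> ; 41 ff d3    mov %r11, <imm64>; call *%r11   (13 bytes)
 *     e8 <rel32>                  call <rel32>                    (5 bytes)
 *     41 ff 15 <disp32>           call *<disp32>(%rip), used for AOT PLT calls
 */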
156 if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
157 if (code [-5] != 0xe8) {
158 g_assert ((guint64)(orig_code - 11) % 8 == 0);
159 mono_atomic_xchg_ptr ((gpointer*)(orig_code - 11), addr);
160 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
161 } else {
162 gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));
164 if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
166 * This might happen with LLVM or when calling AOTed code. Create a thunk.
168 guint8 *thunk_start, *thunk_code;
169 MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (mono_domain_get ());
171 thunk_start = thunk_code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, 32);
172 amd64_jump_membase (thunk_code, AMD64_RIP, 0);
173 *(guint64*)thunk_code = (guint64)addr;
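/* The thunk is a 6 byte jmp *0(%rip) followed immediately by the 64-bit
 * target address it loads, 14 bytes in total. */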
174 addr = thunk_start;
175 g_assert ((((guint64)(addr)) >> 32) == 0);
176 mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
177 MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
179 mono_atomic_xchg_i32 ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
180 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
183 else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
184 /* call *<OFFSET>(%rip) */
185 gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
186 mono_atomic_xchg_ptr (got_entry, addr);
187 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
191 #ifndef DISABLE_JIT
192 guint8*
193 mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
196 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
197 * we add a thunk every time.
198 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
199 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
201 guint8 *thunk_start, *thunk_code;
202 // FIXME: Has to be an argument
203 MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (domain);
205 thunk_start = thunk_code = (guint8 *)mono_mem_manager_code_reserve (mem_manager, 32);
206 amd64_jump_membase (thunk_code, AMD64_RIP, 0);
207 *(guint64*)thunk_code = (guint64)addr;
208 addr = thunk_start;
209 mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
210 MONO_PROFILER_RAISE (jit_code_buffer, (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
211 return addr;
214 static void
215 stack_unaligned (MonoTrampolineType tramp_type)
217 printf ("%d\n", tramp_type);
218 g_assert_not_reached ();
221 guchar*
222 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
224 const char *tramp_name;
225 guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
226 int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, ctx_offset, saved_regs_offset;
227 int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
228 gboolean has_caller;
229 GSList *unwind_ops = NULL;
230 MonoJumpInfo *ji = NULL;
231 const int kMaxCodeSize = 630;
233 if (tramp_type == MONO_TRAMPOLINE_JUMP)
234 has_caller = FALSE;
235 else
236 has_caller = TRUE;
238 code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
240 /* Compute stack frame size and offsets */
241 offset = 0;
242 rbp_offset = -offset;
244 offset += sizeof (target_mgreg_t);
245 rax_offset = -offset;
247 /* ex_offset */
248 offset += sizeof (target_mgreg_t);
250 offset += sizeof (target_mgreg_t);
251 r11_save_offset = -offset;
253 offset += sizeof (target_mgreg_t);
254 tramp_offset = -offset;
256 offset += sizeof (target_mgreg_t);
257 arg_offset = -offset;
259 offset += sizeof (target_mgreg_t);
260 res_offset = -offset;
262 offset += sizeof (MonoContext);
263 ctx_offset = -offset;
264 saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
265 saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);
267 offset += sizeof (MonoLMFTramp);
268 lmf_offset = -offset;
270 #ifdef TARGET_WIN32
271 /* Reserve space where the callee can save the argument registers */
272 offset += 4 * sizeof (target_mgreg_t);
273 #endif
275 framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
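/*
 * Resulting layout relative to rbp (assuming 8-byte target_mgreg_t slots,
 * offsets growing downwards):
 *   [rbp+0]   saved rbp                      (rbp_offset)
 *   [rbp-8]   scratch slot for the result    (rax_offset)
 *   [rbp-16]  ex slot
 *   [rbp-24]  r11 save slot                  (r11_save_offset)
 *   [rbp-32]  trampoline address             (tramp_offset)
 *   [rbp-40]  trampoline argument            (arg_offset)
 *   [rbp-48]  trampoline result              (res_offset)
 * followed by a MonoContext (ctx_offset), a MonoLMFTramp (lmf_offset) and,
 * on Windows, the register home space, all rounded up to
 * MONO_ARCH_FRAME_ALIGNMENT.
 */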
277 // CFA = sp + 16 (the trampoline address is on the stack)
278 cfa_offset = 16;
279 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
280 // IP saved at CFA - 8
281 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);
283 orig_rsp_to_rbp_offset = 0;
284 r11_save_code = code;
285 /* Reserve space for the mov_membase_reg to save R11 */
286 code += 5;
287 after_r11_save_code = code;
289 /* Pop the return address off the stack */
290 amd64_pop_reg (code, AMD64_R11);
291 orig_rsp_to_rbp_offset += sizeof (target_mgreg_t);
293 cfa_offset -= sizeof (target_mgreg_t);
294 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
297 * Allocate a new stack frame
299 amd64_push_reg (code, AMD64_RBP);
300 cfa_offset += sizeof (target_mgreg_t);
301 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
302 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
304 orig_rsp_to_rbp_offset -= sizeof (target_mgreg_t);
305 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
306 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
307 mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
308 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
310 /* Compute the trampoline address from the return address */
311 if (aot) {
312 /* 7 = length of call *<offset>(rip) */
313 amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
314 } else {
315 /* 5 = length of amd64_call_membase () */
316 amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
318 amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof (target_mgreg_t));
320 /* Save all registers */
321 for (i = 0; i < AMD64_NREG; ++i) {
322 if (i == AMD64_RBP) {
323 /* RAX was already saved above, so it can be used as scratch here */
324 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof (target_mgreg_t));
325 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_RAX, sizeof (target_mgreg_t));
326 } else if (i == AMD64_RIP) {
327 if (has_caller)
328 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (target_mgreg_t));
329 else
330 amd64_mov_reg_imm (code, AMD64_R11, 0);
331 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
332 } else if (i == AMD64_RSP) {
333 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
334 amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
335 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
336 } else if (i != AMD64_R11) {
337 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
338 } else {
339 /* We have to save R11 right at the start of
340 the trampoline code because it's used as a
341 scratch register */
342 /* This happens before the frame is set up, so it goes into the redzone */
343 amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof (target_mgreg_t));
344 g_assert (r11_save_code == after_r11_save_code);
346 /* Copy from the save slot into the register array slot */
347 amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset + framesize, sizeof (target_mgreg_t));
348 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
350 /* cfa = rbp + cfa_offset */
351 mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (target_mgreg_t)));
353 for (i = 0; i < AMD64_XMM_NREG; ++i)
354 if (AMD64_IS_ARGUMENT_XREG (i))
355 #if defined(MONO_HAVE_SIMD_REG)
356 amd64_movdqu_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)), i);
357 #else
358 amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)), i);
359 #endif
361 /* Check that the stack is aligned */
362 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
363 amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
364 amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
365 br [0] = code;
366 amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
367 if (aot) {
368 amd64_mov_reg_imm (code, AMD64_R11, 0);
369 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
370 } else {
371 amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
372 amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
373 amd64_call_reg (code, AMD64_R11);
375 mono_amd64_patch (br [0], code);
376 //amd64_breakpoint (code);
378 /* Obtain the trampoline argument which is encoded in the instruction stream */
379 if (aot) {
381 * tramp_index = (tramp_addr - specific_trampolines) / tramp_size
382 * arg = mscorlib_amodule->got [specific_trampolines_got_offsets_base + (tramp_index * 2) + 1]
384 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES, NULL);
385 /* Trampoline addr */
386 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
387 /* Trampoline offset */
388 amd64_alu_reg_reg (code, X86_SUB, AMD64_RAX, AMD64_R11);
389 /* Trampoline index */
390 amd64_shift_reg_imm (code, X86_SHR, AMD64_RAX, 3);
391 /* Every trampoline uses 2 got slots */
392 amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 1);
393 /* pointer size */
394 amd64_shift_reg_imm (code, X86_SHL, AMD64_RAX, 3);
395 /* Address of block of got slots */
396 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINES_GOT_SLOTS_BASE, NULL);
397 /* Address of got slots belonging to this trampoline */
398 amd64_alu_reg_reg (code, X86_ADD, AMD64_RAX, AMD64_R11);
399 /* The second slot contains the argument */
400 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
401 } else {
402 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
403 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
404 amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
405 amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
406 br [0] = code;
407 x86_branch8 (code, X86_CC_NE, 6, FALSE);
408 /* 32 bit immediate */
409 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
410 br [1] = code;
411 x86_jump8 (code, 10);
412 /* 64 bit immediate */
413 mono_amd64_patch (br [0], code);
414 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
415 mono_amd64_patch (br [1], code);
417 amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (target_mgreg_t));
419 /* Save LMF begin */
421 /* Save sp */
422 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (target_mgreg_t));
423 amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
424 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof (target_mgreg_t));
425 /* Save pointer to context */
426 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
427 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof (target_mgreg_t));
429 if (aot) {
430 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
431 } else {
432 amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
434 amd64_call_reg (code, AMD64_R11);
436 /* Save lmf_addr */
437 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof (target_mgreg_t));
438 /* Save previous_lmf */
439 /* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
440 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof (target_mgreg_t));
441 amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof (target_mgreg_t));
442 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof (target_mgreg_t));
443 /* Set new lmf */
444 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
445 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof (target_mgreg_t));
447 /* Save LMF end */
449 /* Arg1 is the pointer to the saved registers */
450 amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);
452 /* Arg2 is the address of the calling code */
453 if (has_caller)
454 amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof (target_mgreg_t));
455 else
456 amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);
458 /* Arg3 is the method/vtable ptr */
459 amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof (target_mgreg_t));
461 /* Arg4 is the trampoline address */
462 amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof (target_mgreg_t));
464 if (aot) {
465 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
466 } else {
467 tramp = (guint8*)mono_get_trampoline_func (tramp_type);
468 amd64_mov_reg_imm (code, AMD64_R11, tramp);
470 amd64_call_reg (code, AMD64_R11);
471 amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof (target_mgreg_t));
473 /* Restore LMF */
474 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
475 amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof (target_mgreg_t));
476 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof (target_mgreg_t));
477 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof (target_mgreg_t));
480 * Save rax to the stack; after the leave instruction this slot becomes part of
481 * the red zone.
483 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof (target_mgreg_t));
484 amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof (target_mgreg_t));
486 /* Check for thread interruption */
487 /* This is not perf critical code so no need to check the interrupt flag */
489 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
491 if (aot) {
492 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
493 } else {
494 amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
496 amd64_call_reg (code, AMD64_R11);
498 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
499 br_ex_check = code;
500 amd64_branch8 (code, X86_CC_Z, -1, 1);
503 * Exception case:
504 * We have an exception we want to throw in the caller's frame, so pop
505 * the trampoline frame and throw from the caller.
507 #if TARGET_WIN32
508 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
509 amd64_pop_reg (code, AMD64_RBP);
510 mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
511 #else
512 amd64_leave (code);
513 #endif
514 /* We are in the parent frame, the exception is in rax */
516 * EH is initialized after trampolines, so get the address of the variable
517 * which contains throw_exception, and load it from there.
519 if (aot) {
520 /* Not really a jit icall */
521 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
522 } else {
523 amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_rethrow_preserve_exception_addr ());
525 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (target_mgreg_t));
526 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof (target_mgreg_t));
528 * We still have the original return address on the top of the stack, so the
529 * throw trampoline will use that as the throw site.
531 amd64_jump_reg (code, AMD64_R11);
533 /* Normal case */
534 mono_amd64_patch (br_ex_check, code);
536 /* Restore the argument registers, r10 (imt method/rgctx)
537 and rax (needed for direct calls to C vararg functions). */
538 for (i = 0; i < AMD64_NREG; ++i)
539 if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX || i == AMD64_R11)
540 amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
541 for (i = 0; i < AMD64_XMM_NREG; ++i)
542 if (AMD64_IS_ARGUMENT_XREG (i))
543 #if defined(MONO_HAVE_SIMD_REG)
544 amd64_movdqu_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (MonoContextSimdReg)));
545 #else
546 amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (double)));
547 #endif
549 /* Restore stack */
550 #if TARGET_WIN32
551 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
552 amd64_pop_reg (code, AMD64_RBP);
553 mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
554 #else
555 amd64_leave (code);
556 #endif
557 cfa_offset -= sizeof (target_mgreg_t);
558 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
560 if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
561 /* Load result */
562 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof (target_mgreg_t), sizeof (target_mgreg_t));
563 amd64_ret (code);
564 } else {
565 /* jump to the compiled method using the saved rax */
566 amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof (target_mgreg_t));
569 g_assertf ((code - buf) <= kMaxCodeSize, "%d %d", (int)(code - buf), kMaxCodeSize);
570 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
572 mono_arch_flush_icache (buf, code - buf);
573 MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
575 tramp_name = mono_get_generic_trampoline_name (tramp_type);
576 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
578 return buf;
581 gpointer
582 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
584 guint8 *code, *buf, *tramp;
585 int size;
586 gboolean far_addr = FALSE;
588 tramp = mono_get_trampoline_code (tramp_type);
590 if ((((guint64)arg1) >> 32) == 0)
591 size = 5 + 1 + 4;
592 else
593 size = 5 + 1 + 8;
595 code = buf = (guint8 *)mono_mem_manager_code_reserve_align (mem_manager, size, 1);
597 if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
598 #ifndef MONO_ARCH_NOMAP32BIT
599 g_assert_not_reached ();
600 #endif
601 far_addr = TRUE;
602 size += 16;
603 code = buf = (guint8 *)mono_mem_manager_code_reserve_align (mem_manager, size, 1);
606 if (far_addr) {
607 amd64_mov_reg_imm (code, AMD64_R11, tramp);
608 amd64_call_reg (code, AMD64_R11);
609 } else {
610 amd64_call_code (code, tramp);
612 /* The trampoline code will obtain the argument from the instruction stream */
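/*
 * Concretely, right after the call we emit a one-byte size tag (0x4 or 0x8)
 * followed by the 32- or 64-bit argument itself; the generic trampoline
 * locates it through its return address (see the tramp_offset/arg_offset
 * handling in mono_arch_create_generic_trampoline ()).
 */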
613 if ((((guint64)arg1) >> 32) == 0) {
614 *code = 0x4;
615 *(guint32*)(code + 1) = (gint64)arg1;
616 code += 5;
617 } else {
618 *code = 0x8;
619 *(guint64*)(code + 1) = (gint64)arg1;
620 code += 9;
623 g_assert ((code - buf) <= size);
625 if (code_len)
626 *code_len = size;
628 mono_arch_flush_icache (buf, size);
629 MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));
631 return buf;
634 gpointer
635 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
637 guint8 *tramp;
638 guint8 *code, *buf;
639 guint8 **rgctx_null_jumps;
640 int depth, index;
641 int i;
642 gboolean mrgctx;
643 MonoJumpInfo *ji = NULL;
644 GSList *unwind_ops;
646 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
647 index = MONO_RGCTX_SLOT_INDEX (slot);
648 if (mrgctx)
649 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
650 for (depth = 0; ; ++depth) {
651 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
653 if (index < size - 1)
654 break;
655 index -= size - 1;
658 const int tramp_size = 64 + 8 * depth;
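/*
 * The loop above turns the flat SLOT number into a (depth, index) pair: the
 * last element of each rgctx array points to the next, deeper array, so each
 * level contributes size - 1 usable slots. The 8 * depth term roughly covers
 * the extra load/null-check/branch emitted per level below.
 */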
660 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
662 unwind_ops = mono_arch_get_cie_program ();
664 rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));
666 if (mrgctx) {
667 /* get mrgctx ptr */
668 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
669 } else {
670 /* load rgctx ptr from vtable */
671 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof (target_mgreg_t));
672 /* is the rgctx ptr null? */
673 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
674 /* if yes, jump to actual trampoline */
675 rgctx_null_jumps [0] = code;
676 amd64_branch8 (code, X86_CC_Z, -1, 1);
679 for (i = 0; i < depth; ++i) {
680 /* load ptr to next array */
681 if (mrgctx && i == 0)
682 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof (target_mgreg_t));
683 else
684 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof (target_mgreg_t));
685 /* is the ptr null? */
686 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
687 /* if yes, jump to actual trampoline */
688 rgctx_null_jumps [i + 1] = code;
689 amd64_branch8 (code, X86_CC_Z, -1, 1);
692 /* fetch slot */
693 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (target_mgreg_t) * (index + 1), sizeof (target_mgreg_t));
694 /* is the slot null? */
695 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
696 /* if yes, jump to actual trampoline */
697 rgctx_null_jumps [depth + 1] = code;
698 amd64_branch8 (code, X86_CC_Z, -1, 1);
699 /* otherwise return */
700 amd64_ret (code);
702 for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
703 mono_amd64_patch (rgctx_null_jumps [i], code);
705 g_free (rgctx_null_jumps);
707 if (MONO_ARCH_VTABLE_REG != AMD64_ARG_REG1) {
708 /* move the rgctx pointer to the VTABLE register */
709 amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
712 if (aot) {
713 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
714 amd64_jump_reg (code, AMD64_R11);
715 } else {
716 MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (mono_get_root_domain ());
717 tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mem_manager, NULL);
719 /* jump to the actual trampoline */
720 amd64_jump_code (code, tramp);
723 mono_arch_flush_icache (buf, code - buf);
724 MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
726 g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);
728 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
730 char *name = mono_get_rgctx_fetch_trampoline_name (slot);
731 *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
732 g_free (name);
734 return buf;
737 gpointer
738 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
740 guint8 *code, *buf;
741 int tramp_size;
742 MonoJumpInfo *ji = NULL;
743 GSList *unwind_ops;
745 g_assert (aot);
746 tramp_size = 64;
748 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_TRAMPOLINE_UNWINDINFO_SIZE(0));
750 unwind_ops = mono_arch_get_cie_program ();
752 // FIXME: Currently, we always go to the slow path.
753 /* This receives a <slot, trampoline> in the rgctx arg reg. */
754 /* Load trampoline addr */
755 amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
756 /* move the rgctx pointer to the VTABLE register */
757 amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (target_mgreg_t));
758 /* Jump to the trampoline */
759 amd64_jump_reg (code, AMD64_R11);
761 mono_arch_flush_icache (buf, code - buf);
762 MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));
764 g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);
766 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_TRAMPOLINE_UNWINDINFO_SIZE(0)));
768 if (info)
769 *info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);
771 return buf;
774 void
775 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
777 /* FIXME: This is not thread safe */
778 guint8 *code = (guint8 *)ji->code_start;
780 amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
781 amd64_mov_reg_imm (code, AMD64_R11, func);
783 x86_push_imm (code, (guint64)func_arg);
784 amd64_call_reg (code, AMD64_R11);
786 #endif /* !DISABLE_JIT */
789 * mono_arch_get_call_target:
791 * Return the address called by the code before CODE, if it exists.
793 guint8*
794 mono_arch_get_call_target (guint8 *code)
796 if (code [-5] == 0xe8) {
797 gint32 disp = *(gint32*)(code - 4);
798 guint8 *target = code + disp;
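/* The rel32 of a call (0xe8) is relative to the end of the instruction,
 * which is exactly CODE, hence target = code + disp. */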
800 return target;
801 } else {
802 return NULL;
806 #ifdef MONO_ARCH_CODE_EXEC_ONLY
807 /* Keep in sync with aot-compiler.c, arch_emit_plt_entry. */
808 #define PLT_ENTRY_OFFSET_REG AMD64_RAX
810 /* If PLT_ENTRY_OFFSET_REG is R8 - R15, increase mov instruction size by 1 due to use of REX. */
811 #define PLT_MOV_REG_IMM8_SIZE (1 + sizeof (guint8))
812 #define PLT_MOV_REG_IMM16_SIZE (2 + sizeof (guint16))
813 #define PLT_MOV_REG_IMM32_SIZE (1 + sizeof (guint32))
814 #define PLT_JMP_INST_SIZE 6
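/*
 * As implied by the sizes above, an exec-only PLT entry is expected to be a
 * mov of the entry index into PLT_ENTRY_OFFSET_REG (8, 16 or 32 bit immediate
 * depending on plt_size) followed by a 6 byte jmp *<disp32>(%rip); the index
 * left in the register is what aot_arch_get_plt_entry_index () reads back out
 * of the register context.
 */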
816 static guchar
817 aot_arch_get_plt_entry_size (MonoAotFileInfo *info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
819 if (info->plt_size <= 0xFF)
820 return PLT_MOV_REG_IMM8_SIZE + PLT_JMP_INST_SIZE;
821 else if (info->plt_size <= 0xFFFF)
822 return PLT_MOV_REG_IMM16_SIZE + PLT_JMP_INST_SIZE;
823 else
824 return PLT_MOV_REG_IMM32_SIZE + PLT_JMP_INST_SIZE;
827 static guint32
828 aot_arch_get_plt_entry_index (MonoAotFileInfo *info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
830 if (info->plt_size <= 0xFF)
831 return regs[PLT_ENTRY_OFFSET_REG] & 0xFF;
832 else if (info->plt_size <= 0xFFFF)
833 return regs[PLT_ENTRY_OFFSET_REG] & 0xFFFF;
834 else
835 return regs[PLT_ENTRY_OFFSET_REG] & 0xFFFFFFFF;
838 guint8*
839 mono_aot_arch_get_plt_entry_exec_only (gpointer amodule_info, host_mgreg_t *regs, guint8 *code, guint8 *plt)
841 guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, plt);
842 guchar plt_entry_size = aot_arch_get_plt_entry_size ((MonoAotFileInfo *)amodule_info, regs, code, plt);
844 /* First PLT slot is never emitted into table, take that into account */
845 /* when calculating corresponding PLT entry. */
846 plt_entry_index--;
847 return plt + ((gsize)plt_entry_index * (gsize)plt_entry_size);
850 guint32
851 mono_arch_get_plt_info_offset_exec_only (gpointer amodule_info, guint8 *plt_entry, host_mgreg_t *regs, guint8 *code, MonoAotResolvePltInfoOffset resolver, gpointer amodule)
853 guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, NULL);
855 /* First PLT slot is never emitted into table, take that into account */
856 /* when calculating offset. */
857 plt_entry_index--;
858 return resolver (amodule, plt_entry_index);
861 void
862 mono_arch_patch_plt_entry_exec_only (gpointer amodule_info, guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
864 /* Same calculation of GOT offset as done in aot-compiler.c, emit_plt and used as jmp DISP. */
865 guint32 plt_entry_index = aot_arch_get_plt_entry_index ((MonoAotFileInfo *)amodule_info, regs, code, NULL);
866 gpointer *plt_jump_table_entry = ((gpointer *)(got + ((MonoAotFileInfo *)amodule_info)->plt_got_offset_base) + plt_entry_index);
867 mono_atomic_xchg_ptr (plt_jump_table_entry, addr);
869 #else
871 * mono_arch_get_plt_info_offset:
873 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
875 guint32
876 mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
878 return *(guint32*)(plt_entry + 6);
881 void
882 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
884 gint32 disp;
885 gpointer *plt_jump_table_entry;
887 /* A PLT entry: jmp *<DISP>(%rip) */
888 g_assert (code [0] == 0xff);
889 g_assert (code [1] == 0x25);
891 disp = *(gint32*)(code + 2);
893 plt_jump_table_entry = (gpointer*)(code + 6 + disp);
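/* The jmp *<DISP>(%rip) encoding is 6 bytes and DISP is relative to the next
 * instruction, so the slot it dereferences lives at code + 6 + disp. */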
895 mono_atomic_xchg_ptr (plt_jump_table_entry, addr);
897 #endif
899 #ifndef DISABLE_JIT
901 * mono_arch_create_sdb_trampoline:
903 * Return a trampoline which captures the current context, passes it to
904 * mono_debugger_agent_single_step_from_context ()/mono_debugger_agent_breakpoint_from_context (),
905 * then restores the (potentially changed) context.
907 guint8*
908 mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
910 int tramp_size = 512;
911 int i, framesize, ctx_offset, cfa_offset, gregs_offset;
912 guint8 *code, *buf;
913 GSList *unwind_ops = NULL;
914 MonoJumpInfo *ji = NULL;
916 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
918 framesize = 0;
919 #ifdef TARGET_WIN32
920 /* Reserve space where the callee can save the argument registers */
921 framesize += 4 * sizeof (target_mgreg_t);
922 #endif
924 ctx_offset = framesize;
925 framesize += sizeof (MonoContext);
927 framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
929 // CFA = sp + 8
930 cfa_offset = 8;
931 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
932 // IP saved at CFA - 8
933 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);
935 amd64_push_reg (code, AMD64_RBP);
936 cfa_offset += sizeof (target_mgreg_t);
937 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
938 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
940 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
941 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
942 mono_add_unwind_op_fp_alloc (unwind_ops, code, buf, AMD64_RBP, 0);
943 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
945 gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
947 /* Initialize a MonoContext structure on the stack */
948 for (i = 0; i < AMD64_NREG; ++i) {
949 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
950 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), i, sizeof (target_mgreg_t));
952 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (target_mgreg_t));
953 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
954 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (target_mgreg_t));
955 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
956 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t), sizeof (target_mgreg_t));
957 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), AMD64_R11, sizeof (target_mgreg_t));
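/*
 * RBP/RSP/RIP of the interrupted frame are reconstructed from our own
 * prologue: [rbp] holds its rbp, [rbp + 8] the return address (its rip), and
 * rbp + 16 is what its stack pointer was when the probe was hit.
 */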
959 /* Call the single step/breakpoint function in sdb */
960 amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);
962 if (aot) {
963 if (single_step)
964 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_single_step_from_context));
965 else
966 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_breakpoint_from_context));
967 } else {
968 if (single_step)
969 amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->single_step_from_context);
970 else
971 amd64_mov_reg_imm (code, AMD64_R11, mini_get_dbg_callbacks ()->breakpoint_from_context);
973 amd64_call_reg (code, AMD64_R11);
975 /* Restore registers from ctx */
976 for (i = 0; i < AMD64_NREG; ++i) {
977 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
978 amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
980 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
981 amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (target_mgreg_t));
982 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
983 amd64_mov_membase_reg (code, AMD64_RBP, sizeof (target_mgreg_t), AMD64_R11, sizeof (target_mgreg_t));
985 #if TARGET_WIN32
986 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
987 amd64_pop_reg (code, AMD64_RBP);
988 mono_add_unwind_op_same_value (unwind_ops, code, buf, AMD64_RBP);
989 #else
990 amd64_leave (code);
991 #endif
992 cfa_offset -= sizeof (target_mgreg_t);
993 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
994 amd64_ret (code);
996 g_assertf ((code - buf) <= tramp_size, "%d %d", (int)(code - buf), tramp_size);
998 mono_arch_flush_icache (buf, code - buf);
999 MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
1000 g_assert (code - buf <= tramp_size);
1001 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
1003 const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
1004 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
1006 return buf;
1010 * mono_arch_get_interp_to_native_trampoline:
1012 * A trampoline that handles the transition from the interpreter into the
1013 * native world. The caller is required to set up a descriptor (CallContext) so
1014 * the trampoline can translate the arguments into the native calling convention.
1016 gpointer
1017 mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
1019 #ifndef DISABLE_INTERPRETER
1020 guint8 *start = NULL, *code;
1021 guint8 *label_start_copy, *label_exit_copy;
1022 MonoJumpInfo *ji = NULL;
1023 GSList *unwind_ops = NULL;
1024 int buf_len, i, cfa_offset, off_methodargs, off_targetaddr;
1026 buf_len = 512;
1027 start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
1029 // CFA = sp + 8
1030 cfa_offset = 8;
1031 mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
1032 // IP saved at CFA - 8
1033 mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);
1035 amd64_push_reg (code, AMD64_RBP);
1036 cfa_offset += sizeof (target_mgreg_t);
1037 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
1038 mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);
1040 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
1041 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
1042 mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);
1044 /* allocate space for saving the target addr and the call context */
1045 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 2 * sizeof (target_mgreg_t));
1047 /* save CallContext* onto stack */
1048 off_methodargs = - 8;
1049 amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (target_mgreg_t));
1051 /* save target address on stack */
1052 off_targetaddr = - 2 * 8;
1053 amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (target_mgreg_t));
1055 /* load the CallContext* into R11 */
1056 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (target_mgreg_t));
1058 /* allocate the stack space necessary for the call */
1059 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack_size), sizeof (target_mgreg_t));
1060 amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, AMD64_RAX);
1062 /* copy stack from the CallContext, reg1 = dest, reg2 = source */
1063 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
1064 amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, stack), sizeof (target_mgreg_t));
1066 label_start_copy = code;
1067 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
1068 label_exit_copy = code;
1069 amd64_branch8 (code, X86_CC_Z, 0, FALSE);
1070 amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_ARG_REG2, 0, sizeof (target_mgreg_t));
1071 amd64_mov_membase_reg (code, AMD64_ARG_REG1, 0, AMD64_ARG_REG3, sizeof (target_mgreg_t));
1072 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, sizeof (target_mgreg_t));
1073 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG2, sizeof (target_mgreg_t));
1074 amd64_alu_reg_imm (code, X86_SUB, AMD64_RAX, sizeof (target_mgreg_t));
1075 amd64_jump_code (code, label_start_copy);
1076 x86_patch (label_exit_copy, code);
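/*
 * The loop above copies CallContext->stack_size bytes of stack arguments, one
 * target_mgreg_t at a time, from CallContext->stack (ARG_REG2) to the space
 * just reserved on the native stack (ARG_REG1); RAX holds the bytes left to
 * copy.
 */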
1078 /* set all general purpose registers from CallContext */
1079 for (i = 0; i < PARAM_REGS; i++)
1080 amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));
1082 /* set all floating registers from CallContext */
1083 for (i = 0; i < FLOAT_PARAM_REGS; ++i)
1084 amd64_sse_movsd_reg_membase (code, i, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));
1086 /* load target addr */
1087 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (target_mgreg_t));
1089 /* call into native function */
1090 amd64_call_reg (code, AMD64_R11);
1092 /* save all return general purpose registers in the CallContext */
1093 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (target_mgreg_t));
1094 for (i = 0; i < RETURN_REGS; i++)
1095 amd64_mov_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), return_regs [i], sizeof (target_mgreg_t));
1097 /* save all return floating registers in the CallContext */
1098 for (i = 0; i < FLOAT_RETURN_REGS; i++)
1099 amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);
1101 #if TARGET_WIN32
1102 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
1103 #else
1104 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
1105 #endif
1106 amd64_pop_reg (code, AMD64_RBP);
1107 mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);
1109 cfa_offset -= sizeof (target_mgreg_t);
1110 mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
1111 amd64_ret (code);
1113 g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);
1115 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
1117 mono_arch_flush_icache (start, code - start);
1118 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
1120 if (info)
1121 *info = mono_tramp_info_create ("interp_to_native_trampoline", start, code - start, ji, unwind_ops);
1123 return start;
1124 #else
1125 g_assert_not_reached ();
1126 return NULL;
1127 #endif /* DISABLE_INTERPRETER */
1130 gpointer
1131 mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
1133 #ifndef DISABLE_INTERPRETER
1134 guint8 *start = NULL, *code;
1135 MonoJumpInfo *ji = NULL;
1136 GSList *unwind_ops = NULL;
1137 int buf_len, i, framesize, cfa_offset, ctx_offset;
1139 buf_len = 512;
1140 start = code = (guint8 *) mono_global_codeman_reserve (buf_len + MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE);
1142 framesize = 0;
1143 #ifdef TARGET_WIN32
1144 /* Reserve space where the callee can save the argument registers */
1145 framesize += 4 * sizeof (target_mgreg_t);
1146 #endif
1148 ctx_offset = framesize;
1149 framesize += MONO_ABI_SIZEOF (CallContext);
1150 framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
1152 // CFA = sp + 8
1153 cfa_offset = 8;
1154 mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
1155 // IP saved at CFA - 8
1156 mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RIP, -cfa_offset);
1158 amd64_push_reg (code, AMD64_RBP);
1159 cfa_offset += sizeof (target_mgreg_t);
1160 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, cfa_offset);
1161 mono_add_unwind_op_offset (unwind_ops, code, start, AMD64_RBP, -cfa_offset);
1163 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (target_mgreg_t));
1164 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, AMD64_RBP);
1165 mono_add_unwind_op_fp_alloc (unwind_ops, code, start, AMD64_RBP, 0);
1167 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
1169 /* save all general purpose registers into the CallContext */
1170 for (i = 0; i < PARAM_REGS; i++)
1171 amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + param_regs [i] * sizeof (target_mgreg_t), param_regs [i], sizeof (target_mgreg_t));
1173 /* save all floating registers into the CallContext */
1174 for (i = 0; i < FLOAT_PARAM_REGS; i++)
1175 amd64_sse_movsd_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);
1177 /* set the stack pointer to the value at call site */
1178 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RBP, sizeof (target_mgreg_t));
1179 amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, 2 * sizeof (target_mgreg_t));
1180 amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, stack), AMD64_R11, sizeof (target_mgreg_t));
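/* rbp + 16 (saved rbp plus return address) is the caller's stack pointer at
 * the call site; recording it as CallContext->stack lets the interp entry
 * find any stack-passed arguments. */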
1182 /* call interp_entry with the ccontext and rmethod as arguments */
1183 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RSP, sizeof (target_mgreg_t));
1184 if (ctx_offset != 0)
1185 amd64_alu_reg_imm (code, X86_ADD, AMD64_ARG_REG1, ctx_offset);
1186 amd64_mov_reg_membase (code, AMD64_ARG_REG2, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, arg), sizeof (target_mgreg_t));
1187 amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, MONO_STRUCT_OFFSET (MonoFtnDesc, addr), sizeof (target_mgreg_t));
1188 amd64_call_reg (code, AMD64_R11);
1190 /* load the return values from the context */
1191 for (i = 0; i < RETURN_REGS; i++)
1192 amd64_mov_reg_membase (code, return_regs [i], AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, gregs) + return_regs [i] * sizeof (target_mgreg_t), sizeof (target_mgreg_t));
1194 for (i = 0; i < FLOAT_RETURN_REGS; i++)
1195 amd64_sse_movsd_reg_membase (code, i, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));
1197 /* reset stack and return */
1198 #if TARGET_WIN32
1199 amd64_lea_membase (code, AMD64_RSP, AMD64_RBP, 0);
1200 #else
1201 amd64_mov_reg_reg (code, AMD64_RSP, AMD64_RBP, sizeof (target_mgreg_t));
1202 #endif
1203 amd64_pop_reg (code, AMD64_RBP);
1204 mono_add_unwind_op_same_value (unwind_ops, code, start, AMD64_RBP);
1206 cfa_offset -= sizeof (target_mgreg_t);
1207 mono_add_unwind_op_def_cfa (unwind_ops, code, start, AMD64_RSP, cfa_offset);
1208 amd64_ret (code);
1210 g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);
1212 g_assert_checked (mono_arch_unwindinfo_validate_size (unwind_ops, MONO_MAX_TRAMPOLINE_UNWINDINFO_SIZE));
1214 mono_arch_flush_icache (start, code - start);
1215 MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));
1217 if (info)
1218 *info = mono_tramp_info_create ("native_to_interp_trampoline", start, code - start, ji, unwind_ops);
1220 return start;
1221 #else
1222 g_assert_not_reached ();
1223 return NULL;
1224 #endif /* DISABLE_INTERPRETER */
1226 #endif /* !DISABLE_JIT */
1228 #ifdef DISABLE_JIT
1229 gpointer
1230 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
1232 g_assert_not_reached ();
1233 return NULL;
1236 gpointer
1237 mono_arch_get_static_rgctx_trampoline (MonoMemoryManager *mem_manager, gpointer arg, gpointer addr)
1239 g_assert_not_reached ();
1240 return NULL;
1243 gpointer
1244 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
1246 g_assert_not_reached ();
1247 return NULL;
1250 guchar*
1251 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
1253 g_assert_not_reached ();
1254 return NULL;
1257 gpointer
1258 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoMemoryManager *mem_manager, guint32 *code_len)
1260 g_assert_not_reached ();
1261 return NULL;
1264 gpointer
1265 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
1267 g_assert_not_reached ();
1268 return NULL;
1271 void
1272 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
1274 g_assert_not_reached ();
1275 return;
1278 guint8*
1279 mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
1281 g_assert_not_reached ();
1282 return NULL;
1285 gpointer
1286 mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
1288 g_assert_not_reached ();
1289 return NULL;
1292 gpointer
1293 mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
1295 g_assert_not_reached ();
1296 return NULL;
1298 #endif /* DISABLE_JIT */