[amd64] Implement the general rgctx fetch trampoline.
mono/mini/tramp-amd64.c
/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "debugger-agent.h"
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int this_reg, size = 20;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
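/*
 * For reference, a sketch of the trampoline emitted above (this_reg is the
 * first argument register, typically %rdi on SysV and %rcx on Win64; see
 * mono_arch_get_this_arg_reg ()):
 *
 *   add $sizeof (MonoObject), %<this_reg>   ; skip the object header, unboxing 'this'
 *   mov $addr, %rax
 *   jmp *%rax
 */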
/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
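/*
 * Sketch of the emitted code (MONO_ARCH_RGCTX_REG is %r10 on this port, cf.
 * the register-restore comment in mono_arch_create_generic_trampoline ()):
 *
 *   mov $mrgctx, %r10
 *   jmp addr            ; rel32 jump, or an indirect jump when ADDR is far
 */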
#ifdef _WIN64
// Workaround lack of Valgrind support for 64-bit Windows
#define VALGRIND_DISCARD_TRANSLATIONS(...)
#endif
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
			}
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
}
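/*
 * The two patchable callsite shapes recognized above, schematically (ORIG_CODE
 * points just past the call in both cases):
 *
 *   49 bb <imm64>   mov $target, %r11    ; imm64 at orig_code - 11 is swapped
 *   41 ff d3        call *%r11
 *
 *   e8 <rel32>      call target          ; rel32 at orig_code - 4 is swapped
 */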
guint8*
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;
	addr = thunk_start;
	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	return addr;
}
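/*
 * Thunk layout (a sketch): a rip-relative jump through the 64-bit target
 * stored immediately after the instruction, so the 6-byte jump reads the
 * quadword that follows it:
 *
 *   ff 25 00 00 00 00   jmp *0(%rip)
 *   <8-byte addr>
 */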
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
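/*
 * AMD64 PLT entry layout assumed above (a sketch):
 *
 *   ff 25 <disp32>   jmp *<disp32>(%rip)   ; disp32 at code + 2
 *   <info offset>    guint32, read by mono_arch_get_plt_info_offset () below
 *
 * The jump table entry being patched lives at code + 6 + disp32.
 */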
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	char *tramp_name;
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = 630;

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof (mgreg_t);
	rax_offset = -offset;

	offset += sizeof (mgreg_t);
	ex_offset = -offset;

	offset += sizeof (mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof (mgreg_t);
	tramp_offset = -offset;

	offset += sizeof (gpointer);
	arg_offset = -offset;

	offset += sizeof (mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (mgreg_t);
#endif

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
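	/*
	 * Resulting frame layout relative to %rbp, once the frame below is set up
	 * (a sketch; all slot sizes are 8 bytes on amd64):
	 *
	 *   rbp +  8   return address in the caller of the trampoline
	 *   rbp +  0   saved rbp                              (rbp_offset)
	 *   rbp -  8   rax save slot                          (rax_offset)
	 *   rbp - 16   exception slot                         (ex_offset)
	 *   rbp - 24   r11 save slot                          (r11_save_offset)
	 *   rbp - 32   trampoline address                     (tramp_offset)
	 *   rbp - 40   trampoline argument                    (arg_offset)
	 *   rbp - 48   result of the trampoline function      (res_offset)
	 *   ...        MonoContext                            (ctx_offset)
	 *   ...        MonoLMFTramp                           (lmf_offset)
	 */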
	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof (mgreg_t);

	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof (mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof (gpointer));
	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof (mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), AMD64_RAX, sizeof (mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (gpointer));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof (mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof (mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
	}
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (mgreg_t)), i);
	/* Check that the stack is aligned */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
	if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (gpointer));
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next got slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof (gpointer));
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof (gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (gpointer));
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof (mgreg_t)), sizeof (mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (gpointer));
	}
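	/*
	 * For reference, a JITted specific trampoline (see
	 * mono_arch_create_specific_trampoline () below) looks like this in the
	 * common near-call case (a sketch):
	 *
	 *   e8 <rel32>       call <generic trampoline>
	 *   04 | 08          size of the embedded argument
	 *   <imm32 | imm64>  the argument itself
	 *
	 * The size byte sits right at the return address, which is why the non-AOT
	 * path above reads one byte at offset 5 and dispatches on the CMP with 4.
	 */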
	/* Save LMF begin */

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof (mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof (mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof (mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof (gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof (gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof (gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof (gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof (gpointer));

	/* Save LMF end */
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof (gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof (gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof (gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof (mgreg_t));
	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof (gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof (gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof (mgreg_t));
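	/*
	 * Worked detail: after the 'leave' below, %rsp equals the old %rbp + 8, so
	 * the slot written at rbp + rax_offset is reachable as rsp + rax_offset - 8.
	 * That is the address used when the result is reloaded at the end of the
	 * trampoline.
	 */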
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	amd64_leave (code);
	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (gpointer));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof (mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);

	/* Normal case */
	mono_amd64_patch (br_ex_check, code);
	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (mgreg_t)));

	/* Restore stack */
	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof (mgreg_t), sizeof (mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof (mgreg_t));
	}
	g_assert ((code - buf) <= kMaxCodeSize);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
	g_free (tramp_name);

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
	}

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));

	return buf;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof (gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof (gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof (gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof (gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
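/*
 * C sketch of the fast path emitted above (illustrative only, not compiled;
 * 'arg' is the register argument, an MRGCTX pointer or a MonoVTable pointer):
 *
 *   gpointer *p = mrgctx ? (gpointer *)arg : (gpointer *)vtable->runtime_generic_context;
 *   if (!p)
 *           goto slow_path;                          // only checked in the vtable case
 *   for (i = 0; i < depth; ++i) {
 *           p = (gpointer *)((mrgctx && i == 0) ? p [MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer)] : p [0]);
 *           if (!p)
 *                   goto slow_path;
 *   }
 *   res = p [index + 1];
 *   if (!res)
 *           goto slow_path;                          // slow path = the RGCTX_LAZY_FETCH specific trampoline
 *   return res;
 */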
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	g_assert (aot);
	tramp_size = 64;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* This receives a <slot, trampoline> in the rgctx arg reg. */
	/* Load trampoline addr */
	amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (gpointer));
	/* Jump to the trampoline */
	amd64_jump_reg (code, AMD64_R11);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
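/*
 * Layout of the data MONO_ARCH_RGCTX_REG points at in the general fetch
 * trampoline above, per the comments in the code (a sketch):
 *
 *   offset 0   slot
 *   offset 8   address of the slot-specific fetch trampoline (loaded into %r11)
 */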
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}
gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	g_assert (!aot);

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	/*
	 * This trampoline restores the call chain of the handler block, then jumps
	 * into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 24);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code just before CODE, if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}
/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
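	/*
	 * Reconstruction note: the caller's %rbp is the value pushed above, i.e.
	 * [%rbp + 0]; its %rsp is %rbp + 16 (past the pushed %rbp and the return
	 * address); and its %rip is the return address at [%rbp + 8]. These fill
	 * the RBP/RSP/RIP slots of the MonoContext.
	 */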
	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));

	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);
	/* Flush the whole buffer, starting at its base (the original passed 'code' here) */
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}