/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
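
/* A REX prefix byte on amd64 lies in the opcode range 0x40-0x4f */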
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;

/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	start = code = mono_domain_code_reserve (domain, 20);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 *   Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);

	return start;
}
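
/*
 * mono_arch_get_llvm_imt_trampoline:
 *
 *   Create a trampoline which sets the IMT register to M, loads the vtable from
 * the this argument and makes an indirect call through the vtable slot at
 * VT_OFFSET. Presumably used by LLVM compiled code, which cannot set the IMT
 * register itself.
 */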
gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_reg;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), NULL, NULL);

	/* Set imt arg */
	amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load vtable address */
	amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
	amd64_jump_membase (code, AMD64_RAX, vt_offset);
	amd64_ret (code);

	g_assert ((code - start) < buf_len);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_patch_callsite:
 *
 *   Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;
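
	/*
	 * Dispatch on the encoding of the call site: 0x49 0xbb is "mov $imm64, %r11"
	 * (followed by "call *%r11"), 0xe8 is a direct "call rel32", and
	 * 0x41 0xff 0x15 is an indirect "call *disp32(%rip)" through a GOT entry.
	 */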
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
#ifdef MONO_ARCH_NOMAP32BIT
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
#else
				/*
				 * This might happen when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
#endif
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
}
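
/*
 * mono_arch_patch_plt_entry:
 *
 *   Patch the PLT entry at CODE, which is a "jmp *<DISP>(%rip)" through a jump
 * table slot, so subsequent calls through it go to ADDR.
 */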
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
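
/*
 * mono_arch_nullify_class_init_trampoline:
 *
 *   Rewrite the class init call site at CODE so it no longer enters the class
 * init trampoline once the class has been initialized. The patching strategy
 * depends on which call-site encoding is found at CODE.
 */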
void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	MonoJitInfo *ji = NULL;
	gboolean can_write;

	if (mono_use_llvm) {
		/* code - 7 might be before the start of the method */
		/* FIXME: Avoid this expensive call somehow */
		ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	}

	can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_get_vcall_slot_addr (code + 3, regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (buf [2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		/*
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
		*/

		mono_arch_patch_callsite (code - 2, code - 2 + 5, nullified_class_init_trampoline);
	} else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
		/* call *<reg> */
		/* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
		guint8* buf = code;

		/* FIXME: Not thread safe */
		buf [1] = 0x90;
		buf [2] = 0x90;
	} else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
		/* Already changed by another thread */
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
			buf [4], buf [5], buf [6]);
		g_assert_not_reached ();
	}
}
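
/*
 * mono_arch_nullify_plt_entry:
 *
 *   Redirect the PLT entry at CODE to the nullified class init trampoline.
 */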
void
mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	MonoJumpInfo *ji;
	guint32 code_size;
	guchar *code;
	GSList *unwind_ops, *l;

	code = mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, &unwind_ops, FALSE);

	mono_save_trampoline_xdebug_info ("<generic_trampoline>", code, code_size, unwind_ops);

	for (l = unwind_ops; l; l = l->next)
		g_free (l->data);
	g_slist_free (unwind_ops);

	return code;
}
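
/*
 * mono_arch_create_trampoline_code_full:
 *
 *   Emit the generic trampoline for TRAMP_TYPE. The generated code saves all
 * registers and sets up an LMF frame, recovers the trampoline argument (either
 * from the instruction stream of the calling specific trampoline or from
 * ARG_REG1), calls the C trampoline function, then restores state and either
 * returns the result or jumps to the compiled method, depending on the
 * trampoline type.
 */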
guchar*
mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, GSList **out_unwind_ops, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (538);

	*ji = NULL;

	framesize = 538 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += 8;

	cfa_offset -= 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += 8;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	rax_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);

	offset += 8;
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved at this point, so it can be used as a scratch register */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, 8);
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * 8), 8);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	}

	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef TARGET_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	}
	amd64_call_reg (code, AMD64_RAX);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);

	/* Restore LMF */

	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, 8);

	/* Restore argument registers, r10 (needed to pass rgctx to
	   static shared generic methods), r11 (imt register for
	   interface calls), and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - 0x8, 8);
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - 0x8);
	}

	g_assert ((code - buf) <= 538);

	mono_arch_flush_icache (buf, code - buf);

	*code_size = code - buf;

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		guint32 code_len;

		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
	}

	*out_unwind_ops = unwind_ops;

	return buf;
}
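
/*
 * mono_arch_get_nullified_class_init_trampoline:
 *
 *   The nullified class init trampoline is a single "ret": class init call
 * sites are redirected to it once the class has already been initialized.
 */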
gpointer
mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);

	*code_len = code - buf;

	return buf;
}
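
/*
 * mono_arch_create_specific_trampoline:
 *
 *   A specific trampoline is a call to the generic trampoline for TRAMP_TYPE
 * followed by ARG1 encoded directly in the instruction stream: a one byte size
 * marker (4 or 8) and then a 32 bit or 64 bit immediate. The generic
 * trampoline reads the argument back through its return address.
 */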
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
}
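
/*
 * mono_arch_create_rgctx_lazy_fetch_trampoline_full:
 *
 *   The fetch trampoline walks the chain of rgctx arrays for SLOT, starting
 * from either the MRGCTX or the vtable's rgctx pointer, and returns the slot
 * value if it is already filled in. If any pointer along the way is still
 * NULL, it falls back to the RGCTX_LAZY_FETCH specific trampoline, which
 * fills in the slot.
 */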
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	GSList *unwind_ops = NULL;
	char *name;

	*ji = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 8);
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	name = g_strdup_printf ("rgctx_fetch_trampoline_%s_%d", mrgctx ? "mrgctx" : "rgctx", index);
	mono_save_trampoline_xdebug_info (name, buf, code - buf, unwind_ops);
	g_free (name);

	return buf;
}

gpointer
mono_arch_create_generic_class_init_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_generic_class_init_trampoline_full (&code_size, &ji, FALSE);
}
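
/*
 * mono_arch_create_generic_class_init_trampoline_full:
 *
 *   The generated code tests the "initialized" bit of the vtable passed in
 * ARG_REG1 and returns immediately if it is already set; otherwise it jumps
 * to the GENERIC_CLASS_INIT specific trampoline, which runs the class
 * initializer.
 */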
gpointer
mono_arch_create_generic_class_init_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;

	*ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG

gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
}
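
/*
 * mono_arch_create_monitor_enter_trampoline_full:
 *
 *   Fast path for Monitor.Enter: if the object and its synchronization
 * structure are non-null, and the lock is either free (acquired here with a
 * cmpxchg on the owner field) or already owned by the current thread (nest
 * count incremented), the trampoline returns directly. All other cases fall
 * back to the MONITOR_ENTER specific trampoline.
 */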
gpointer
mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid;
	int tramp_size;
	int owner_offset, nest_offset, dummy;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}

gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
}
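
/*
 * mono_arch_create_monitor_exit_trampoline_full:
 *
 *   Fast path for Monitor.Exit: if the lock is owned by the current thread,
 * the trampoline either decrements the nest count or, when the nest count is 1
 * and there are no waiting threads, clears the owner field, then returns
 * directly. All other cases fall back to the MONITOR_EXIT specific trampoline.
 */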
gpointer
mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;

	*ji = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 94;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronization is not null */
		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
		/* is synchronization->owner == TID */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if no, jump to actual trampoline */
		jump_not_owned = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1 */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1 , 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}

#endif
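
/*
 * mono_arch_invalidate_method:
 *
 *   Overwrite the start of the JITted method described by JI so that any
 * subsequent call into it invokes FUNC with FUNC_ARG instead of running the
 * original code.
 */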
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}