/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/gc-internal.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"

#if defined(__native_client_codegen__) && defined(__native_client__)
#include <malloc.h>
#include <sys/nacl_syscalls.h>
#endif
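
/* Bytes 0x40-0x4f are the REX instruction prefixes on amd64. */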
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;

/**
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = mono_domain_code_reserve (domain, 20);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	nacl_domain_code_validate (domain, &start, 20, &code);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);

	return start;
}

gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_reg;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_reg = mono_arch_get_this_arg_reg (NULL);

	/* Set imt arg */
	amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load vtable address */
	amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
	amd64_jump_membase (code, AMD64_RAX, vt_offset);
	amd64_ret (code);

	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
#if defined(__default_codegen__)
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
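	/*
	 * Concretely (offsets are relative to the return address): 49 bb <imm64>
	 * is "movabs $imm64, %r11" followed by 41 ff d3 "call *%r11" (13 bytes in
	 * total, so the immediate starts at orig_code - 11), while e8 <rel32> is
	 * a 5-byte direct call whose displacement starts at orig_code - 4.
	 */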
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
#ifdef MONO_ARCH_NOMAP32BIT
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
#else
				/*
				 * This might happen when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
#endif
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
#elif defined(__native_client__)
	/* These are essentially the same 2 cases as above, modified for NaCl */

	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
	/* Return target must be bundle-aligned */
	g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);

	if (orig_code[-5] == 0xe8) {
		/* Direct call */
		int ret;
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf[sizeof(gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
		int ret;
		guint8 buf[sizeof(gint32)];
		*((gint32 *)(buf)) = (gint32)addr;
		/* orig_code[-14] is the start of the immediate. */
		ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else {
		g_assert_not_reached ();
	}

	return;
#endif
}

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

#if defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%rip) */
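	/*
	 * Encoded as ff 25 <disp32>, a 6-byte instruction; the displacement is
	 * relative to the end of the instruction, hence code + 6 + disp below.
	 */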
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
#elif defined(__native_client_codegen__)
	/* A PLT entry: */
	/* mov <DISP>(%rip), %r11d */
	/* nacljmp *%r11 */

	/* Verify the 'mov' */
	g_assert (code [0] == 0x45);
	g_assert (code [1] == 0x8b);
	g_assert (code [2] == 0x1d);

	disp = *(gint32*)(code + 3);

	/* 7 = 3 (mov opcode) + 4 (disp) */
	/* This needs to resolve to the target of the RIP-relative offset */
	plt_jump_table_entry = (gpointer*)(code + 7 + disp);

#endif /* __native_client_codegen__ */

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}

static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
	guint8 buf [10];
	gint32 disp;
	MonoJitInfo *ji = NULL;

#ifdef ENABLE_LLVM
	/* code - 9 might be before the start of the method */
	/* FIXME: Avoid this expensive call somehow */
	ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
#endif

	mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
	code = buf + 9;

	*displacement = 0;

	code -= 7;

	if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
		/* call OFFSET(%rip) */
		g_assert_not_reached ();
		disp = *(gint32*)(code + 3);
		*displacement = disp;
		return (gpointer*)(code + disp + 7);
	} else {
		g_assert_not_reached ();
		return NULL;
	}
}

static gpointer*
get_vcall_slot_addr (guint8* code, mgreg_t *regs)
{
	gpointer vt;
	int displacement;
	vt = get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)((char*)vt + displacement);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	MonoJitInfo *ji = NULL;
	gboolean can_write;

	if (mono_use_llvm) {
		/* code - 7 might be before the start of the method */
		/* FIXME: Avoid this expensive call somehow */
		ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	}

	can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = get_vcall_slot_addr (code, regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (buf [2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		/*
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
		*/

		mono_arch_patch_callsite (code - 5, code, nullified_class_init_trampoline);
	} else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
		/* call *<reg> */
		/* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
		mono_arch_patch_callsite (code - 13, code, nullified_class_init_trampoline);
	} else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
		/* Already changed by another thread */
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
			buf [4], buf [5], buf [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}

static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = NACL_SIZE (600, 600*2);

#if defined(__native_client_codegen__)
	const guint kNaClTrampOffset = 17;
#endif

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (kMaxCodeSize);

	framesize = kMaxCodeSize + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);
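	// The CFA (canonical frame address, in DWARF terms) is the stack pointer
	// value at the call site; the unwind ops recorded alongside the emitted
	// code let the runtime's unwinder walk through this trampoline frame.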

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	offset = 0;
	rbp_offset = - offset;

	offset += sizeof(mgreg_t);
	rax_offset = - offset;

	offset += sizeof(mgreg_t);
	tramp_offset = - offset;

	offset += sizeof(gpointer);
	arg_offset = - offset;

	/* Compute the trampoline address from the return address */
	if (aot) {
#if defined(__default_codegen__)
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
#elif defined(__native_client_codegen__)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
#endif
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
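	/* tramp_offset now holds the start address of the specific trampoline
	   that called us; its instruction stream encodes the trampoline argument,
	   which is read back out below. */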

	offset += sizeof(mgreg_t);
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * sizeof(mgreg_t);
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * sizeof(mgreg_t);
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);

	/* Check that the stack is aligned */
#if defined(__default_codegen__)
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RDI, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
#endif
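	/*
	 * The check above verifies the 16-byte stack alignment the SysV amd64
	 * ABI requires at call sites; on misalignment it calls stack_unaligned(),
	 * or, in the AOT case where no immediate can be embedded, it faults
	 * through a null load.
	 */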

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT &&
			tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
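		/*
		 * In the non-AOT case the specific trampoline looks like
		 * [5-byte call rel32][size byte: 0x4 or 0x8][imm32 or imm64], as
		 * emitted by mono_arch_create_specific_trampoline () below; the byte
		 * at offset 5 selects how wide a load to do from offset 6.
		 */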
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
#elif defined(__native_client_codegen__)
			/* The arg is hidden in a "push imm32" instruction, */
			/* add one to skip the opcode. */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
#endif
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
#elif defined(__native_client_codegen__)
			/* All args are 32-bit pointers in NaCl */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
#endif
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}

	/* Save LMF begin */
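	/* The LMF ("last managed frame") is a per-thread linked list of saved
	   register states that lets the runtime unwind across non-managed frames
	   such as this trampoline. */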

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, sizeof(gpointer));
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof(gpointer));
	}
	/* Save callee saved regs */
#ifdef TARGET_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, sizeof(mgreg_t));
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
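	/* The x86-64 SysV ABI reserves the 128 bytes below %rsp as a red zone
	   that signal handlers and interrupts must not clobber, so this slot is
	   still valid when it is read back %rsp-relative after the leave. */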

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}

	g_assert ((code - buf) <= kMaxCodeSize);

	nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);

	mono_arch_flush_icache (buf, code - buf);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (NULL);
	}

	if (info)
		*info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	nacl_global_codeman_validate (&buf, 16, &code);

	mono_arch_flush_icache (buf, code - buf);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "nullified_class_init_trampoline");

	return buf;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = mono_domain_code_reserve_align (domain, size, 1);
	}
#elif defined(__native_client_codegen__)
	size = 5 + 1 + 4;
	/* Aligning the call site below could */
	/* add up to kNaClAlignment-1 bytes */
	size += (kNaClAlignment-1);
	buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
	code = buf;
#endif

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
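	/*
	 * Emit the argument right after the call: a size byte (0x4 or 0x8)
	 * followed by the 32- or 64-bit value. The generic trampoline reads the
	 * size byte at offset 5 to decide how wide a load to do from offset 6.
	 */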
#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
#elif defined(__native_client_codegen__)
	/* For NaCl, all tramp args are 32-bit because they're pointers */
	*code = 0x68; /* push imm32 */
	*(guint32*)(code + 1) = (gint32)arg1;
	code += 5;
#endif

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	nacl_domain_code_validate (domain, &buf, size, &code);

	mono_arch_flush_icache (buf, size);

	return buf;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
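	/*
	 * The rgctx is a chain of arrays: one slot of each array links to the
	 * next array, leaving size - 1 usable entries per level. The loop above
	 * translates the flat SLOT number into a (depth, index) pair.
	 */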

	tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
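	/* Fast path: test the 'initialized' bit in the vtable and return
	   immediately if it is already set; otherwise fall through to the
	   generic class init trampoline. */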

	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG

gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid, *jump_sync_thin_hash = NULL;
	int tramp_size;
	int owner_offset, nest_offset, dummy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();
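	/* Inline fast path for Monitor.Enter: if the object has a sync block
	   with no owner, try to compare-exchange the current thread's TID into
	   the owner field; if this thread already owns it, bump the nest count.
	   Anything more complicated falls back to the generic trampoline. */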

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME use testb encoding */
			amd64_test_reg_imm (code, AMD64_RCX, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, ~0x3);
		}

		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, sizeof (mgreg_t));
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_enter_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_sync_thin_hash = NULL;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 112;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();
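	/* Inline fast path for Monitor.Exit: if the current thread owns the
	   monitor, either clear the owner (nest == 1 and nobody is waiting) or
	   decrement the nest count; any other case falls back to the generic
	   trampoline. */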

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME use testb encoding */
			amd64_test_reg_imm (code, AMD64_RCX, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, ~0x3);
		}

		/* is synchronization null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronization is not null */
		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
		/* is synchronization->owner == TID */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if no, jump to actual trampoline */
		jump_not_owned = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1 */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1 , 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, sizeof (mgreg_t));
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_exit_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#endif

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (void)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	code = buf = mono_global_codeman_reserve (tramp_size);

	/*
	 * This trampoline restores the call chain of the handler block, then jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, AMD64_RDI, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RDI, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "handler_block_trampoline");

	return buf;
}

/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE, if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}

/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	/* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
	/* See aot-compiler.c arch_emit_plt_entry for details. */
	return *(guint32*)(plt_entry + 18);
#else
	return *(guint32*)(plt_entry + 6);
#endif
}