/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/gc-internal.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"

#if defined(__native_client_codegen__) && defined(__native_client__)
#include <malloc.h>
#include <sys/nacl_syscalls.h>
#endif

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;

/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
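/*
 * A sketch of the generated code, assuming the 'this' argument register is
 * %rdi (the SysV AMD64 case; the real register comes from
 * mono_arch_get_this_arg_reg ()):
 *
 *     add $sizeof (MonoObject), %rdi   ; skip the object header to reach the value
 *     mov $addr, %rax
 *     jmp *%rax
 */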
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = mono_domain_code_reserve (domain, 20);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	nacl_domain_code_validate (domain, &start, 20, &code);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
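/*
 * Roughly, the emitted code is (a sketch, assuming MONO_ARCH_RGCTX_REG is
 * %r10, as elsewhere on this port):
 *
 *     mov $mrgctx, %r10
 *     jmp addr
 */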
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = 16;
	else
		buf_len = 30;
#endif

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);

	return start;
}
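
/*
 * mono_arch_get_llvm_imt_trampoline:
 *
 * Create a trampoline which loads M into MONO_ARCH_IMT_REG, loads the vtable
 * pointer from the 'this' argument, and jumps through the vtable slot at
 * VT_OFFSET (a descriptive summary of the emitter below).
 */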

gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_reg;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_reg = mono_arch_get_this_arg_reg (NULL);

	/* Set imt arg */
	amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load vtable address */
	amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
	amd64_jump_membase (code, AMD64_RAX, vt_offset);
	amd64_ret (code);

	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
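/*
 * The patchable call sequences recognized below look roughly like:
 *
 *     49 bb <imm64>      mov $target, %r11
 *     41 ff d3           call *%r11
 * or
 *     e8 <rel32>         call <target>
 * or
 *     41 ff 15 <rel32>   call *<OFFSET>(%rip)
 */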
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
#if defined(__default_codegen__)
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
#ifdef MONO_ARCH_NOMAP32BIT
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
#else
				/*
				 * This might happen when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
#endif
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
#elif defined(__native_client__)
	/* These are essentially the same 2 cases as above, modified for NaCl */

	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
	/* Return target must be bundle-aligned */
	g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);

	if (orig_code[-5] == 0xe8) {
		/* Direct call */
		int ret;
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf[sizeof(gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
		int ret;
		guint8 buf[sizeof(gint32)];
		*((gint32 *)(buf)) = (gint32)addr;
		/* orig_code[-14] is the start of the immediate. */
		ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else {
		g_assert_not_reached ();
	}

	return;
#endif
}
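
/*
 * mono_arch_patch_plt_entry:
 *
 * In the default codegen a PLT entry is a single indirect jump,
 *
 *     ff 25 <disp32>    jmp *<DISP>(%rip)
 *
 * so patching it only needs an atomic store of ADDR into the jump table slot
 * the displacement points at (slot = code + 6 + disp, per the code below).
 */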

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

#if defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
#elif defined(__native_client_codegen__)
	/* A PLT entry: */
	/* mov <DISP>(%rip), %r11d */
	/* nacljmp *%r11 */

	/* Verify the 'mov' */
	g_assert (code [0] == 0x45);
	g_assert (code [1] == 0x8b);
	g_assert (code [2] == 0x1d);

	disp = *(gint32*)(code + 3);

	/* 7 = 3 (mov opcode) + 4 (disp) */
	/* This needs to resolve to the target of the RIP-relative offset */
	plt_jump_table_entry = (gpointer*)(code + 7 + disp);

#endif /* __native_client_codegen__ */

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}

static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
	guint8 buf [10];
	gint32 disp;
	MonoJitInfo *ji = NULL;

#ifdef ENABLE_LLVM
	/* code - 9 might be before the start of the method */
	/* FIXME: Avoid this expensive call somehow */
	ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
#endif

	mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
	code = buf + 9;

	*displacement = 0;

	code -= 7;

	if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
		/* call *<OFFSET>(%rip): the slot is at rip + disp, i.e. 7 bytes past
		   the start of the call, so no extra displacement is needed */
		disp = *(gint32*)(code + 3);
		return (gpointer*)(code + disp + 7);
	} else {
		g_assert_not_reached ();
		return NULL;
	}
}

static gpointer*
get_vcall_slot_addr (guint8* code, mgreg_t *regs)
{
	gpointer vt;
	int displacement;
	vt = get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)((char*)vt + displacement);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	MonoJitInfo *ji = NULL;
	gboolean can_write;

	if (mono_use_llvm) {
		/* code - 7 might be before the start of the method */
		/* FIXME: Avoid this expensive call somehow */
		ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	}

	can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = get_vcall_slot_addr (code, regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (buf [2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		/*
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
		*/

		mono_arch_patch_callsite (code - 5, code, nullified_class_init_trampoline);
	} else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
		/* call *<reg> */
		/* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
		mono_arch_patch_callsite (code - 13, code, nullified_class_init_trampoline);
	} else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
		/* Already changed by another thread */
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
			buf [4], buf [5], buf [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline);
}
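
/*
 * mono_arch_create_generic_trampoline:
 *
 * In outline, the generated trampoline:
 *   - pops its own return address (pushed by the specific trampoline's call)
 *     and uses it to recover the specific trampoline's address,
 *   - sets up an RBP frame, saves all integer and fp registers and builds an
 *     LMF so the runtime can unwind through the trampoline,
 *   - calls the C function for TRAMP_TYPE with (saved regs, caller ip,
 *     method/vtable argument, trampoline address),
 *   - restores state and either returns or tail-jumps to the code the C
 *     function produced.
 */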

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = NACL_SIZE (548, 548*2);

#if defined(__native_client_codegen__)
	const guint kNaClTrampOffset = 17;
#endif

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (kMaxCodeSize);

	framesize = kMaxCodeSize + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	offset = 0;
	rbp_offset = - offset;

	offset += sizeof(mgreg_t);
	rax_offset = - offset;

	offset += sizeof(mgreg_t);
	tramp_offset = - offset;

	offset += sizeof(gpointer);
	arg_offset = - offset;
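
	/*
	 * Sketch of the frame layout built above, relative to %rbp (each slot
	 * is sizeof(mgreg_t) == 8 bytes; more slots follow below):
	 *
	 *     rbp_offset   =   0   caller's %rbp (from the push above)
	 *     rax_offset   =  -8   result / address to tail-jump to
	 *     tramp_offset = -16   address of the calling specific trampoline
	 *     arg_offset   = -24   trampoline argument decoded below
	 */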

	/* Compute the trampoline address from the return address */
	if (aot) {
#if defined(__default_codegen__)
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
#elif defined(__native_client_codegen__)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
#endif
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));

	offset += sizeof(mgreg_t);
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * sizeof(mgreg_t);
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * sizeof(mgreg_t);
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
#elif defined(__native_client_codegen__)
			/* The arg is hidden in a "push imm32" instruction, */
			/* add one to skip the opcode. */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
#endif
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
#elif defined(__native_client_codegen__)
			/* All args are 32-bit pointers in NaCl */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
#endif
		}
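
		/*
		 * The bytes decoded above are the ones emitted by
		 * mono_arch_create_specific_trampoline (): after the call there is
		 * a size marker byte (0x4 or 0x8) followed by a 32- or 64-bit
		 * immediate holding the argument.
		 */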
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}

	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, sizeof(gpointer));
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof(gpointer));
	}
	/* Save callee saved regs */
#ifdef TARGET_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, sizeof(mgreg_t));
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));

	/* Restore LMF */

	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}

	g_assert ((code - buf) <= kMaxCodeSize);

	nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);

	mono_arch_flush_icache (buf, code - buf);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (NULL);
	}

	if (info)
		*info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	nacl_global_codeman_validate (&buf, 16, &code);

	mono_arch_flush_icache (buf, code - buf);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("nullified_class_init_trampoline"), buf, code - buf, NULL, NULL);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "nullified_class_init_trampoline");

	return buf;
}
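
/*
 * mono_arch_create_specific_trampoline:
 *
 * Emit a call to the generic trampoline for TRAMP_TYPE with ARG1 encoded
 * directly after the call in the instruction stream, where the generic
 * trampoline finds it through its return address. Roughly:
 *
 *     e8 <rel32>            call <generic trampoline>
 *     04 <imm32>            ; or: 08 <imm64>  (size marker + argument)
 */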

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = mono_domain_code_reserve_align (domain, size, 1);
	}
#elif defined(__native_client_codegen__)
	size = 5 + 1 + 4;
	/* Aligning the call site below could */
	/* add up to kNaClAlignment-1 bytes */
	size += (kNaClAlignment-1);
	buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
	code = buf;
#endif

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
#elif defined(__native_client_codegen__)
	/* For NaCl, all tramp args are 32-bit because they're pointers */
	*code = 0x68; /* push imm32 */
	*(guint32*)(code + 1) = (gint32)arg1;
	code += 5;
#endif

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	nacl_domain_code_validate (domain, &buf, size, &code);

	mono_arch_flush_icache (buf, size);

	return buf;
}
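
/*
 * mono_arch_create_rgctx_lazy_fetch_trampoline:
 *
 * Emit an inlined fast path which walks the rgctx array chain for SLOT and
 * returns the slot's contents; whenever a pointer in the chain (or the slot
 * itself) is still null, it falls back to the RGCTX_LAZY_FETCH specific
 * trampoline, which performs the actual lazy initialization.
 */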

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);

	return buf;
}
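
/*
 * mono_arch_create_generic_class_init_trampoline:
 *
 * Emit a fast path which returns immediately when the vtable's 'initialized'
 * bit is already set and otherwise falls back to the GENERIC_CLASS_INIT
 * specific trampoline.
 */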

gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG
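
/*
 * mono_arch_create_monitor_enter_trampoline:
 *
 * Fast path, only emitted when the thread TLS offset is known: after the
 * null checks, try a "lock cmpxchg" of the current TID into
 * synchronisation->owner, or bump the nest count on recursive entry. Every
 * failure path falls back to the MONITOR_ENTER specific trampoline.
 */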

gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid, *jump_sync_thin_hash = NULL;
	int tramp_size;
	int owner_offset, nest_offset, dummy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronisation to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			amd64_test_reg_imm (code, AMD64_RCX, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, ~0x3);
		}

		/* is synchronisation null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);

		/* is synchronisation->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronisation->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronisation->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, sizeof(mgreg_t));
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_enter_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}
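
/*
 * mono_arch_create_monitor_exit_trampoline:
 *
 * Fast path: when the calling thread owns the lock, either clear
 * synchronisation->owner (nest == 1 and no waiters) or decrement the nest
 * count. Anything else falls back to the MONITOR_EXIT specific trampoline.
 */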

gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_sync_thin_hash = NULL;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 112;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronisation to RCX */
		amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			amd64_test_reg_imm (code, AMD64_RCX, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, ~0x3);
		}

		/* is synchronisation null? */
		amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronisation is not null */
		/* load MonoInternalThread* into RDX */
		code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
		/* is synchronisation->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if no, jump to actual trampoline */
		jump_not_owned = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronisation->owner == TID */
		/* is synchronisation->nest == 1? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronisation->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, set synchronisation->owner to null and return */
		amd64_mov_membase_imm (code, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronisation->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronisation->nest and return */
		amd64_dec_membase_size (code, AMD64_RCX, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RDI, sizeof(mgreg_t));
#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create (g_strdup_printf ("monitor_exit_trampoline"), buf, code - buf, ji, unwind_ops);

	return buf;
}

#endif
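
/*
 * mono_arch_invalidate_method:
 *
 * Overwrite the start of the method given by JI with a call to FUNC, passing
 * FUNC_ARG in the first argument register, so any thread entering the method
 * ends up in FUNC instead.
 */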

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (void)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	code = buf = mono_global_codeman_reserve (tramp_size);

	/*
	 * This trampoline restores the call chain of the handler block, then jumps
	 * into the code that deals with it.
	 */

	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, AMD64_RDI, mono_get_jit_tls_offset ());
		/* simulate a call */
		amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RDI, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, AMD64_RDI, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);

	if (mono_jit_map_is_enabled ())
		mono_emit_jit_tramp (buf, code - buf, "handler_block_trampoline");

	return buf;
}

/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE, if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}

/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	/* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
	/* See aot-compiler.c arch_emit_plt_entry for details. */
	return *(guint32*)(plt_entry + 18);
#else
	return *(guint32*)(plt_entry + 6);
#endif
}