/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"
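
/* REX prefixes occupy the 0x40-0x4f byte range on amd64, so IS_REX
   recognizes the first byte of a REX-prefixed instruction. */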
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;

/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 20);
	mono_domain_unlock (domain);
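
	/* 'this' points at the box; the raw value type data starts right after
	 * the MonoObject header, so advance it by sizeof (MonoObject). */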
	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}

/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;
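
	/*
	 * Three call-site encodings are handled below; code points just past the
	 * call, so all offsets are negative:
	 *   49 bb <imm64>     movabs $addr, %r11, followed by call *%r11
	 *   e8 <rel32>        direct call with a 32 bit displacement
	 *   41 ff 15 <disp32> call *<disp>(%rip) through a GOT entry
	 */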
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}

void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);
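
	/* The jmp *<DISP>(%rip) instruction is 6 bytes long, and %rip-relative
	 * addressing is relative to the following instruction, hence
	 * code + 6 + disp. */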
	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;
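
	/* code points right after the call; stepping back 3 bytes lands on the
	 * start of a 3 byte call <REG>, while the longer call encodings are
	 * matched at negative offsets below. */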

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		guint8 *buf = code - 2;
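
		/* Overwrite the 5 byte call with a 3 byte and a 2 byte nop:
		 * 0x66 is an operand-size prefix and 0x90 is nop, so 66 66 90 and
		 * 66 90 are the multi-byte nop forms recommended by the AMD64
		 * optimization manual. */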
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like freebsd */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x66;
		buf [3] = 0x90;
		buf [4] = 0x66;
		buf [5] = 0x66;
		buf [6] = 0x66;
		buf [7] = 0x90;
		buf [8] = 0x66;
		buf [9] = 0x66;
		buf [10] = 0x90;
		buf [11] = 0x66;
		buf [12] = 0x90;
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	MonoJumpInfo *ji;
	guint32 code_size;

	return mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, FALSE);
}

guchar*
mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset;
	gboolean has_caller;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (524);

	*ji = NULL;
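
	/* Round the frame size up to the next multiple of
	 * MONO_ARCH_FRAME_ALIGNMENT (assumed to be a power of two) by adding
	 * alignment - 1 and masking off the low bits. */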
	framesize = 524 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += 8;

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;

	/* Compute the trampoline address from the return address */
	if (aot) {
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);

	offset += 8;
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}

	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, 8);
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
		} else {
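			/* A specific trampoline embeds a size byte right after its
			 * call instruction (see mono_arch_create_specific_trampoline
			 * below): 0x4 means a 32 bit argument follows, 0x8 a 64 bit
			 * one. Read the marker and load the immediate accordingly. */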
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * 8), 8);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	}

	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	}
	amd64_call_reg (code, AMD64_RAX);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);

	/* Restore LMF */

	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
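	/* Undo the tag bit that was set on previous_lmf above before storing it
	 * back into *lmf_addr. */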
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);

	/* Restore argument registers, r10 (needed to pass rgctx to
	   static shared generic methods) and r11 (imt register for
	   interface calls). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	/*
	 * FIXME: When using aot-only, the called code might be a C vararg function
	 * which uses %rax as well.
	 * We could restore it, but we would have to use another register to store the
	 * target address, and we don't have any left.
	 * Also, the default AOT plt trampolines overwrite 'rax'.
	 */

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	if (tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT)
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, saved_regs_offset + (AMD64_RAX * 8), 8);

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		amd64_ret (code);
	} else {
		/* call the compiled method */
		amd64_jump_reg (code, AMD64_RAX);
	}

	g_assert ((code - buf) <= 524);

	mono_arch_flush_icache (buf, code - buf);

	*code_size = code - buf;

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		guint32 code_len;

		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
	}

	return buf;
}

gpointer
mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);

	*code_len = code - buf;

	return buf;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);
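
	/* Size of a specific trampoline: a 5 byte call to the generic trampoline
	 * code, a 1 byte size marker, and a 4 or 8 byte immediate, depending on
	 * whether arg1 fits in 32 bits. */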
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	mono_domain_lock (domain);
	code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 1);
	mono_domain_unlock (domain);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_rgctx_lazy_fetch_trampoline_full (slot, &code_size, &ji, FALSE);
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline_full (guint32 slot, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	*ji = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
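
	/* Each rgctx array apparently reserves slot 0 for the link to the next
	 * array (the fetch below skips it via index + 1), so each level holds
	 * size - 1 entries; walk the levels to turn the flat slot number into a
	 * (depth, index) pair. */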
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 64 + 8 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	*code_size = code - buf;

	return buf;
}

gpointer
mono_arch_create_generic_class_init_trampoline (void)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
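
	/* Fast path: the MonoVTable is passed in ARG_REG1; test its
	 * 'initialized' bit and return immediately if the class is already
	 * initialized, otherwise fall through to the generic trampoline. */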
	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);

	tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	amd64_jump_code (code, tramp);

	mono_arch_flush_icache (buf, code - buf);

	g_assert (code - buf <= tramp_size);

	return buf;
}

#ifdef MONO_ARCH_MONITOR_OBJECT_REG

gpointer
mono_arch_create_monitor_enter_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_enter_trampoline_full (&code_size, &ji, FALSE);
}

gpointer
mono_arch_create_monitor_enter_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid;
	int tramp_size;
	int owner_offset, nest_offset, dummy;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);
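
	/* Inline monitor enter fast path, roughly: bail out to the generic
	 * trampoline if obj or its sync block is null; if the monitor is
	 * unowned, try to cmpxchg our TID into the owner field; if we already
	 * own it, just increment the nest count. Note that in this function
	 * 'buf' is the moving emit pointer and 'code' marks the start. */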
	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (buf, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (buf, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (buf, AMD64_RCX, AMD64_RCX);
		/* if yes, jump to actual trampoline */
		jump_sync_null = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);

		/* load MonoThread* into RDX */
		buf = mono_amd64_emit_tls_get (buf, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (buf, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = buf;
		amd64_branch8 (buf, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		/* zero RAX */
		amd64_alu_reg_reg (buf, X86_XOR, AMD64_RAX, AMD64_RAX);
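		/* lock cmpxchg compares RAX (zero, i.e. unowned) with
		 * synchronization->owner and installs our TID only if the monitor
		 * is still unowned; ZF is set on success. */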
748 /* compare and exchange */
749 amd64_prefix (buf, X86_LOCK_PREFIX);
750 amd64_cmpxchg_membase_reg_size (buf, AMD64_RCX, owner_offset, AMD64_RDX, 8);
751 /* if not successful, jump to actual trampoline */
752 jump_cmpxchg_failed = buf;
753 amd64_branch8 (buf, X86_CC_NZ, -1, 1);
754 /* if successful, return */
755 amd64_ret (buf);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, buf);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (buf, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = buf;
		amd64_branch8 (buf, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (buf, AMD64_RCX, nest_offset, 4);
		/* return */
		amd64_ret (buf);

		x86_patch (jump_obj_null, buf);
		x86_patch (jump_sync_null, buf);
		x86_patch (jump_cmpxchg_failed, buf);
		x86_patch (jump_other_owner, buf);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (buf, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_mov_reg_membase (buf, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (buf, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (buf, tramp);
	}

	mono_arch_flush_icache (code, buf - code);
	g_assert (buf - code <= tramp_size);

	*code_size = buf - code;

	return code;
}

gpointer
mono_arch_create_monitor_exit_trampoline (void)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_monitor_exit_trampoline_full (&code_size, &ji, FALSE);
}

gpointer
mono_arch_create_monitor_exit_trampoline_full (guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 94;

	code = buf = mono_global_codeman_reserve (tramp_size);
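
	/* Inline monitor exit fast path, roughly: if obj is null, go to the
	 * generic trampoline; if there is no sync block or we do not own the
	 * monitor, just return; if nest == 1 and there are no waiters
	 * (entry_count is 0), clear the owner and return; if nest > 1,
	 * decrement it and return; otherwise fall back to the generic
	 * trampoline. */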
	if (mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in RDI */
		/* is obj null? */
		amd64_test_reg_reg (buf, AMD64_RDI, AMD64_RDI);
		/* if yes, jump to actual trampoline */
		jump_obj_null = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);

		/* load obj->synchronization to RCX */
		amd64_mov_reg_membase (buf, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		/* is synchronization null? */
		amd64_test_reg_reg (buf, AMD64_RCX, AMD64_RCX);
		/* if not, jump to next case */
		jump_next = buf;
		amd64_branch8 (buf, X86_CC_NZ, -1, 1);
		/* if yes, just return */
		amd64_ret (buf);

		/* next case: synchronization is not null */
		x86_patch (jump_next, buf);
		/* load MonoThread* into RDX */
		buf = mono_amd64_emit_tls_get (buf, AMD64_RDX, mono_thread_get_tls_offset ());
		/* load TID into RDX */
		amd64_mov_reg_membase (buf, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoThread, tid), 8);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (buf, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
		/* if yes, jump to next case */
		jump_next = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
		/* if not, just return */
		amd64_ret (buf);

		/* next case: synchronization->owner == TID */
		x86_patch (jump_next, buf);
		/* is synchronization->nest == 1? */
		amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = buf;
		amd64_branch8 (buf, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (buf, X86_CMP, AMD64_RCX, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = buf;
		amd64_branch8 (buf, X86_CC_NZ, -1, 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (buf, AMD64_RCX, owner_offset, 0, 8);
		amd64_ret (buf);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, buf);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (buf, AMD64_RCX, nest_offset, 4);
		amd64_ret (buf);

		x86_patch (jump_obj_null, buf);
		x86_patch (jump_have_waiters, buf);
	}

	/* jump to the actual trampoline */
#if MONO_AMD64_ARG_REG1 != AMD64_RDI
	amd64_mov_reg_reg (buf, MONO_AMD64_ARG_REG1, AMD64_RDI, 8);
#endif

	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, buf - code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_mov_reg_membase (buf, AMD64_R11, AMD64_RIP, 0, 8);
		amd64_jump_reg (buf, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (buf, tramp);
	}

	mono_arch_flush_icache (code, buf - code);
	g_assert (buf - code <= tramp_size);

	*code_size = buf - code;

	return code;
}

#endif

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}