/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg = AMD64_RDI;
	MonoDomain *domain = mono_domain_get ();

	if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
		this_reg = AMD64_RSI;

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 20);
	mono_domain_unlock (domain);
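
	/*
	 * A sketch of the machine code emitted below (assuming this_reg is
	 * AMD64_RDI; when the method returns a value type by value, the hidden
	 * return-buffer pointer occupies RDI and `this' moves to RSI, which is
	 * what the check above handles):
	 *
	 *   add  rdi, sizeof (MonoObject)   ; skip the object header so the
	 *                                   ; callee sees the raw valuetype data
	 *   mov  rax, <addr>                ; 64 bit immediate load of the target
	 *   jmp  rax
	 */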
	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}

void
mono_arch_patch_callsite (guint8 *code, guint8 *addr)
{
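	/*
	 * Callers pass `code' pointing just after the call instruction. The
	 * checks below distinguish three call-site forms; the byte patterns
	 * (a sketch derived from the comparisons that follow) are:
	 *
	 *   49 bb <imm64> ... 41 ff d3    mov %r11, <imm64>; call *%r11
	 *   e8 <rel32>                    call <rel32>
	 *   41 ff 15 <disp32>             call *<disp32>(%rip)
	 */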
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8)
			InterlockedExchangePointer ((gpointer*)(code - 11), addr);
		else {
			g_assert ((((guint64)(addr)) >> 32) == 0);
			g_assert ((((guint64)(code)) >> 32) == 0);
			InterlockedExchange ((gint32*)(code - 4), ((gint64)addr - (gint64)code));
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)code + (*(guint32*)(code - 4)));
		InterlockedExchangePointer (got_entry, addr);
	}
}

void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);
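
	/*
	 * The entry is 6 bytes: ff 25 <disp32>, i.e. jmp *<disp32>(%rip).
	 * %rip is the address of the next instruction, so the jump table
	 * slot the entry reads from lives at code + 6 + disp.
	 */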
	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	code -= 3;

	/*
	 * A given byte sequence can match more than one case here, so we have to
	 * be really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		guint8 *buf = code - 2;
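
		/*
		 * Overwrite the 5 byte call <rel32> with a 3 byte (66 66 90)
		 * and a 2 byte (66 90) nop.
		 */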
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
	} else if ((code [0] == 0x49) && (code [1] == 0xff)) {
		/* call <REG> */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
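		/*
		 * 13 bytes of nops: the 10 byte register-set template plus the
		 * 3 byte indirect call are replaced with 4+4+3+2 byte forms of
		 * the 66..90 multi-byte nop.
		 */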
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x66;
		buf [3] = 0x90;
		buf [4] = 0x66;
		buf [5] = 0x66;
		buf [6] = 0x66;
		buf [7] = 0x90;
		buf [8] = 0x66;
		buf [9] = 0x66;
		buf [10] = 0x90;
		buf [11] = 0x66;
		buf [12] = 0x90;
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code)
{
	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}

void
mono_arch_patch_delegate_trampoline (guint8 *code, guint8 *tramp, gssize *regs, guint8 *addr)
{
	guint8 rex = 0;
	guint32 reg;
	guint32 disp;

	if ((code [-3] == 0xff) && (amd64_modrm_reg (code [-2]) == 0x2) && (amd64_modrm_mod (code [-2]) == 0x1)) {
		/* call *[reg+disp8] */
		if (IS_REX (code [-4]))
			rex = code [-4];
		reg = amd64_modrm_rm (code [-2]);
		disp = *(guint8*)(code - 1);
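
		/*
		 * The matched encoding is <rex> ff /2 with mod == 01, i.e.
		 * call *disp8(reg): modrm reg field 2 selects the indirect
		 * call, mod 01 selects the 8 bit displacement form, and the
		 * REX.B bit (added below) extends the base register number.
		 */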
		//printf ("B: [%%r%d+0x%x]\n", reg, disp);
	}
	else {
		int i;

		for (i = -16; i < 0; ++i)
			printf ("%d ", code [i]);
		printf ("\n");
		g_assert_not_reached ();
	}

	reg += amd64_rex_b (rex);

	/* R11 is clobbered by the trampoline code */
	g_assert (reg != AMD64_R11);

	*(gpointer*)(((guint64)(regs [reg])) + disp) = addr;
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *code, *tramp;
	int i, lmf_offset, offset, method_offset, tramp_offset, saved_regs_offset, saved_fpregs_offset, framesize;
	gboolean has_caller;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (512);

	framesize = 512 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
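
	/*
	 * Example of the rounding, assuming MONO_ARCH_FRAME_ALIGNMENT is 16
	 * (the value lives in mini-amd64.h): (framesize + 15) & ~15 rounds
	 * framesize up to the next multiple of 16, keeping RSP aligned.
	 */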
	offset = 0;

	/*
	 * Allocate a new stack frame and transfer the two arguments received on
	 * the stack to our frame.
	 */
	amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
	amd64_pop_reg (code, AMD64_R11);

	amd64_push_reg (code, AMD64_RBP);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/*
	 * The method is at offset -8 from the new RBP, so no need to
	 * copy it.
	 */
	offset += 8;
	method_offset = - offset;

	offset += 8;
	tramp_offset = - offset;
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);

	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i)
		amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);

	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;
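
	/*
	 * A sketch of the frame layout built above (offsets from the new RBP):
	 *
	 *   rbp -  8                     method/vtable argument (method_offset,
	 *                                pushed by the specific trampoline)
	 *   rbp - 16                     trampoline address (tramp_offset)
	 *   rbp - 16 - AMD64_NREG * 8    all integer registers (saved_regs_offset)
	 *   ... - 8 * 8                  xmm0-xmm7 (saved_fpregs_offset)
	 *   ... - sizeof (MonoLMF)       the MonoLMF frame (lmf_offset)
	 */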
	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, method_offset, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	/* Save callee saved regs */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);

	/* Save LMF end */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_RDI, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_RSI, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RBP, method_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, tramp_offset, 8);
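
	/*
	 * RDI, RSI, RDX and RCX are the first four integer argument registers
	 * of the System V AMD64 ABI, so the C trampoline selected below is
	 * invoked as tramp (saved_regs, caller_ip, method_or_vtable, tramp_addr)
	 * (argument names taken from the comments above, not from a header).
	 */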

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
		tramp = (guint8*)mono_class_init_trampoline;
	else if (tramp_type == MONO_TRAMPOLINE_AOT)
		tramp = (guint8*)mono_aot_trampoline;
	else if (tramp_type == MONO_TRAMPOLINE_AOT_PLT)
		tramp = (guint8*)mono_aot_plt_trampoline;
	else if (tramp_type == MONO_TRAMPOLINE_DELEGATE)
		tramp = (guint8*)mono_delegate_trampoline;
	else
		tramp = (guint8*)mono_magic_trampoline;

	amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	amd64_call_reg (code, AMD64_RAX);

	/* Restore LMF */

	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);

	/* Restore argument registers */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i))
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	/* Restore stack */
	amd64_leave (code);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT)
		amd64_ret (code);
	else
		/* call the compiled method */
		amd64_jump_reg (code, AMD64_RAX);

	g_assert ((code - buf) <= 512);

	mono_arch_flush_icache (buf, code - buf);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = code = mono_global_codeman_reserve (16);
		x86_ret (code);
	}

	return buf;
}

#define TRAMPOLINE_SIZE 34

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp, *real_code;
	int size, jump_offset;

	tramp = mono_get_trampoline_code (tramp_type);

	code = buf = g_alloca (TRAMPOLINE_SIZE);

	/* push trampoline address */
	amd64_lea_membase (code, AMD64_R11, AMD64_RIP, -7);
	amd64_push_reg (code, AMD64_R11);

	/* push argument */
	if (amd64_is_imm32 ((gint64)arg1))
		amd64_push_imm (code, (gint64)arg1);
	else {
		amd64_mov_reg_imm (code, AMD64_R11, arg1);
		amd64_push_reg (code, AMD64_R11);
	}

	jump_offset = code - buf;
	amd64_jump_disp (code, 0xffffffff);
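
	/*
	 * The finished specific trampoline, a sketch:
	 *
	 *   lea  r11, [rip - 7]   ; the lea itself is 7 bytes, so r11 = start
	 *   push r11              ; trampoline address
	 *   push <arg1>           ; the trampoline-specific argument
	 *   jmp  <generic trampoline for tramp_type>
	 *
	 * The jump displacement is a placeholder here and is fixed up below
	 * once the final address of the code is known.
	 */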

	g_assert ((code - buf) <= TRAMPOLINE_SIZE);

	mono_domain_lock (domain);
	real_code = mono_code_manager_reserve (domain->code_mp, code - buf);
	size = code - buf;
	mono_domain_unlock (domain);

	memcpy (real_code, buf, size);

	/* Fix up jump */
	code = (guint8*)real_code + jump_offset;
	g_assert (amd64_is_imm32 (((gint64)tramp - (gint64)code)));
	amd64_jump_disp (code, tramp - code);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (real_code, size);

	return real_code;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;
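
	/*
	 * The method entry point is overwritten below with
	 * `mov rdi, func_arg; mov r11, func; ...; call r11', so any later
	 * call into the invalidated method ends up in func (func_arg).
	 */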

	amd64_mov_reg_imm (code, AMD64_RDI, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}

/*
 * This method is only called when running in the Mono Debugger.
 */
guint8 *
mono_debugger_create_notification_function (MonoCodeManager *codeman)
{
	guint8 *buf, *code;

	code = buf = mono_code_manager_reserve (codeman, 2);
	x86_breakpoint (buf);
	x86_ret (buf);
	return code;
}