/**
 * \file
 * JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/x86/x86-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-x86.h"
#include "mini-runtime.h"
#include "debugger-agent.h"
#include "jit-icalls.h"
#include "mono/utils/mono-tls-inline.h"

/**
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does the
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4, size = 16;
	MonoDomain *domain = mono_domain_get ();
	GSList *unwind_ops;

	start = code = mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, MONO_ABI_SIZEOF (MonoObject));
	x86_jump_code (code, addr);
	g_assertf ((code - start) <= size, "%d %d", (int)(code - start), size);

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
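
/*
 * Illustration (a sketch, not emitted verbatim by the macros above): the
 * generated stub is just
 *
 *     add dword [esp + 4], sizeof (MonoObject)  ; step over the object header
 *     jmp addr                                  ; tail call the method body
 *
 * so the boxed receiver at [esp + 4] becomes a pointer to the unboxed value
 * before control reaches the method.
 */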

gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;

	MonoDomain *domain = mono_domain_get ();

	const int buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, (gsize)arg);
	x86_jump_code (code, addr);
	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
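
/*
 * Size sketch, assuming the usual 32-bit encodings: mov reg, imm32 is
 * 5 bytes (0xb8 + reg, imm32) and jmp rel32 is 5 bytes (0xe9, rel32),
 * which is why a buf_len of 10 is exactly enough for the two instructions
 * emitted above.
 */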

void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [8];

	// Since method_start is retrieved from the function return address (below the current call/jmp to patch), there is a case when the
	// last instruction of a function is the call (due to OP_NOT_REACHED) and it is directly followed by a
	// different method. In that case orig_code points into the next method and method_start will also point into the
	// next method, not the method containing the call to patch. For this specific case, fall back to using a method_start of NULL.
	gboolean can_write = mono_breakpoint_clean_code (method_start != orig_code ? method_start : NULL, orig_code, 8, buf, sizeof (buf));

	code = buf + 8;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;
	orig_code -= 6;
	if (code [1] == 0xe8) {
		if (can_write) {
			mono_atomic_xchg_i32 ((gint32*)(orig_code + 2), (gsize)addr - ((gsize)orig_code + 1) - 5);

			/* Tell valgrind to recompile the patched code */
			VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
		}
	} else if (code [1] == 0xe9) {
		/* A PLT entry: jmp <DISP> */
		if (can_write)
			mono_atomic_xchg_i32 ((gint32*)(orig_code + 2), (gsize)addr - ((gsize)orig_code + 1) - 5);
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
				code [4], code [5]);
		g_assert_not_reached ();
	}
}
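
/*
 * Patch math in outline: the call/jmp being rewritten starts at
 * orig_code + 1 (its 0xe8/0xe9 opcode byte) and is 5 bytes long, so the
 * rel32 operand lives at orig_code + 2 and must encode
 * addr - (orig_code + 1 + 5); that is the value computed and stored
 * atomically above.
 */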

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	guint32 offset;

	/* Patch the jump table entry used by the plt entry */

	/* A PLT entry: jmp *<DISP>(%ebx) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0xa3);

	offset = *(guint32*)(code + 2);
	if (!got)
		got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
	*(guint8**)((guint8*)got + offset) = addr;
}
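
/*
 * Encoding note: 0xff is the indirect jmp opcode and 0xa3 is the ModRM byte
 * (mod=10, reg=/4, rm=ebx), i.e. `jmp *disp32(%ebx)`, so the 32-bit jump
 * table offset read above directly follows the two opcode bytes.
 */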

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br_ex_check;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
	int cfa_offset; /* cfa = cfa_reg + cfa_offset */

	const int buf_len = 256;

	code = buf = mono_global_codeman_reserve (buf_len);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (target_mgreg_t)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (target_mgreg_t)
	 */

	/* Compute frame offsets relative to the frame pointer %ebp */
	arg_offset = sizeof (target_mgreg_t);
	caller_ip_offset = 2 * sizeof (target_mgreg_t);
	offset = 0;
	offset += sizeof (MonoLMF);
	lmf_offset = -offset;
	offset += X86_NREG * sizeof (target_mgreg_t);
	regarray_offset = -offset;
	/* Argument area */
	offset += 4 * sizeof (target_mgreg_t);
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
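
	/*
	 * Resulting frame layout in outline (offsets relative to %ebp after the
	 * prologue below; a sketch, not normative):
	 *
	 *     ebp + 8               caller ip (caller_ip_offset)
	 *     ebp + 4               trampoline argument (arg_offset)
	 *     ebp + 0               saved ebp
	 *     ebp + lmf_offset      MonoLMF
	 *     ebp + regarray_offset X86_NREG saved registers
	 *     below that            4-word argument area for the C trampoline call
	 */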

	/* ret addr and arg are on the stack */
	cfa_offset = 2 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);

	/* Allocate frame */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (target_mgreg_t));

	/* Save all registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		int reg = i;

		if (i == X86_EBP) {
			/* Save original ebp */
			/* EAX is already saved */
			x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
			reg = X86_EAX;
		} else if (i == X86_ESP) {
			/* Save original esp */
			/* EAX is already saved */
			x86_mov_reg_reg (code, X86_EAX, X86_EBP);
			/* Saved ebp + trampoline arg + return addr */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (target_mgreg_t));
			reg = X86_EAX;
		}
		x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (target_mgreg_t)), reg, sizeof (target_mgreg_t));
	}

	/* Setup LMF */
	/* eip */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t));
	}
	/* method */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (target_mgreg_t));
	} else {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (target_mgreg_t));
	}
	/* esp */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (target_mgreg_t));
	/* callee save registers */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (target_mgreg_t));

	/* Push LMF */
	/* get the address of lmf for the current thread */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_get_lmf_addr);
	}
	/* lmf->lmf_addr = lmf_addr (%eax) */
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->previous_lmf = *(lmf_addr) */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (target_mgreg_t));
	/* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (target_mgreg_t));
	/* *lmf_addr = lmf */
	x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));
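
	/*
	 * In outline, the LMF linking above performs:
	 *
	 *     lmf->lmf_addr = mono_get_lmf_addr ();
	 *     lmf->previous_lmf = (*lmf_addr) | 1;   // bit 0 marks a trampoline frame
	 *     *lmf_addr = lmf;
	 *
	 * so the unwinder can walk back through managed frames and recognize this
	 * one as a trampoline frame via the tagged previous_lmf pointer.
	 */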

	/* Call trampoline function */
	/* Arg 1 - registers */
	x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
	x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg2 - calling code */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	}
	/* Arg3 - trampoline argument */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg4 - trampoline address */
	// FIXME:
	x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */

	x86_mov_reg_reg (code, X86_EDX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
	x86_branch_disp (code, X86_CC_Z, 3, FALSE);
	x86_breakpoint (code);

#endif

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
		x86_call_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		x86_call_code (code, tramp);
	}

	/*
	 * Overwrite the trampoline argument with the address we need to jump to,
	 * to free %eax.
	 */
	x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);

	/* Restore LMF */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Check for interruptions */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}

	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	br_ex_check = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	x86_leave (code);
	/*
	 * The exception is in eax.
	 * We are calling the throw trampoline used by OP_THROW, so we have to set up the
	 * stack to look the same.
	 * The stack contains the ret addr and the trampoline argument, while the throw trampoline
	 * expects it to contain the ret addr and the exception. It also needs to be aligned
	 * after the exception is pushed.
	 */
	/* Align stack */
	x86_push_reg (code, X86_EAX);
	/* Push the exception */
	x86_push_reg (code, X86_EAX);
	//x86_breakpoint (code);
	/* Push the original return value */
	x86_push_membase (code, X86_ESP, 3 * 4);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	} else {
		x86_mov_reg_imm (code, X86_ECX, (gsize)(guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t));
	x86_jump_reg (code, X86_ECX);

	/* Normal case */
	mono_x86_patch (br_ex_check, code);

	/* Restore registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		if (i == X86_ESP || i == X86_EBP)
			continue;
		if (i == X86_EAX && tramp_type != MONO_TRAMPOLINE_AOT_PLT)
			continue;
		x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);
	}

	/* Restore frame */
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the value returned by the trampoline */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
		/* The trampoline returns normally, pop the trampoline argument */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		cfa_offset -= sizeof (target_mgreg_t);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
		x86_ret (code);
	} else {
		x86_ret (code);
	}
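
	/*
	 * Note on the two epilogues above: after x86_leave, the word at [esp] is
	 * the trampoline argument slot, which was overwritten earlier with the
	 * value returned by the C trampoline function. For MUST_RETURN trampoline
	 * types that value is a real return value, so it is loaded into %eax and
	 * popped before a normal ret. Otherwise it is the address to transfer to,
	 * and the bare ret pops it and "returns" straight into the target code.
	 */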

	g_assertf ((code - buf) <= buf_len, "%d %d", (int)(code - buf), buf_len);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}

#define TRAMPOLINE_SIZE 10

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;

	tramp = mono_get_trampoline_code (tramp_type);

	const int size = TRAMPOLINE_SIZE;

	code = buf = mono_domain_code_reserve_align (domain, size, 4);

	x86_push_imm (buf, (gsize)arg1);
	x86_jump_code (buf, tramp);
	g_assertf ((buf - code) <= size, "%d %d", (int)(buf - code), size);

	mono_arch_flush_icache (code, buf - code);
	MONO_PROFILER_RAISE (jit_code_buffer, (code, buf - code, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));

	if (code_len)
		*code_len = buf - code;

	return code;
}
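
/*
 * Layout sketch, assuming the standard encodings: push imm32 is 5 bytes
 * (0x68, imm32) and jmp rel32 is 5 bytes (0xe9, rel32), so a specific
 * trampoline is exactly TRAMPOLINE_SIZE (10) bytes:
 *
 *     push arg1   ; trampoline-specific argument
 *     jmp tramp   ; shared generic trampoline for tramp_type
 *
 * Note that in this function `buf` is the cursor advanced by the emitter
 * macros while `code` marks the start of the stub, the reverse of the
 * convention used elsewhere in this file.
 */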

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
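
	/*
	 * Sketch of the layout the loop above decodes: the rgctx is a linked
	 * list of arrays whose first element points to the next array, so an
	 * array of size N holds N - 1 usable slots. The flat slot number is
	 * therefore translated into a chain depth plus an index into the final
	 * array, which the emitted code below walks at runtime.
	 */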

	tramp_size = (aot ? 64 : 36) + 6 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	if (!mrgctx) {
		/* load rgctx ptr from vtable */
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
		/* is the rgctx ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		else
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (target_mgreg_t) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		x86_jump_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf (code - buf <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}

/*
 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
 *
 *   This is a general variant of the rgctx fetch trampolines. It receives a pointer to a gpointer[2] in the rgctx reg. The first entry contains the slot, the second
 * the trampoline to call if the slot is not filled.
 */
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (aot);

	unwind_ops = mono_arch_get_cie_program ();

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	// FIXME: Currently, we always go to the slow path.

	/* Load trampoline addr */
	x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_RGCTX_REG, 4, 4);
	/* Load mrgctx/vtable */
	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	x86_jump_reg (code, X86_EAX);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf (code - buf <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8*)ji->code_start;

	x86_push_imm (code, (gsize)func_arg);
	x86_call_code (code, (guint8*)func);
}

guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
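
/*
 * In outline: `code` points just past a 5-byte direct call (0xe8, rel32).
 * The rel32 at code - 4 is relative to the end of the instruction, which
 * is `code` itself, so the target is simply code + disp.
 */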

guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}

/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 *   Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
 */
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;

	const int buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	x86_mov_reg_imm (code, X86_EAX, (gsize)arg);
	x86_jump_code (code, addr);
	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}

/*
 * mono_arch_create_sdb_trampoline:
 *
 *   Return a trampoline which captures the current context, passes it to
 * mini_get_dbg_callbacks ()->single_step_from_context ()/mini_get_dbg_callbacks ()->breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = mono_global_codeman_reserve (tramp_size);

	framesize = 0;

	/* Argument area */
	framesize += sizeof (target_mgreg_t);

	framesize = ALIGN_TO (framesize, 8);
	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 4
	cfa_offset = 4;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);

	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* The + 8 makes the stack aligned */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);

	/* Initialize a MonoContext structure on the stack */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_reg (code, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (target_mgreg_t));
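
	/*
	 * The captured context reflects the caller's view of the frame (in
	 * outline): ctx.ebp is the pushed ebp value, ctx.esp is ebp + 8 (the CFA
	 * computed above) and ctx.eip is the return address at [ebp + 4], so the
	 * debugger sees the state at the single-step/breakpoint site rather than
	 * inside this trampoline.
	 */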

	/* Call the single step/breakpoint function in sdb */
	x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (target_mgreg_t));

	if (aot) {
		x86_breakpoint (code);
	} else {
		if (single_step)
			x86_call_code (code, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			x86_call_code (code, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}

	/* Restore registers from ctx */
	/* Overwrite the saved ebp */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (target_mgreg_t));
	/* Overwrite the saved eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (target_mgreg_t));
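
	/*
	 * The ebp/eip values were written back into the stack slots that the
	 * leave/ret pair below consumes, so if the debugger callback modified
	 * the context (e.g. to move the instruction pointer), execution resumes
	 * at the updated eip with the updated ebp.
	 */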

	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	x86_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assertf (code - buf <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}