Fix special case tramp assert in x86/amd64. (#16476)
[mono-project.git] / mono / mini / tramp-x86.c
blob: 68d01a5362bfd24e624bc14dfea149c526ba9b4f
/**
 * \file
 * JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/x86/x86-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-x86.h"
#include "mini-runtime.h"
#include "debugger-agent.h"
#include "jit-icalls.h"
/**
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4, size = 16;
	MonoDomain *domain = mono_domain_get ();
	GSList *unwind_ops;

	start = code = mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, MONO_ABI_SIZEOF (MonoObject));
	x86_jump_code (code, addr);
	g_assert ((code - start) < size);

	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
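/*
 * For illustration: on this 32-bit target the unbox trampoline is just two
 * instructions, patching the stack-passed 'this' in place and tail-jumping
 * to the real method (roughly):
 *
 *     add dword [esp+4], sizeof (MonoObject)   ; skip the object header
 *     jmp addr
 *
 * which is well under the 16 bytes reserved above.
 */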
gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;
	GSList *unwind_ops;

	MonoDomain *domain = mono_domain_get ();

	buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, arg);
	x86_jump_code (code, addr);
	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
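/*
 * For illustration: the emitted sequence is at most 10 bytes, matching
 * buf_len:
 *
 *     mov $arg, %MONO_ARCH_RGCTX_REG   ; b8+reg imm32, 5 bytes
 *     jmp addr                         ; up to 5 bytes
 *
 * i.e. it loads the static rgctx argument into the rgctx register and
 * tail-calls the wrapped code.
 */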
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [8];

	// Since method_start is retrieved from the function return address (below the current call/jmp to patch), there is a
	// case where the last instruction of a function is the call (due to OP_NOT_REACHED) and it is directly followed by a
	// different method. In that case orig_code points into the next method, and method_start will also point into the
	// next method, not the method containing the call to patch. For this specific case, fall back to a method_start of NULL.
	gboolean can_write = mono_breakpoint_clean_code (method_start != orig_code ? method_start : NULL, orig_code, 8, buf, sizeof (buf));

	code = buf + 8;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;
	orig_code -= 6;
	if (code [1] == 0xe8) {
		if (can_write) {
			mono_atomic_xchg_i32 ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);

			/* Tell valgrind to recompile the patched code */
			VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
		}
	} else if (code [1] == 0xe9) {
		/* A PLT entry: jmp <DISP> */
		if (can_write)
			mono_atomic_xchg_i32 ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5]);
		g_assert_not_reached ();
	}
}
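/*
 * For illustration: a call <TARGET> at address A is encoded as e8 <disp32>
 * with disp32 = TARGET - (A + 5). Here code/orig_code are rewound 6 bytes
 * from the return address, so the call's e8 byte sits at offset 1 and its
 * displacement at offset 2; the new displacement written above is therefore
 * addr - (orig_code + 1) - 5, i.e. relative to the end of the call.
 */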
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	guint32 offset;

	/* Patch the jump table entry used by the plt entry */

	/* A PLT entry: jmp *<DISP>(%ebx) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0xa3);

	offset = *(guint32*)(code + 2);
	if (!got)
		got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
	*(guint8**)((guint8*)got + offset) = addr;
}
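/*
 * For illustration: the PLT entry is a 6-byte indirect jump, ff a3 <disp32>,
 * i.e. jmp *disp32(%ebx) with %ebx holding the GOT base. Patching never
 * rewrites the instruction itself, only the GOT slot it loads from. The info
 * offset read by mono_arch_get_plt_info_offset () further down sits
 * immediately after these 6 bytes.
 */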
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br_ex_check;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
	int cfa_offset; /* cfa = cfa_reg + cfa_offset */

	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (target_mgreg_t)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (target_mgreg_t)
	 */

	/* Compute frame offsets relative to the frame pointer %ebp */
	arg_offset = sizeof (target_mgreg_t);
	caller_ip_offset = 2 * sizeof (target_mgreg_t);
	offset = 0;
	offset += sizeof (MonoLMF);
	lmf_offset = -offset;
	offset += X86_NREG * sizeof (target_mgreg_t);
	regarray_offset = -offset;
	/* Argument area */
	offset += 4 * sizeof (target_mgreg_t);
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
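	/*
	 * For illustration, the resulting frame layout (offsets relative to
	 * %ebp, growing downwards) is roughly:
	 *
	 *     ebp + 8   caller ip (caller_ip_offset)
	 *     ebp + 4   trampoline argument (arg_offset)
	 *     ebp + 0   saved ebp
	 *     ebp - sizeof (MonoLMF)             LMF (lmf_offset)
	 *     ... - X86_NREG * 4                 saved register array (regarray_offset)
	 *     below that                         4-word outgoing argument area
	 *
	 * with frame_size rounded up to MONO_ARCH_FRAME_ALIGNMENT.
	 */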
	/* ret addr and arg are on the stack */
	cfa_offset = 2 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);

	/* Allocate frame */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* There are three words on the stack already, so subtracting 4 extra bytes keeps the stack aligned to 16, which is needed on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (target_mgreg_t));
	/* Save all registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		int reg = i;

		if (i == X86_EBP) {
			/* Save original ebp */
			/* EAX is already saved */
			x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
			reg = X86_EAX;
		} else if (i == X86_ESP) {
			/* Save original esp */
			/* EAX is already saved */
			x86_mov_reg_reg (code, X86_EAX, X86_EBP);
			/* Saved ebp + trampoline arg + return addr */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (target_mgreg_t));
			reg = X86_EAX;
		}
		x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (target_mgreg_t)), reg, sizeof (target_mgreg_t));
	}
	/* Setup LMF */
	/* eip */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t));
	}
	/* method */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (target_mgreg_t));
	} else {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (target_mgreg_t));
	}
	/* esp */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (target_mgreg_t));
	/* callee save registers */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (target_mgreg_t));
	/* Push LMF */
	/* get the address of lmf for the current thread */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, mono_get_lmf_addr);
	}
	/* lmf->lmf_addr = lmf_addr (%eax) */
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->previous_lmf = *(lmf_addr) */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (target_mgreg_t));
	/* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (target_mgreg_t));
	/* *lmf_addr = lmf */
	x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));
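	/*
	 * For illustration: the trampoline flag is the low bit of previous_lmf
	 * (LMF structures are word-aligned, so the bit is otherwise always
	 * zero). It is set by the ADD 1 above and stripped again by the
	 * matching SUB 1 in the "Restore LMF" sequence further down before the
	 * link is unhooked.
	 */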
	/* Call trampoline function */
	/* Arg 1 - registers */
	x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
	x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg2 - calling code */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
	} else {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	}
	/* Arg3 - trampoline argument */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg4 - trampoline address */
	// FIXME:
	x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	x86_mov_reg_reg (code, X86_EDX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
	x86_branch_disp (code, X86_CC_Z, 3, FALSE);
	x86_breakpoint (code);
#endif
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
		x86_call_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		x86_call_code (code, tramp);
	}

	/*
	 * Overwrite the trampoline argument with the address we need to jump to,
	 * to free %eax.
	 */
	x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);
	/* Restore LMF */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Check for interruptions */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
		x86_call_reg (code, X86_EAX);
	} else {
		x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}

	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	br_ex_check = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	x86_leave (code);
	/*
	 * The exception is in eax.
	 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
	 * stack to look the same.
	 * The stack contains the ret addr and the trampoline argument; the throw trampoline
	 * expects it to contain the ret addr and the exception. It also needs to be aligned
	 * after the exception is pushed.
	 */
	/* Align stack */
	x86_push_reg (code, X86_EAX);
	/* Push the exception */
	x86_push_reg (code, X86_EAX);
	//x86_breakpoint (code);
	/* Push the original return value */
	x86_push_membase (code, X86_ESP, 3 * 4);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	} else {
		x86_mov_reg_imm (code, X86_ECX, (guint8*)mono_get_rethrow_preserve_exception_addr ());
	}
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t));
	x86_jump_reg (code, X86_ECX);
	/* Normal case */
	mono_x86_patch (br_ex_check, code);

	/* Restore registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		if (i == X86_ESP || i == X86_EBP)
			continue;
		if (i == X86_EAX && tramp_type != MONO_TRAMPOLINE_AOT_PLT)
			continue;
		x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);
	}

	/* Restore frame */
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the value returned by the trampoline */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
		/* The trampoline returns normally, pop the trampoline argument */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		cfa_offset -= sizeof (target_mgreg_t);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
		x86_ret (code);
	} else {
		x86_ret (code);
	}

	g_assert ((code - buf) <= 256);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
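/*
 * For illustration: a specific trampoline (see
 * mono_arch_create_specific_trampoline () below) reaches this code via
 * "push <arg>; jmp <generic tramp>", so on entry [esp] holds the trampoline
 * argument and [esp+4] the patched call site's return address. Because the
 * argument slot is overwritten above with the address returned by the C
 * trampoline function, the final "ret" in the non-MUST_RETURN path pops that
 * slot and jumps straight to the newly compiled/resolved code.
 */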
#define TRAMPOLINE_SIZE 10

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;

	tramp = mono_get_trampoline_code (tramp_type);

	code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4);

	x86_push_imm (buf, arg1);
	x86_jump_code (buf, tramp);
	g_assert ((buf - code) <= TRAMPOLINE_SIZE);

	mono_arch_flush_icache (code, buf - code);
	MONO_PROFILER_RAISE (jit_code_buffer, (code, buf - code, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));

	if (code_len)
		*code_len = buf - code;

	return code;
}
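/*
 * For illustration: the emitted pair is at most 10 bytes, matching
 * TRAMPOLINE_SIZE:
 *
 *     push $arg1              ; 68 imm32, 5 bytes
 *     jmp <generic tramp>     ; up to 5 bytes
 *
 * i.e. each specific trampoline just tags the shared generic trampoline with
 * its per-call-site argument.
 */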
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
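	/*
	 * For illustration: the rgctx is a chain of arrays whose first word
	 * points to the next array, so each level of depth contributes
	 * size - 1 usable slots. The loop above converts the flat slot number
	 * into a (depth, index) pair; the code emitted below follows depth
	 * link loads and then fetches the entry at
	 * sizeof (target_mgreg_t) * (index + 1).
	 */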
	tramp_size = (aot ? 64 : 36) + 6 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	if (!mrgctx) {
		/* load rgctx ptr from vtable */
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
		/* is the rgctx ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		else
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (target_mgreg_t) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);
	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		x86_jump_reg (code, X86_EAX);
	} else {
		tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		x86_jump_code (code, tramp);
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
/*
 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
 *
 * This is a general variant of the rgctx fetch trampolines. It receives a pointer to a gpointer[2] array in the rgctx reg.
 * The first entry contains the slot, the second the trampoline to call if the slot is not filled.
 */
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (aot);

	unwind_ops = mono_arch_get_cie_program ();

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	// FIXME: Currently, we always go to the slow path.

	/* Load trampoline addr */
	x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_RGCTX_REG, 4, 4);
	/* Load mrgctx/vtable */
	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	x86_jump_reg (code, X86_EAX);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
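/*
 * For illustration: the descriptor loaded above is the two-word array named
 * in the comment, so slot = desc [0] and desc [1] is the fallback trampoline.
 * As the FIXME notes, the emitted code currently ignores desc [0] and always
 * tail-calls desc [1], with the vtable/mrgctx reloaded into
 * MONO_ARCH_VTABLE_REG.
 */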
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8*)ji->code_start;

	x86_push_imm (code, func_arg);
	x86_call_code (code, (guint8*)func);
}
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
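/*
 * For illustration: 'code' points just past a potential call. A near call is
 * e8 <disp32> with the displacement relative to the *following* instruction,
 * so the target is simply code + disp. A hypothetical "e8 fb ff ff ff"
 * ending at address A therefore targets A - 5, i.e. the call instruction
 * itself.
 */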
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
{
	return *(guint32*)(plt_entry + 6);
}
/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 * Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
 */
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;
	GSList *unwind_ops;

	buf_len = 10;

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	x86_mov_reg_imm (code, X86_EAX, arg);
	x86_jump_code (code, addr);
	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
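/*
 * For illustration: like the static rgctx trampoline above, this emits at
 * most 10 bytes ("mov $arg, %eax; jmp addr"); the only difference is that
 * the argument travels in %eax rather than in MONO_ARCH_RGCTX_REG.
 */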
/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * mini_get_dbg_callbacks ()->single_step_from_context ()/mini_get_dbg_callbacks ()->breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = mono_global_codeman_reserve (tramp_size);

	framesize = 0;

	/* Argument area */
	framesize += sizeof (target_mgreg_t);

	framesize = ALIGN_TO (framesize, 8);
	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
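	/*
	 * For illustration, the frame built below holds (relative to %esp after
	 * the prologue): one word of outgoing argument area at the bottom, then
	 * the MonoContext at ctx_offset (8-aligned), with the total rounded up
	 * to MONO_ARCH_FRAME_ALIGNMENT. The debugger callback receives a
	 * pointer to that context as its single stack argument.
	 */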
	// CFA = sp + 4
	cfa_offset = 4;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);

	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* The + 8 makes the stack aligned */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);

	/* Initialize a MonoContext structure on the stack */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (target_mgreg_t));
	/* Store the caller's esp (ebp + cfa_offset), not the trampoline's */
	x86_mov_reg_reg (code, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (target_mgreg_t));
	/* Call the single step/breakpoint function in sdb */
	x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (target_mgreg_t));

	if (aot) {
		x86_breakpoint (code);
	} else {
		if (single_step)
			x86_call_code (code, mini_get_dbg_callbacks ()->single_step_from_context);
		else
			x86_call_code (code, mini_get_dbg_callbacks ()->breakpoint_from_context);
	}

	/* Restore registers from ctx */
	/* Overwrite the saved ebp */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (target_mgreg_t));
	/* Overwrite the saved eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (target_mgreg_t));
	x86_leave (code);
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	x86_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}