/**
 * \file
 * JIT trampoline code for x86
 *
 * Author:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
14 #include <mono/metadata/abi-details.h>
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/metadata-internals.h>
17 #include <mono/metadata/marshal.h>
18 #include <mono/metadata/tabledefs.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/gc-internals.h>
21 #include <mono/arch/x86/x86-codegen.h>
23 #include <mono/utils/memcheck.h>
27 #include "mini-runtime.h"
28 #include "debugger-agent.h"
29 #include "jit-icalls.h"
30 #include "mono/utils/mono-tls-inline.h"
33 * mono_arch_get_unbox_trampoline:
35 * @addr: pointer to native code for @m
37 * when value type methods are called through the vtable we need to unbox the
38 * this argument. This method returns a pointer to a trampoline which does
39 * unboxing before calling the method
42 mono_arch_get_unbox_trampoline (MonoMethod
*m
, gpointer addr
)
45 int this_pos
= 4, size
= 16;
46 MonoDomain
*domain
= mono_domain_get ();
47 MonoMemoryManager
*mem_manager
= m_method_get_mem_manager (domain
, m
);
50 start
= code
= mono_mem_manager_code_reserve (mem_manager
, size
);
52 unwind_ops
= mono_arch_get_cie_program ();
54 x86_alu_membase_imm (code
, X86_ADD
, X86_ESP
, this_pos
, MONO_ABI_SIZEOF (MonoObject
));
55 x86_jump_code (code
, addr
);
56 g_assertf ((code
- start
) <= size
, "%d %d", (int)(code
- start
), size
);
58 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE
, m
));
60 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
66 mono_arch_get_static_rgctx_trampoline (MonoMemoryManager
*mem_manager
, gpointer arg
, gpointer addr
)
70 MonoDomain
*domain
= mono_domain_get ();
72 const int buf_len
= 10;
74 start
= code
= mono_mem_manager_code_reserve (mem_manager
, buf_len
);
76 unwind_ops
= mono_arch_get_cie_program ();
78 x86_mov_reg_imm (code
, MONO_ARCH_RGCTX_REG
, (gsize
)arg
);
79 x86_jump_code (code
, addr
);
80 g_assertf ((code
- start
) <= buf_len
, "%d %d", (int)(code
- start
), buf_len
);
82 mono_arch_flush_icache (start
, code
- start
);
83 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
));
85 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
91 mono_arch_patch_callsite (guint8
*method_start
, guint8
*orig_code
, guint8
*addr
)
96 // Since method_start is retrieved from function return address (below current call/jmp to patch) there is a case when
97 // last instruction of a function is the call (due to OP_NOT_REACHED) instruction and then directly followed by a
98 // different method. In that case current orig_code points into next method and method_start will also point into
99 // next method, not the method including the call to patch. For this specific case, fallback to using a method_start of NULL.
100 gboolean can_write
= mono_breakpoint_clean_code (method_start
!= orig_code
? method_start
: NULL
, orig_code
, 8, buf
, sizeof (buf
));
104 /* go to the start of the call instruction
106 * address_byte = (m << 6) | (o << 3) | reg
107 * call opcode: 0xff address_byte displacement
113 if (code
[1] == 0xe8) {
115 mono_atomic_xchg_i32 ((gint32
*)(orig_code
+ 2), (gsize
)addr
- ((gsize
)orig_code
+ 1) - 5);
117 /* Tell valgrind to recompile the patched code */
118 VALGRIND_DISCARD_TRANSLATIONS (orig_code
+ 2, 4);
120 } else if (code
[1] == 0xe9) {
121 /* A PLT entry: jmp <DISP> */
123 mono_atomic_xchg_i32 ((gint32
*)(orig_code
+ 2), (gsize
)addr
- ((gsize
)orig_code
+ 1) - 5);
125 printf ("Invalid trampoline sequence: %x %x %x %x %x %x n", code
[0], code
[1], code
[2], code
[3],
128 g_assert_not_reached ();
133 mono_arch_patch_plt_entry (guint8
*code
, gpointer
*got
, host_mgreg_t
*regs
, guint8
*addr
)
137 /* Patch the jump table entry used by the plt entry */
139 /* A PLT entry: jmp *<DISP>(%ebx) */
140 g_assert (code
[0] == 0xff);
141 g_assert (code
[1] == 0xa3);
143 offset
= *(guint32
*)(code
+ 2);
145 got
= (gpointer
*)(gsize
) regs
[MONO_ARCH_GOT_REG
];
146 *(guint8
**)((guint8
*)got
+ offset
) = addr
;
/*
 * mono_arch_create_generic_trampoline:
 *
 * Emit the generic trampoline for TRAMP_TYPE: build a frame, save all
 * registers and an LMF, call the C trampoline function, then either return
 * the produced value or (in the full upstream code) jump to the resolved
 * address.
 *
 * NOTE(review): this extraction lost several structural lines — the opening
 * brace, 'offset = 0;', the interior guards of the register-save loop, the
 * non-JUMP/non-JIT 'else' branches, 'br_ex_check = code;', the 'if (aot)'
 * alternations around the aotconst loads, the final ret/jump-to-EAX epilogue
 * and the closing brace. The tokens below are kept exactly as extracted;
 * compare against the upstream file before relying on control flow.
 */
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
	const char *tramp_name;
	guint8 *buf, *code, *tramp, *br_ex_check;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
	int cfa_offset; /* cfa = cfa_reg + cfa_offset */

	const int buf_len = 256;

	code = buf = mono_global_codeman_reserve (buf_len);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (target_mgreg_t)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (target_mgreg_t)
	 */

	/* Compute frame offsets relative to the frame pointer %ebp */
	arg_offset = sizeof (target_mgreg_t);
	caller_ip_offset = 2 * sizeof (target_mgreg_t);
	/* NOTE(review): 'offset' is read-modified before any visible initialization;
	 * an 'offset = 0;' line appears to have been lost in extraction. */
	offset += sizeof (MonoLMF);
	lmf_offset = -offset;
	offset += X86_NREG * sizeof (target_mgreg_t);
	regarray_offset = -offset;
	/* Outgoing argument area for the 4-arg call to the C trampoline function */
	offset += 4 * sizeof (target_mgreg_t);
	frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);

	/* ret addr and arg are on the stack */
	cfa_offset = 2 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	// IP saved at CFA - 4
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);

	/* Standard prologue: push %ebp; mov %esp -> %ebp */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);

	/* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (target_mgreg_t));

	/* Save all registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		/* NOTE(review): the opening 'if (i == X86_EBP) {' guard, the default
		 * 'int reg = i;' path and a closing brace are missing here. */
		/* Save original ebp */
		/* EAX is already saved */
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
		} else if (i == X86_ESP) {
			/* Save original esp */
			/* EAX is already saved */
			x86_mov_reg_reg (code, X86_EAX, X86_EBP);
			/* Saved ebp + trampoline arg + return addr */
			x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (target_mgreg_t));
		/* Spill the chosen register into the regarray slot for this index.
		 * NOTE(review): 'reg' has no visible declaration (lost 'int reg' line). */
		x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (target_mgreg_t)), reg, sizeof (target_mgreg_t));

	/* Set up an LMF frame: record the caller ip (or 0 for JUMP trampolines) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (target_mgreg_t));
		/* NOTE(review): else-branch brace structure lost; upstream loads the
		 * caller ip only in the non-JUMP case. */
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->method: the trampoline arg is a MonoMethod* for JIT/JUMP, else 0 */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (target_mgreg_t));
		x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (target_mgreg_t));
	/* lmf->esp */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (target_mgreg_t));
	/* callee save registers */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (target_mgreg_t)), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (target_mgreg_t));

	/* get the address of lmf for the current thread */
	/* NOTE(review): upstream wraps the next three statements in
	 * 'if (aot) { ... } else { ... }'; the guards were lost in extraction. */
	code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
	x86_call_reg (code, X86_EAX);
	x86_call_code (code, mono_get_lmf_addr);

	/* lmf->lmf_addr = lmf_addr (%eax) */
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (target_mgreg_t));
	/* lmf->previous_lmf = *(lmf_addr) */
	x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (target_mgreg_t));
	/* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
	x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (target_mgreg_t));
	/* *lmf_addr = lmf */
	x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Call trampoline function */
	/* Arg 1 - registers */
	x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
	x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg2 - calling code */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));
		/* NOTE(review): else-branch brace structure lost here as well */
		x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (target_mgreg_t));
		x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg3 - trampoline argument */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (target_mgreg_t)), X86_EAX, sizeof (target_mgreg_t));
	/* Arg4 - trampoline address */
	x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (target_mgreg_t)), 0, sizeof (target_mgreg_t));

	/* check the stack is aligned after the ret ip is pushed */
	/* Debug-only alignment check: trap (int3) if esp % 16 != 0 */
	x86_mov_reg_reg (code, X86_EDX, X86_ESP);
	x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
	x86_branch_disp (code, X86_CC_Z, 3, FALSE);
	x86_breakpoint (code);

	/* NOTE(review): upstream alternates aot/non-aot here too */
	code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
	x86_call_reg (code, X86_EAX);
	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	x86_call_code (code, tramp);

	/*
	 * Overwrite the trampoline argument with the address we need to jump to,
	 */
	x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);

	/* Unlink the LMF: *lmf->lmf_addr = lmf->previous_lmf - 1 */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
	x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (target_mgreg_t));

	/* Check for interruptions */
	code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
	x86_call_reg (code, X86_EAX);
	x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);

	/* A non-NULL result is a pending exception object */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* NOTE(review): 'br_ex_check = code;' is missing before this branch;
	 * mono_x86_patch (br_ex_check, code) below depends on it. */
	x86_branch8 (code, X86_CC_Z, -1, 1);

	/*
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 *
	 * The exception is in eax.
	 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
	 * stack to look the same.
	 * The stack contains the ret addr, and the trampoline argument, the throw trampoline
	 * expects it to contain the ret addr and the exception. It also needs to be aligned
	 * after the exception is pushed.
	 */
	/* Pad stack for 16-byte alignment */
	x86_push_reg (code, X86_EAX);
	/* Push the exception */
	x86_push_reg (code, X86_EAX);
	//x86_breakpoint (code);
	/* Push the original return value */
	x86_push_membase (code, X86_ESP, 3 * 4);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	/* Not really a jit icall */
	code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	x86_mov_reg_imm (code, X86_ECX, (gsize)(guint8*)mono_get_rethrow_preserve_exception_addr ());
	x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof (target_mgreg_t));
	x86_jump_reg (code, X86_ECX);

	/* Normal (no-exception) path continues here */
	mono_x86_patch (br_ex_check, code);

	/* Restore registers */
	for (i = X86_EAX; i <= X86_EDI; ++i) {
		/* esp/ebp are restored by the epilogue; eax carries the result
		 * except for AOT PLT trampolines */
		if (i == X86_ESP || i == X86_EBP)
		if (i == X86_EAX && tramp_type != MONO_TRAMPOLINE_AOT_PLT)
		x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);

	/* NOTE(review): the 'leave'/pop-ebp epilogue instruction is missing
	 * before this unwind bookkeeping. */
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the value returned by the trampoline */
		x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
		/* The trampoline returns normally, pop the trampoline argument */
		x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
		cfa_offset -= sizeof (target_mgreg_t);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
		/* NOTE(review): the 'x86_ret'/'else { jump to *arg }' tail and the
		 * closing braces are missing from this extraction. */

	g_assertf ((code - buf) <= buf_len, "%d %d", (int)(code - buf), buf_len);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
399 #define TRAMPOLINE_SIZE 10
402 mono_arch_create_specific_trampoline (gpointer arg1
, MonoTrampolineType tramp_type
, MonoMemoryManager
*mem_manager
, guint32
*code_len
)
404 guint8
*code
, *buf
, *tramp
;
406 tramp
= mono_get_trampoline_code (tramp_type
);
408 const int size
= TRAMPOLINE_SIZE
;
410 code
= buf
= mono_mem_manager_code_reserve_align (mem_manager
, size
, 4);
412 x86_push_imm (buf
, (gsize
)arg1
);
413 x86_jump_code (buf
, tramp
);
414 g_assertf ((code
- buf
) <= size
, "%d %d", (int)(code
- buf
), size
);
416 mono_arch_flush_icache (code
, buf
- code
);
417 MONO_PROFILER_RAISE (jit_code_buffer
, (code
, buf
- code
, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE
, mono_get_generic_trampoline_simple_name (tramp_type
)));
420 *code_len
= buf
- code
;
/*
 * mono_arch_create_rgctx_lazy_fetch_trampoline:
 *
 * Emit a trampoline which walks the rgctx array chain for SLOT and returns
 * the cached value, falling back to the slow-path fetch trampoline when any
 * pointer on the path (or the slot itself) is still NULL.
 *
 * NOTE(review): this extraction lost the return type, the declarations of
 * 'tramp_size', 'code', 'buf', 'tramp', 'mrgctx', 'index', 'depth' and 'i',
 * the 'break;' inside the depth-probing loop, the 'x86_ret' on the fast
 * path, the 'if (aot)'/'else' braces around the tail, and the closing
 * 'return'/brace. Tokens are kept exactly as extracted.
 */
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
	guint8 **rgctx_null_jumps;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	unwind_ops = mono_arch_get_cie_program ();

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	/* mrgctx slots live after the fixed MonoMethodRuntimeGenericContext header */
	index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (target_mgreg_t);
	/* Find how many indirections (depth) are needed to reach 'index'.
	 * NOTE(review): the loop's 'break;' and index adjustment lines are missing. */
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);
		if (index < size - 1)

	tramp_size = (aot ? 64 : 36) + 6 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	/* One forward branch per possible NULL on the path */
	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable/mrgctx ptr */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
	/* NOTE(review): upstream guards the next three statements with
	 * 'if (!mrgctx) {' — only vtables need the extra dereference. */
	/* load rgctx ptr from vtable */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
	/* is the rgctx ptr null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [0] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
		x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (code, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		x86_branch8 (code, X86_CC_Z, -1, 1);

	/* fetch slot */
	x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (target_mgreg_t) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (code, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	x86_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	/* NOTE(review): the 'x86_ret (code);' for the fast path is missing. */

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* Slow path: reload the vtable/mrgctx argument into the expected register */
	x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);

	/* NOTE(review): upstream wraps the aotconst load vs. the specific
	 * trampoline creation in 'if (aot) { ... } else { ... }'. */
	code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
	x86_jump_reg (code, X86_EAX);
	MonoMemoryManager *mem_manager = mono_domain_ambient_memory_manager (mono_get_root_domain ());
	tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mem_manager, NULL);

	/* jump to the actual trampoline */
	x86_jump_code (code, tramp);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assertf (code - buf <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
524 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
526 * This is a general variant of the rgctx fetch trampolines. It receives a pointer to gpointer[2] in the rgctx reg. The first entry contains the slot, the second
527 * the trampoline to call if the slot is not filled.
530 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo
**info
, gboolean aot
)
534 MonoJumpInfo
*ji
= NULL
;
535 GSList
*unwind_ops
= NULL
;
539 unwind_ops
= mono_arch_get_cie_program ();
543 code
= buf
= mono_global_codeman_reserve (tramp_size
);
545 // FIXME: Currently, we always go to the slow path.
547 /* Load trampoline addr */
548 x86_mov_reg_membase (code
, X86_EAX
, MONO_ARCH_RGCTX_REG
, 4, 4);
549 /* Load mrgctx/vtable */
550 x86_mov_reg_membase (code
, MONO_ARCH_VTABLE_REG
, X86_ESP
, 4, 4);
552 x86_jump_reg (code
, X86_EAX
);
554 mono_arch_flush_icache (buf
, code
- buf
);
555 MONO_PROFILER_RAISE (jit_code_buffer
, (buf
, code
- buf
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
));
557 g_assertf (code
- buf
<= tramp_size
, "%d %d", (int)(code
- buf
), tramp_size
);
559 *info
= mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf
, code
- buf
, ji
, unwind_ops
);
565 mono_arch_invalidate_method (MonoJitInfo
*ji
, void *func
, gpointer func_arg
)
567 /* FIXME: This is not thread safe */
568 guint8
*code
= (guint8
*)ji
->code_start
;
570 x86_push_imm (code
, (gsize
)func_arg
);
571 x86_call_code (code
, (guint8
*)func
);
575 mono_arch_get_call_target (guint8
*code
)
577 if (code
[-5] == 0xe8) {
578 gint32 disp
= *(gint32
*)(code
- 4);
579 guint8
*target
= code
+ disp
;
588 mono_arch_get_plt_info_offset (guint8
*plt_entry
, host_mgreg_t
*regs
, guint8
*code
)
590 return *(guint32
*)(plt_entry
+ 6);
594 * mono_arch_get_gsharedvt_arg_trampoline:
596 * Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
599 mono_arch_get_gsharedvt_arg_trampoline (MonoDomain
*domain
, gpointer arg
, gpointer addr
)
601 guint8
*code
, *start
;
603 MonoMemoryManager
*mem_manager
= mono_domain_ambient_memory_manager (domain
);
605 const int buf_len
= 10;
607 start
= code
= mono_mem_manager_code_reserve (mem_manager
, buf_len
);
609 unwind_ops
= mono_arch_get_cie_program ();
611 x86_mov_reg_imm (code
, X86_EAX
, (gsize
)arg
);
612 x86_jump_code (code
, addr
);
613 g_assertf ((code
- start
) <= buf_len
, "%d %d", (int)(code
- start
), buf_len
);
615 mono_arch_flush_icache (start
, code
- start
);
616 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE
, NULL
));
618 mono_tramp_info_register (mono_tramp_info_create (NULL
, start
, code
- start
, NULL
, unwind_ops
), domain
);
/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * mini_get_dbg_callbacks ()->single_step_from_context ()/mini_get_dbg_callbacks ()->breakpoint_from_context (),
 * then restores the (potentially changed) context.
 *
 * NOTE(review): this extraction lost the return type, the 'code'/'buf'
 * declarations, the initializations of 'framesize' and 'cfa_offset', the
 * 'if (aot)' guards around the debugger-callback calls, the 'leave/ret'
 * epilogue and the closing brace. Tokens are kept exactly as extracted.
 */
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = mono_global_codeman_reserve (tramp_size);

	/* NOTE(review): 'framesize' is accumulated before any visible
	 * initialization — a 'framesize = 0;' line appears to be missing. */
	framesize += sizeof (target_mgreg_t);

	framesize = ALIGN_TO (framesize, 8);
	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
	// IP saved at CFA - 4
	/* NOTE(review): 'cfa_offset' is used here before any visible
	 * initialization ('cfa_offset = sizeof (target_mgreg_t);' lost?). */
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);

	/* Standard prologue */
	x86_push_reg (code, X86_EBP);
	cfa_offset += sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, - cfa_offset);

	x86_mov_reg_reg (code, X86_EBP, X86_ESP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
	/* The + 8 makes the stack aligned */
	x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);

	/* Initialize a MonoContext structure on the stack */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (target_mgreg_t));
	/* ctx.ebp = the caller's saved ebp (at 0(%ebp)) */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (target_mgreg_t));
	/* Compute the caller's esp into %eax */
	x86_mov_reg_reg (code, X86_EAX, X86_EBP);
	x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
	/* NOTE(review): this stores %esp, but the two instructions above just
	 * computed the caller's esp into %eax — X86_EAX is almost certainly the
	 * intended source register here; verify against upstream. */
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_ESP, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (target_mgreg_t));
	/* ctx.eip = the return address (at 4(%ebp)) */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (target_mgreg_t));

	/* Call the single step/breakpoint function in sdb */
	x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
	x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (target_mgreg_t));

	/* NOTE(review): the 'if (aot)' guard selecting between the int3 stub and
	 * the single_step/breakpoint alternation is missing here. */
	x86_breakpoint (code);

	x86_call_code (code, mini_get_dbg_callbacks ()->single_step_from_context);
	x86_call_code (code, mini_get_dbg_callbacks ()->breakpoint_from_context);

	/* Restore registers from ctx */
	/* Overwrite the saved ebp */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (target_mgreg_t));
	/* Overwrite saved eip */
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (target_mgreg_t));

	/* NOTE(review): the 'leave'/'ret' epilogue instructions are missing
	 * before this unwind bookkeeping. */
	cfa_offset -= sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);

	/* NOTE(review): this flushes starting at 'code' (the END of the emitted
	 * region); the first argument should likely be 'buf'. */
	mono_arch_flush_icache (code, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));
	g_assertf (code - buf <= tramp_size, "%d %d", (int)(code - buf), tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
/*
 * mono_arch_get_interp_to_native_trampoline:
 *
 * Emit the trampoline the interpreter uses to call native code: it copies
 * the outgoing stack arguments from a CallContext, performs the call, and
 * stores the integer/fp return values back into the CallContext.
 *
 * NOTE(review): this extraction lost the return type, the 'buf_len'
 * declaration, the 'x86_ret'/'return start;' tail of the interpreter arm,
 * and the '#else' arm surrounding 'g_assert_not_reached ()'. Tokens are
 * kept exactly as extracted.
 */
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	guint8 *label_start_copy, *label_exit_copy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int ccontext_offset, target_offset;

	/* NOTE(review): 'buf_len' has no visible declaration here. */
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len);

	/* Standard prologue */
	x86_push_reg (code, X86_EBP);
	/* args are on the stack, above saved EBP and pushed return EIP */
	target_offset = 2 * sizeof (target_mgreg_t);
	ccontext_offset = target_offset + sizeof (target_mgreg_t);
	x86_mov_reg_reg (code, X86_EBP, X86_ESP);

	/* Save some used regs and align stack to 16 bytes */
	x86_push_reg (code, X86_EDI);
	x86_push_reg (code, X86_ESI);

	/* load pointer to CallContext* into ESI */
	x86_mov_reg_membase (code, X86_ESI, X86_EBP, ccontext_offset, sizeof (target_mgreg_t));

	/* allocate the stack space necessary for the call */
	x86_mov_reg_membase (code, X86_ECX, X86_ESI, MONO_STRUCT_OFFSET (CallContext, stack_size), sizeof (target_mgreg_t));
	x86_alu_reg_reg (code, X86_SUB, X86_ESP, X86_ECX);

	/* copy stack from the CallContext, ESI = source, EDI = dest, ECX bytes to copy */
	x86_mov_reg_membase (code, X86_ESI, X86_ESI, MONO_STRUCT_OFFSET (CallContext, stack), sizeof (target_mgreg_t));
	x86_mov_reg_reg (code, X86_EDI, X86_ESP);

	/* Word-by-word copy loop; exits when ECX reaches zero */
	label_start_copy = code;
	x86_test_reg_reg (code, X86_ECX, X86_ECX);
	label_exit_copy = code;
	x86_branch8 (code, X86_CC_Z, 0, FALSE);
	x86_mov_reg_membase (code, X86_EDX, X86_ESI, 0, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_EDI, 0, X86_EDX, sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_ADD, X86_EDI, sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_ADD, X86_ESI, sizeof (target_mgreg_t));
	x86_alu_reg_imm (code, X86_SUB, X86_ECX, sizeof (target_mgreg_t));
	x86_jump_code (code, label_start_copy);
	x86_patch (label_exit_copy, code);

	/* load target addr */
	x86_mov_reg_membase (code, X86_EAX, X86_EBP, target_offset, sizeof (target_mgreg_t));

	/* call into native function */
	x86_call_reg (code, X86_EAX);

	/* Save return values into CallContext* */
	x86_mov_reg_membase (code, X86_ESI, X86_EBP, ccontext_offset, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESI, MONO_STRUCT_OFFSET (CallContext, eax), X86_EAX, sizeof (target_mgreg_t));
	x86_mov_membase_reg (code, X86_ESI, MONO_STRUCT_OFFSET (CallContext, edx), X86_EDX, sizeof (target_mgreg_t));

	/*
	 * We always pop ST0, even if we don't have return value. We seem to get away with
	 * this because fpstack is either empty or has one fp return value on top and the cpu
	 * doesn't trap if we read top of empty stack.
	 */
	x86_fst_membase (code, X86_ESI, MONO_STRUCT_OFFSET (CallContext, fret), TRUE, TRUE);

	/* restore ESI, EDI which were saved below rbp */
	x86_mov_reg_membase (code, X86_EDI, X86_EBP, - sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	x86_mov_reg_membase (code, X86_ESI, X86_EBP, - 2 * sizeof (target_mgreg_t), sizeof (target_mgreg_t));
	x86_mov_reg_reg (code, X86_ESP, X86_EBP);

	/* Epilogue. NOTE(review): the 'x86_ret (code);' after this pop is missing. */
	x86_pop_reg (code, X86_EBP);

	g_assertf ((code - start) <= buf_len, "%d %d", (int)(code - start), buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	*info = mono_tramp_info_create ("interp_to_native_trampoline", start, code - start, ji, unwind_ops);
	/* NOTE(review): 'return start;', the '#else' arm enclosing the
	 * unreachable assertion, and the closing brace were lost here. */
	g_assert_not_reached ();
#endif /* DISABLE_INTERPRETER */