/**
 * \file
 * JIT trampoline code for ARM
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *
 * (C) 2001-2003 Ximian, Inc.
 * Copyright 2003-2011 Novell Inc
 * Copyright 2011 Xamarin Inc
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */

#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/profiler-private.h>
#include <mono/arch/arm/arm-codegen.h>
#include <mono/arch/arm/arm-vfp-codegen.h>

#include "mini.h"
#include "mini-arm.h"
#include "mini-runtime.h"
#include "debugger-agent.h"
#include "jit-icalls.h"

#ifndef DISABLE_INTERPRETER
#include "interp/interp.h"
#endif
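
/*
 * Patch the call site that entered a trampoline so that it calls ADDR
 * directly. Two shapes are handled below: a direct b/bl instruction, and an
 * indirect call ending in a bx/"mov pc" instruction; arm_patch () performs
 * the actual instruction rewriting in both cases.
 */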
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr)
{
	guint32 *code = (guint32*)code_ptr;

	/* This is the 'bl' or the 'mov pc' instruction */
	--code;

	/*
	 * Note that methods are also called with the bl opcode.
	 */
	if ((((*code) >> 25) & 7) == 5) {
		/*g_print ("direct patching\n");*/
		arm_patch ((guint8*)code, addr);
		mono_arch_flush_icache ((guint8*)code, 4);
		return;
	}

	if ((((*code) >> 20) & 0xFF) == 0x12) {
		/*g_print ("patching bx\n");*/
		arm_patch ((guint8*)code, addr);
		mono_arch_flush_icache ((guint8*)(code - 2), 4);
		return;
	}

	g_assert_not_reached ();
}
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, host_mgreg_t *regs, guint8 *addr)
{
	guint8 *jump_entry;

	/* Patch the jump table entry used by the plt entry */
	if (*(guint32*)code == 0xe59fc000) {
		/* ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0); */
		guint32 offset = ((guint32*)code)[2];

		jump_entry = code + offset + 12;
	} else if (*(guint16*)(code - 4) == 0xf8df) {
		/*
		 * Thumb PLT entry, begins with ldr.w ip, [pc, #8], code points to entry + 4, see
		 * mono_arm_get_thumb_plt_entry ().
		 */
		guint32 offset;

		code -= 4;
		offset = *(guint32*)(code + 12);
		jump_entry = code + offset + 8;
	} else {
		g_assert_not_reached ();
	}

	*(guint8**)jump_entry = addr;
}
#ifndef DISABLE_JIT

#define arm_is_imm12(v) ((int)(v) > -4096 && (int)(v) < 4096)
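
/*
 * Note: an ARM B/BL instruction encodes a signed 24-bit word offset relative
 * to PC (the branch address + 8), so a single branch can only reach targets
 * within roughly +/-32 MB; the helper below returns 0 when the target is
 * farther away than that.
 */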
96 * Return the instruction to jump from code to target, 0 if not
97 * reachable with a single instruction
99 static guint32
100 branch_for_target_reachable (guint8 *branch, guint8 *target)
102 gint diff = target - branch - 8;
103 g_assert ((diff & 3) == 0);
104 if (diff >= 0) {
105 if (diff <= 33554431)
106 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | (diff >> 2);
107 } else {
108 /* diff between 0 and -33554432 */
109 if (diff >= -33554432)
110 return (ARMCOND_AL << ARMCOND_SHIFT) | (ARM_BR_TAG) | ((diff >> 2) & ~0xff000000);
112 return 0;
static guint8*
emit_bx (guint8* code, int reg)
{
	if (mono_arm_thumb_supported ())
		ARM_BX (code, reg);
	else
		ARM_MOV_REG_REG (code, ARMREG_PC, reg);
	return code;
}
/* Stack size for trampoline function */
#define STACK ALIGN_TO (MONO_ABI_SIZEOF (MonoLMF), MONO_ARCH_FRAME_ALIGNMENT)

/* Method-specific trampoline code fragment size */
#define METHOD_TRAMPOLINE_SIZE 64

/* Jump-specific trampoline code fragment size */
#define JUMP_TRAMPOLINE_SIZE 64
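
/*
 * The generic trampoline is the shared tail that every specific trampoline of
 * a given MonoTrampolineType branches to. On entry LR points at the specific
 * trampoline's argument and SP at the registers it pushed; the code below
 * builds a MonoLMF, calls the C trampoline function for the type, and finally
 * transfers control to the address that function returned.
 */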
guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	const char *tramp_name;
	guint8 *buf, *code = NULL;
	guint8 *load_get_lmf_addr = NULL, *load_trampoline = NULL;
	guint8 *labels [16];
	gpointer *constants;
	int i, orig_cfa_offset, cfa_offset, regsave_size, lr_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int buf_len;

	/* Now we'll create in 'buf' the ARM trampoline code. This
	   is the trampoline code common to all methods. */

	buf_len = 272;

	/* Add space for saving/restoring VFP regs. */
	if (mono_arm_is_hard_float ())
		buf_len += 8 * 2;

	code = buf = mono_global_codeman_reserve (buf_len);

	/*
	 * At this point lr points to the specific arg and sp points to the saved
	 * regs on the stack (all but PC and SP). The original LR value has been
	 * saved as sp + LR_OFFSET by the push in the specific trampoline.
	 */

	/* The size of the area already allocated by the push in the specific trampoline */
	regsave_size = 14 * sizeof (target_mgreg_t);
	/* The offset where lr was saved inside the regsave area */
	lr_offset = 13 * sizeof (target_mgreg_t);

	// CFA = SP + (num registers pushed) * 4
	cfa_offset = 14 * sizeof (target_mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, ARMREG_SP, cfa_offset);
	// PC saved at sp+LR_OFFSET
	mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_LR, -4);
	/* Callee saved regs */
	for (i = 0; i < 8; ++i)
		mono_add_unwind_op_offset (unwind_ops, code, buf, ARMREG_R4 + i, -regsave_size + ((4 + i) * 4));
	if (aot) {
		/*
		 * For page trampolines the data is in r1, so just move it, otherwise use the got slot as below.
		 * The trampoline contains a pc-relative offset to the got slot
		 * preceding the got slot where the value is stored. The offset can be
		 * found at [lr + 0].
		 */
		/* See emit_trampolines () in aot-compiler.c for the '2' */
		if (aot == 2) {
			ARM_MOV_REG_REG (code, ARMREG_V2, ARMREG_R1);
		} else {
			ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
			ARM_ADD_REG_IMM (code, ARMREG_V2, ARMREG_V2, 4, 0);
			ARM_LDR_REG_REG (code, ARMREG_V2, ARMREG_V2, ARMREG_LR);
		}
	} else {
		ARM_LDR_IMM (code, ARMREG_V2, ARMREG_LR, 0);
	}
	ARM_LDR_IMM (code, ARMREG_V3, ARMREG_SP, lr_offset);
	/* we build the MonoLMF structure on the stack - see mini-arm.h
	 * The pointer to the struct is put in v1.
	 * the iregs array is already allocated on the stack by push.
	 */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - regsave_size);
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R2);
	cfa_offset += STACK - regsave_size;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	/* V1 == lmf */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, STACK - MONO_ABI_SIZEOF (MonoLMF));
	ARM_ADD_REG_REG (code, ARMREG_V1, ARMREG_SP, ARMREG_R2);

	/* ok, now we can continue with the MonoLMF setup, mostly untouched
	 * from emit_prolog in mini-arm.c
	 * This is a synthesized call to mono_get_lmf_addr ()
	 */
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_get_lmf_addr));
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
	} else {
		load_get_lmf_addr = code;
		code += 4;
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_R0);
	/*
	 * The stack now looks like:
	 *       <saved regs>
	 * v1 -> <rest of LMF>
	 * sp -> <alignment>
	 */

	/* r0 is the result from mono_get_lmf_addr () */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* new_lmf->previous_lmf = *lmf_addr */
	ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* *(lmf_addr) = v1 (the new lmf) */
	ARM_STR_IMM (code, ARMREG_V1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* save method info (it's in v2) */
	if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
		ARM_STR_IMM (code, ARMREG_V2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, method));
	else {
		ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
		ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, method));
	}
	/* save caller SP */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, cfa_offset);
	ARM_ADD_REG_REG (code, ARMREG_R2, ARMREG_SP, ARMREG_R2);
	ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, sp));
	/* save caller FP */
	ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (MONO_STRUCT_OFFSET (MonoLMF, iregs) + ARMREG_FP*4));
	ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fp));
	/* save the IP (caller ip) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		ARM_MOV_REG_IMM8 (code, ARMREG_R2, 0);
	} else {
		ARM_LDR_IMM (code, ARMREG_R2, ARMREG_V1, (MONO_STRUCT_OFFSET (MonoLMF, iregs) + 13*4));
	}
	ARM_STR_IMM (code, ARMREG_R2, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, ip));
	/* Save VFP registers. */
	if (mono_arm_is_hard_float ()) {
		/*
		 * Strictly speaking, we don't have to save d0-d7 in the LMF, but
		 * it's easier than attempting to store them on the stack since
		 * this trampoline code is pretty messy.
		 */
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fregs));
		ARM_FSTMD (code, ARM_VFP_D0, 8, ARMREG_R0);
	}

	/*
	 * Now we're ready to call xxx_trampoline ().
	 */
	/* Arg 1: the saved registers */
	ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, iregs), 0);

	/* Arg 2: code (the address right after the instruction that called us) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP) {
		ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0);
	} else {
		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_V3);
	}

	/* Arg 3: the specific argument, stored in v2 */
	ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_V2);
	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, GINT_TO_POINTER (mono_trampoline_type_to_jit_icall_id (tramp_type)));
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
	} else {
		load_trampoline = code;
		code += 4;
	}

	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_IP);

	/* OK, the code address is now in r0. Move it to the place on the stack
	 * where IP was saved (it is no longer useful to us and can be
	 * clobbered). This way we can just restore all the regs in one inst
	 * and branch to IP.
	 */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, iregs) + (ARMREG_R12 * sizeof (target_mgreg_t)));
	/*
	 * Now we restore the MonoLMF (see emit_epilogue in mini-arm.c)
	 * and the rest of the registers, so the method called will see
	 * the same state as before we executed.
	 */
	/* ip = previous_lmf */
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
	/* lr = lmf_addr */
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
	/* *(lmf_addr) = previous_lmf */
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	if (aot) {
		code = mono_arm_emit_aotconst (&ji, code, buf, ARMREG_IP, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_thread_force_interruption_checkpoint_noraise));
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = (gpointer)mono_thread_force_interruption_checkpoint_noraise;
		code += 4;
	}
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	code = emit_bx (code, ARMREG_IP);

	/* Check whether an exception needs to be thrown */
	ARM_CMP_REG_IMM (code, ARMREG_R0, 0, 0);
	labels [0] = code;
	ARM_B_COND (code, ARMCOND_NE, 0);
	orig_cfa_offset = cfa_offset;

	/* Normal case */

	/* Restore VFP registers. */
	if (mono_arm_is_hard_float ()) {
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, fregs));
		ARM_FLDMD (code, ARM_VFP_D0, 8, ARMREG_R0);
	}

	/* Non-standard function epilogue. Instead of doing a proper
	 * return, we just jump to the compiled code.
	 */
	/* Restore the registers and jump to the code:
	 * Note that IP has been conveniently set to the method addr.
	 */
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
	cfa_offset -= STACK - regsave_size;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	ARM_POP_NWB (code, 0x5fff);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, ARMREG_LR);
	if (tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
		ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_IP);
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, regsave_size);
	cfa_offset -= regsave_size;
	g_assert (cfa_offset == 0);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type))
		code = emit_bx (code, ARMREG_LR);
	else
		code = emit_bx (code, ARMREG_IP);

	if (!aot) {
		constants = (gpointer*)code;
		constants [0] = (gpointer)mono_get_lmf_addr;
		constants [1] = (gpointer)mono_get_trampoline_func (tramp_type);

		/* backpatch by emitting the missing instructions skipped above */
		ARM_LDR_IMM (load_get_lmf_addr, ARMREG_R0, ARMREG_PC, (code - load_get_lmf_addr - 8));
		ARM_LDR_IMM (load_trampoline, ARMREG_IP, ARMREG_PC, (code + 4 - load_trampoline - 8));

		code += 8;
	}
	/* Exception case */
	arm_patch (labels [0], code);

	cfa_offset = orig_cfa_offset;

	/*
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	/* Store the exception in place of IP */
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_V1, MONO_STRUCT_OFFSET (MonoLMF, iregs) + (ARMREG_R12 * sizeof (target_mgreg_t)));

	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, STACK - regsave_size);
	cfa_offset -= STACK - regsave_size;
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	/* Restore all regs */
	ARM_POP_NWB (code, 0x5fff);
	mono_add_unwind_op_same_value (unwind_ops, code, buf, ARMREG_LR);
	ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, regsave_size);
	cfa_offset -= regsave_size;
	g_assert (cfa_offset == 0);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	/* We are in the parent frame, the exception is in ip */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_IP);
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arm_emit_aotconst (&ji, code, buf, ARMREG_IP, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_rethrow_preserve_exception));
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = mono_get_rethrow_preserve_exception_addr ();
		code += 4;
	}
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, 0);
	/* Branch to the throw trampoline */
	/* lr contains the return address, the trampoline will use it as the throw site */
	code = emit_bx (code, ARMREG_IP);
	/* Flush instruction cache, since we've generated code */
	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	/* Sanity check */
	g_assert ((code - buf) <= buf_len);

	g_assert (info);
	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
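
/*
 * Specific trampolines are small per-call-site stubs (at most SPEC_TRAMP_SIZE
 * bytes): they push every register except PC and SP, make their argument
 * (e.g. the MonoMethod to compile) reachable through LR as an inline literal,
 * and branch to the generic trampoline for their type.
 */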
#define SPEC_TRAMP_SIZE 24

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	guint32 *constants;
	guint32 short_branch = FALSE;
	guint32 size = SPEC_TRAMP_SIZE;

	tramp = mono_get_trampoline_code (tramp_type);

	if (domain) {
		mono_domain_lock (domain);
		code = buf = mono_domain_code_reserve_align (domain, size, 4);
		if ((short_branch = branch_for_target_reachable (code + 4, tramp))) {
			size = 12;
			mono_domain_code_commit (domain, code, SPEC_TRAMP_SIZE, size);
		}
		mono_domain_unlock (domain);
	} else {
		code = buf = mono_global_codeman_reserve (size);
		short_branch = FALSE;
	}
	/* we could reduce this to 12 bytes if tramp is within reach:
	 * ARM_PUSH ()
	 * ARM_BL ()
	 * method-literal
	 * The called code can access method using the lr register
	 * A 20 byte sequence could be:
	 * ARM_PUSH ()
	 * ARM_MOV_REG_REG (lr, pc)
	 * ARM_LDR_IMM (pc, pc, 0)
	 * method-literal
	 * tramp-literal
	 */
	/* We save all the registers, except PC and SP */
	ARM_PUSH (code, 0x5fff);
	if (short_branch) {
		constants = (guint32*)code;
		constants [0] = short_branch | (1 << 24);
		constants [1] = GPOINTER_TO_UINT (arg1);
		code += 8;
	} else {
		ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 8); /* temp reg */
		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
		code = emit_bx (code, ARMREG_R1);

		constants = (guint32*)code;
		constants [0] = GPOINTER_TO_UINT (arg1);
		constants [1] = GPOINTER_TO_UINT (tramp);
		code += 8;
	}

	/* Flush instruction cache, since we've generated code */
	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type)));

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = code - buf;

	return buf;
}
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	MonoDomain *domain = mono_domain_get ();
	GSList *unwind_ops;
	guint32 size = 16;

	start = code = mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 4);
	ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, MONO_ABI_SIZEOF (MonoObject));
	code = emit_bx (code, ARMREG_IP);
	*(guint32*)code = (guint32)(gsize)addr;
	code += 4;
	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m));
	g_assert ((code - start) <= size);
	/*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name);
	g_print ("unbox code is at %p for method at %p\n", start, addr);*/

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
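
/*
 * Static rgctx trampoline: two instructions that load the rgctx argument into
 * MONO_ARCH_RGCTX_REG and then jump to ADDR, both values being read from an
 * inline literal pool placed right after the instructions.
 */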
gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len = 16;
	MonoDomain *domain = mono_domain_get ();

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	ARM_LDR_IMM (code, MONO_ARCH_RGCTX_REG, ARMREG_PC, 0);
	ARM_LDR_IMM (code, ARMREG_PC, ARMREG_PC, 0);
	*(guint32*)code = (guint32)(gsize)arg;
	code += 4;
	*(guint32*)code = (guint32)(gsize)addr;
	code += 4;

	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
/* Same as static rgctx trampoline, but clobbering ARMREG_IP, which is scratch */
gpointer
mono_arch_get_ftnptr_arg_trampoline (gpointer arg, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int buf_len = 16;
	MonoDomain *domain = mono_domain_get ();

	start = code = mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	ARM_LDR_IMM (code, ARMREG_PC, ARMREG_PC, 0);
	*(guint32*)code = (guint32)(gsize)arg;
	code += 4;
	*(guint32*)code = (guint32)(gsize)addr;
	code += 4;

	g_assert ((code - start) <= buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
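
/*
 * Lazy RGCTX fetch: the code below walks the rgctx array chain for the
 * requested slot and returns the cached value in r0 when it is already
 * filled in; any NULL encountered along the way falls through to the
 * slow-path trampoline, which computes and caches the value.
 */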
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	int tramp_size;
	guint32 code_len;
	guint8 **rgctx_null_jumps;
	int depth, index;
	int i, njumps;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / TARGET_SIZEOF_VOID_P;
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
	tramp_size = 64 + 16 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
	njumps = 0;

	/* The vtable/mrgctx is in R0 */
	g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);

	if (mrgctx) {
		/* get mrgctx ptr */
		ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
	} else {
		/* load rgctx ptr from vtable */
		g_assert (arm_is_imm12 (MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context)));
		ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context));
		/* is the rgctx ptr null? */
		ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [njumps ++] = code;
		ARM_B_COND (code, ARMCOND_EQ, 0);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0) {
			g_assert (arm_is_imm12 (MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT));
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT);
		} else {
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, 0);
		}
		/* is the ptr null? */
		ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [njumps ++] = code;
		ARM_B_COND (code, ARMCOND_EQ, 0);
	}

	/* fetch slot */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, TARGET_SIZEOF_VOID_P * (index + 1));
	ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_R1, ARMREG_R2);
	/* is the slot null? */
	ARM_CMP_REG_IMM (code, ARMREG_R1, 0, 0);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [njumps ++] = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	/* otherwise return, result is in R1 */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
	code = emit_bx (code, ARMREG_LR);

	g_assert (njumps <= depth + 2);
	for (i = 0; i < njumps; ++i)
		arm_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);
	/* Slowpath */

	/* The vtable/mrgctx is still in R0 */

	if (aot) {
		ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_SPECIFIC_TRAMPOLINE_LAZY_FETCH_ADDR, GUINT_TO_POINTER (slot));
		ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_R1);
	} else {
		tramp = (guint8*)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), &code_len);

		/* Jump to the actual trampoline */
		ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0); /* temp reg */
		code = emit_bx (code, ARMREG_R1);
		*(gpointer*)code = tramp;
		code += 4;
	}

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
gpointer
mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *code, *buf;
	int tramp_size;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (aot);

	tramp_size = 32;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	// FIXME: Currently, we always go to the slow path.
	/* Load trampoline addr */
	ARM_LDR_IMM (code, ARMREG_R1, MONO_ARCH_RGCTX_REG, 4);
	/* The vtable/mrgctx is in R0 */
	g_assert (MONO_ARCH_VTABLE_REG == ARMREG_R0);
	code = emit_bx (code, ARMREG_R1);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);

	return buf;
}
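
/*
 * Single-step/breakpoint helper: the code below builds a MonoContext on the
 * stack, points r0 at it, calls the debugger agent's single_step_from_context
 * or breakpoint_from_context callback, and then restores every register
 * (including PC) from the possibly-modified context.
 */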
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	int frame_size;

	buf = code = mono_global_codeman_reserve (96);

	/*
	 * Construct the MonoContext structure on the stack.
	 */

	frame_size = MONO_ABI_SIZEOF (MonoContext);
	frame_size = ALIGN_TO (frame_size, MONO_ARCH_FRAME_ALIGNMENT);
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, frame_size);

	/* save ip, lr and pc into their corresponding ctx.regs slots. */
	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (target_mgreg_t) * ARMREG_IP);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);

	/* save r0..r10 and fp */
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
	ARM_STM (code, ARMREG_IP, 0x0fff);

	/* now we can update fp. */
	ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);

	/* make ctx.esp hold the actual value of sp at the beginning of this method. */
	ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, frame_size);
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);

	/* make ctx.eip hold the address of the call. */
	//ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));

	/* r0 now points to the MonoContext */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
	/* call */
	if (aot) {
		if (single_step)
			ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_single_step_from_context));
		else
			ji = mono_patch_info_list_prepend (ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_debugger_agent_breakpoint_from_context));
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_PC, ARMREG_IP);
		ARM_BLX_REG (code, ARMREG_IP);
	} else {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (code, 0);
		if (single_step)
			*(gpointer*)code = (gpointer)mini_get_dbg_callbacks ()->single_step_from_context;
		else
			*(gpointer*)code = (gpointer)mini_get_dbg_callbacks ()->breakpoint_from_context;
		code += 4;
		ARM_BLX_REG (code, ARMREG_IP);
	}

	/* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);

	/* make ip point to the regs array, then restore everything, including pc. */
	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
	ARM_LDM (code, ARMREG_IP, 0xffff);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
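
/*
 * Summary of the flow below (inferred from the code, since the full
 * documentation lives in tramp-amd64.c): the interpreter passes the native
 * target address in r0 and a CallContext* in r1; the trampoline copies the
 * outgoing stack arguments from the CallContext, loads the argument registers
 * from it, calls the native function, and stores the return registers back
 * into the CallContext.
 */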
/*
 * mono_arch_get_interp_to_native_trampoline:
 *
 *   See tramp-amd64.c for documentation.
 */
gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	guint8 *label_start_copy, *label_exit_copy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i, off_methodargs, off_targetaddr;
	const int fp_reg = ARMREG_R7;
	int framesize;

	buf_len = 512 + 1024;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len);

	/*
	 * iOS ABI
	 *
	 * FIXME We save rgctx reg here so we don't regress tests. It should
	 * not be clobbered by native->interp transition.
	 */
	ARM_PUSH (code, (1 << MONO_ARCH_RGCTX_REG) | (1 << fp_reg) | (1 << ARMREG_LR));
	ARM_MOV_REG_REG (code, fp_reg, ARMREG_SP);

	/* allocate space for saving the target addr and the call context and align stack */
	framesize = sizeof (target_mgreg_t) + ALIGN_TO (2 * sizeof (target_mgreg_t), MONO_ARCH_FRAME_ALIGNMENT);
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, framesize);
	/* save CallContext* onto stack */
	off_methodargs = -4;
	ARM_STR_IMM (code, ARMREG_R1, fp_reg, off_methodargs);

	/* save target address onto stack */
	off_targetaddr = -8;
	ARM_STR_IMM (code, ARMREG_R0, fp_reg, off_targetaddr);

	/* allocate the stack space necessary for the call */
	ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R1, MONO_STRUCT_OFFSET (CallContext, stack_size));
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_R3);

	/* copy stack from the CallContext, R0 = dest, R1 = source */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_SP);
	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_R1, MONO_STRUCT_OFFSET (CallContext, stack));

	label_start_copy = code;

	ARM_CMP_REG_IMM (code, ARMREG_R3, 0, 0);
	label_exit_copy = code;
	ARM_B_COND (code, ARMCOND_EQ, 0);
	ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R1, 0);
	ARM_STR_IMM (code, ARMREG_R2, ARMREG_R0, 0);
	ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, sizeof (target_mgreg_t));
	ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, sizeof (target_mgreg_t));
	ARM_SUB_REG_IMM8 (code, ARMREG_R3, ARMREG_R3, sizeof (target_mgreg_t));
	ARM_B (code, 0);
	arm_patch (code - 4, label_start_copy);
	arm_patch (label_exit_copy, code);
	ARM_LDR_IMM (code, ARMREG_IP, fp_reg, off_methodargs);
	/* load all general purpose argument registers from the CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		ARM_LDR_IMM (code, i, ARMREG_IP, MONO_STRUCT_OFFSET (CallContext, gregs) + i * sizeof (target_mgreg_t));

	/* load all floating point argument registers from the CallContext */
	for (i = 0; i < FP_PARAM_REGS; i++)
		ARM_FLDD (code, i * 2, ARMREG_IP, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* load target addr */
	ARM_LDR_IMM (code, ARMREG_IP, fp_reg, off_targetaddr);

	/* call into native function */
	ARM_BLX_REG (code, ARMREG_IP);

	/* load CallContext* */
	ARM_LDR_IMM (code, ARMREG_IP, fp_reg, off_methodargs);

	/* store all general purpose registers back into the CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		ARM_STR_IMM (code, i, ARMREG_IP, MONO_STRUCT_OFFSET (CallContext, gregs) + i * sizeof (target_mgreg_t));

	/* store all floating point registers back into the CallContext */
	for (i = 0; i < FP_PARAM_REGS; i++)
		ARM_FSTD (code, i * 2, ARMREG_IP, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	ARM_MOV_REG_REG (code, ARMREG_SP, fp_reg);
	ARM_POP (code, (1 << MONO_ARCH_RGCTX_REG) | (1 << fp_reg) | (1 << ARMREG_PC));

	g_assert (code - start < buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL));

	if (info)
		*info = mono_tramp_info_create ("interp_to_native_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}
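
/*
 * Reverse direction (summary inferred from the code below): native code calls
 * here with a MonoFtnDesc* in IP; the trampoline spills the argument
 * registers into a CallContext allocated on the stack, calls the interp entry
 * function from the MonoFtnDesc with the CallContext and the rmethod as
 * arguments, then reloads the return value registers from the CallContext.
 */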
gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
#ifndef DISABLE_INTERPRETER
	guint8 *start = NULL, *code;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int buf_len, i;
	const int fp_reg = ARMREG_R7;
	int framesize;

	buf_len = 512;
	start = code = (guint8 *) mono_global_codeman_reserve (buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	/* iOS ABI */
	ARM_PUSH (code, (1 << fp_reg) | (1 << ARMREG_LR));
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, start, 2 * sizeof (target_mgreg_t));
	mono_add_unwind_op_offset (unwind_ops, code, start, ARMREG_LR, -4);
	mono_add_unwind_op_offset (unwind_ops, code, start, fp_reg, -8);

	ARM_MOV_REG_REG (code, fp_reg, ARMREG_SP);
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, start, fp_reg);

	/* allocate the CallContext on the stack */
	framesize = ALIGN_TO (MONO_ABI_SIZEOF (CallContext), MONO_ARCH_FRAME_ALIGNMENT);
	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, framesize);
	/* save all general purpose registers into the CallContext */
	for (i = 0; i < PARAM_REGS; i++)
		ARM_STR_IMM (code, i, ARMREG_SP, MONO_STRUCT_OFFSET (CallContext, gregs) + i * sizeof (target_mgreg_t));

	/* save all floating registers into the CallContext */
	for (i = 0; i < FP_PARAM_REGS; i++)
		ARM_FSTD (code, i * 2, ARMREG_SP, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* set the stack pointer to the value at call site */
	ARM_ADD_REG_IMM8 (code, ARMREG_R0, fp_reg, 2 * sizeof (target_mgreg_t));
	ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, MONO_STRUCT_OFFSET (CallContext, stack));

	/* call interp_entry with the ccontext and rmethod as arguments */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_SP);
	ARM_LDR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (MonoFtnDesc, arg));
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, MONO_STRUCT_OFFSET (MonoFtnDesc, addr));
	ARM_BLX_REG (code, ARMREG_IP);

	/* load the return values from the context */
	for (i = 0; i < PARAM_REGS; i++)
		ARM_LDR_IMM (code, i, ARMREG_SP, MONO_STRUCT_OFFSET (CallContext, gregs) + i * sizeof (target_mgreg_t));

	for (i = 0; i < FP_PARAM_REGS; i++)
		ARM_FLDD (code, i * 2, ARMREG_SP, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double));

	/* reset stack and return */
	ARM_MOV_REG_REG (code, ARMREG_SP, fp_reg);
	ARM_POP (code, (1 << fp_reg) | (1 << ARMREG_PC));

	g_assert (code - start < buf_len);

	mono_arch_flush_icache (start, code - start);
	MONO_PROFILER_RAISE (jit_code_buffer, (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL));

	if (info)
		*info = mono_tramp_info_create ("native_to_interp_trampoline", start, code - start, ji, unwind_ops);

	return start;
#else
	g_assert_not_reached ();
	return NULL;
#endif /* DISABLE_INTERPRETER */
}
#else

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_static_rgctx_trampoline (gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_ftnptr_arg_trampoline (gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_native_to_interp_trampoline (MonoTrampInfo **info)
{
	g_assert_not_reached ();
	return NULL;
}

#endif /* DISABLE_JIT */
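
/*
 * The helper below decodes the b/bl immediately preceding CODE: the 24-bit
 * signed word displacement is sign-extended and applied relative to the
 * branch address + 8 (the ARM PC offset).
 */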
guint8*
mono_arch_get_call_target (guint8 *code)
{
	guint32 ins = ((guint32*)code) [-1];

	/* Should be a 'bl' or a 'b' */
	if (((ins >> 25) & 0x7) == 0x5) {
		gint32 disp = ((((gint32)ins) & 0xffffff) << 8) >> 8;
		guint8 *target = code - 4 + 8 + (disp * 4);

		return target;
	} else {
		return NULL;
	}
}
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, host_mgreg_t *regs, guint8 *code)
{
	/* The offset is stored as the 4th word of the plt entry */
	return ((guint32*)plt_entry) [3];
}
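
/*
 * Note on the decoding below: a Thumb-2 BL splits its immediate across two
 * halfwords as S:imm10 and J1:J2:imm11; I1/I2 are derived as NOT(J ^ S) and
 * the final offset is the sign-extended, halfword-aligned concatenation of
 * those fields.
 */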
/*
 * Return the address of the PLT entry called by the thumb code CODE.
 */
guint8*
mono_arm_get_thumb_plt_entry (guint8 *code)
{
	int s, j1, j2, imm10, imm11, i1, i2, imm32;
	guint8 *bl, *base;
	guint16 t1, t2;
	guint8 *target;

	/* code should be right after a BL */
	code = (guint8*)((gsize)code & ~1);
	base = (guint8*)((gsize)code & ~3);
	bl = code - 4;
	t1 = ((guint16*)bl) [0];
	t2 = ((guint16*)bl) [1];

	g_assert ((t1 >> 11) == 0x1e);

	s = (t1 >> 10) & 0x1;
	imm10 = (t1 >> 0) & 0x3ff;
	j1 = (t2 >> 13) & 0x1;
	j2 = (t2 >> 11) & 0x1;
	imm11 = t2 & 0x7ff;

	i1 = (s ^ j1) ? 0 : 1;
	i2 = (s ^ j2) ? 0 : 1;

	imm32 = (imm11 << 1) | (imm10 << 12) | (i2 << 22) | (i1 << 23);
	if (s)
		/* Sign extend from 24 bits to 32 bits */
		imm32 = ((gint32)imm32 << 8) >> 8;

	target = code + imm32;

	/* target now points to the thumb plt entry */
	/* ldr.w r12, [pc, #8] */
	g_assert (((guint16*)target) [0] == 0xf8df);
	g_assert (((guint16*)target) [1] == 0xc008);

	/*
	 * The PLT info offset is at offset 16, but mono_arch_get_plt_entry_offset () returns
	 * the 3rd word, so compensate by returning a different value.
	 */
	target += 4;

	return target;
}
#ifndef DISABLE_JIT

/*
 * mono_arch_get_gsharedvt_arg_trampoline:
 *
 *   See tramp-x86.c for documentation.
 */
gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	guint8 *code, *buf;
	int buf_len;
	gpointer *constants;

	buf_len = 24;

	buf = code = mono_domain_code_reserve (domain, buf_len);

	/* Similar to the specialized trampoline code */
	ARM_PUSH (code, (1 << ARMREG_R0) | (1 << ARMREG_R1) | (1 << ARMREG_R2) | (1 << ARMREG_R3) | (1 << ARMREG_LR));
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 2 * sizeof (target_mgreg_t));
	/* arg is passed in LR */
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_PC, 0);
	code = emit_bx (code, ARMREG_IP);
	constants = (gpointer*)code;
	constants [0] = arg;
	constants [1] = addr;
	code += 2 * sizeof (gpointer);

	g_assert ((code - buf) <= buf_len);

	mono_arch_flush_icache (buf, code - buf);
	MONO_PROFILER_RAISE (jit_code_buffer, (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL));

	mono_tramp_info_register (mono_tramp_info_create (NULL, buf, code - buf, NULL, NULL), domain);

	return buf;
}
#else

gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

#endif