2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/profiler-private.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap.h>
22 #include <mono/utils/mono-memory-model.h>
23 #include <mono/utils/mono-threads-coop.h>
29 #include "debugger-agent.h"
31 #include "mono/arch/arm/arm-vfp-codegen.h"
33 /* Sanity check: This makes no sense */
34 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
35 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
39 * IS_SOFT_FLOAT: Is full software floating point used?
40 * IS_HARD_FLOAT: Is full hardware floating point used?
41 * IS_VFP: Is hardware floating point with software ABI used?
43 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
44 * IS_VFP may delegate to mono_arch_is_soft_float ().
47 #if defined(ARM_FPU_VFP_HARD)
48 #define IS_SOFT_FLOAT (FALSE)
49 #define IS_HARD_FLOAT (TRUE)
51 #elif defined(ARM_FPU_NONE)
52 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
53 #define IS_HARD_FLOAT (FALSE)
54 #define IS_VFP (!mono_arch_is_soft_float ())
56 #define IS_SOFT_FLOAT (FALSE)
57 #define IS_HARD_FLOAT (FALSE)
61 #define THUNK_SIZE (3 * 4)
63 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
66 void sys_icache_invalidate (void *start
, size_t len
);
69 /* This mutex protects architecture specific caches */
70 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
71 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
72 static mono_mutex_t mini_arch_mutex
;
74 static gboolean v5_supported
= FALSE
;
75 static gboolean v6_supported
= FALSE
;
76 static gboolean v7_supported
= FALSE
;
77 static gboolean v7s_supported
= FALSE
;
78 static gboolean v7k_supported
= FALSE
;
79 static gboolean thumb_supported
= FALSE
;
80 static gboolean thumb2_supported
= FALSE
;
82 * Whether to use the ARM EABI
84 static gboolean eabi_supported
= FALSE
;
87 * Whether to use the iPhone ABI extensions:
88 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
89 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
90 * This is required for debugging/profiling tools to work, but it has some overhead so it should
91 * only be turned on in debug builds.
93 static gboolean iphone_abi
= FALSE
;
96 * The FPU we are generating code for. This is NOT runtime configurable right now,
97 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
99 static MonoArmFPU arm_fpu
;
101 #if defined(ARM_FPU_VFP_HARD)
103 * On armhf, d0-d7 are used for argument passing and d8-d15
104 * must be preserved across calls, which leaves us no room
105 * for scratch registers. So we use d14-d15 but back up their
106 * previous contents to a stack slot before using them - see
107 * mono_arm_emit_vfp_scratch_save/_restore ().
109 static int vfp_scratch1
= ARM_VFP_D14
;
110 static int vfp_scratch2
= ARM_VFP_D15
;
113 * On armel, d0-d7 do not need to be preserved, so we can
114 * freely make use of them as scratch registers.
116 static int vfp_scratch1
= ARM_VFP_D0
;
117 static int vfp_scratch2
= ARM_VFP_D1
;
122 static gpointer single_step_tramp
, breakpoint_tramp
;
123 static gpointer get_tls_tramp
;
126 * The code generated for sequence points reads from this location, which is
127 * made read-only when single stepping is enabled.
129 static gpointer ss_trigger_page
;
131 /* Enabled breakpoints read from this trigger page */
132 static gpointer bp_trigger_page
;
136 * floating point support: on ARM it is a mess, there are at least 3
137 * different setups, each of which is binary-incompatible with the others.
138 * 1) FPA: old and ugly, but unfortunately what current distros use
139 * the double binary format has the two words swapped. 8 double registers.
140 * Implemented usually by kernel emulation.
141 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
142 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
143 * 3) VFP: the new and actually sensible and useful FP support. Implemented
144 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
146 * We do not care about FPA. We will support soft float and VFP.
148 int mono_exc_esp_offset
= 0;
150 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
151 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
152 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
154 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
155 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
156 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
158 //#define DEBUG_IMT 0
161 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
165 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
);
/*
 * mono_arch_regname:
 * Map an ARM integer register index (0..15) to a human-readable name
 * ("arm_r0" .. "arm_lr"; the table entry for pc is on a line not visible
 * in this extraction).
 * NOTE(review): this view is missing interior lines (return type, braces,
 * table terminator, fallback return) - confirm against the full source.
 */
168 mono_arch_regname (int reg
)
170 static const char * rnames
[] = {
171 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
172 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
173 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* bounds-check before indexing the name table */
176 if (reg
>= 0 && reg
< 16)
/*
 * mono_arch_fregname:
 * Map a VFP single-precision register index (0..31) to a human-readable
 * name ("arm_f0" .. "arm_f31"; the last table row is on a line not
 * visible in this extraction).
 * NOTE(review): interior lines (return type, braces, fallback return)
 * are missing from this view - confirm against the full source.
 */
182 mono_arch_fregname (int reg
)
184 static const char * rnames
[] = {
185 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
186 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
187 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
188 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
189 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
190 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* bounds-check before indexing the name table */
193 if (reg
>= 0 && reg
< 32)
/*
 * emit_big_add:
 * Emit ARM code computing dreg = sreg + imm, for an arbitrary 32-bit imm.
 * Fast path: if imm fits an ARM rotated 8-bit immediate, emit a single
 * ADD with immediate. Otherwise load imm into a scratch register first
 * (IP in one branch, dreg itself in the other) and emit a register ADD.
 * NOTE(review): the condition selecting between the two slow paths is on
 * a dropped line - presumably dreg == sreg forces the IP-scratch path
 * (cannot clobber dreg before the add); confirm against the full source.
 */
201 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
203 int imm8
, rot_amount
;
/* fast path: imm encodable as ARM rotated immediate */
204 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
205 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
/* slow path A: materialize imm in IP, then dreg = sreg + IP */
209 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
210 ARM_ADD_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
/* slow path B: materialize imm directly in dreg, then dreg += sreg */
212 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
213 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
218 /* If dreg == sreg, this clobbers IP */
/*
 * emit_sub_imm:
 * Emit ARM code computing dreg = sreg - imm. Mirrors emit_big_add:
 * single SUB-with-immediate when imm fits a rotated 8-bit immediate,
 * otherwise load imm into a scratch register (IP, or dreg itself) and
 * emit a register SUB.
 * NOTE(review): the branch selection between the two slow paths is on a
 * dropped line in this extraction - confirm against the full source.
 */
220 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
222 int imm8
, rot_amount
;
/* fast path: imm encodable as ARM rotated immediate */
223 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
224 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
/* slow path A: imm -> IP, then dreg = sreg - IP */
228 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
229 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
/* slow path B: imm -> dreg, then dreg = dreg - sreg */
231 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
232 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
/*
 * emit_memcpy:
 * Emit ARM code copying `size` bytes from sreg+soffset to dreg+doffset,
 * in 4-byte words. Three strategies, by size/offset:
 *  - size > 4 pointers: an emitted runtime loop using r0-r3 (safe here
 *    because, per the comment below, this is only used for incoming
 *    stack arguments, before the argument registers are live);
 *  - small size with all offsets encodable as 12-bit immediates:
 *    unrolled LDR/STR pairs through LR;
 *  - otherwise: compute base addresses into r0/r1 first, then copy with
 *    zero-based offsets.
 * NOTE(review): the per-word loop decrements and branch structure rely
 * on dropped lines (braces, size -= 4 updates); confirm against the
 * full source before editing.
 */
238 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
240 /* we can use r0-r3, since this is called only for incoming args on the stack */
241 if (size
> sizeof (gpointer
) * 4) {
/* large copy: materialize src/dst addresses, then loop */
243 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
244 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
/* r2 = remaining byte count; start_loop marks the loop head for patching */
245 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
246 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
247 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
248 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
249 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
/* SUBS sets flags; loop back while the count is non-zero */
250 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
251 ARM_B_COND (code
, ARMCOND_NE
, 0);
/* patch the just-emitted branch to target the loop head */
252 arm_patch (code
- 4, start_loop
);
/* small copy, offsets fit arm LDR/STR 12-bit immediates: unroll via LR */
255 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
256 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
258 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
259 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
/* offsets too large: rebase into r0/r1 and copy from offset 0 */
265 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
266 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
267 doffset
= soffset
= 0;
269 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
270 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
/* all bytes must have been consumed in 4-byte steps */
276 g_assert (size
== 0);
/*
 * emit_call_reg:
 * Emit an indirect call through `reg`. One branch uses BLX (which sets
 * LR and can interwork to Thumb); the fallback manually copies PC into
 * LR and then jumps by writing the target into PC.
 * NOTE(review): the guard choosing between the two forms (presumably an
 * architecture-version check such as v5_supported) is on a dropped line
 * in this extraction - confirm against the full source.
 */
281 emit_call_reg (guint8
*code
, int reg
)
/* BLX form: sets LR and branches in one instruction */
284 ARM_BLX_REG (code
, reg
);
/* fallback: LR = PC (reads PC+8, i.e. the instruction after the jump) */
286 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
/* ... then branch by loading the target into PC */
290 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
/*
 * emit_call_seq:
 * Emit a call sequence whose target is patched in later. For dynamic
 * methods the target is loaded from an inline literal pool slot
 * (LDR ip, [pc, #0] over a NULL word patched at link time) and called
 * via emit_call_reg; the other path (on dropped lines) reserves thunk
 * space, growing cfg->thunk_area by THUNK_SIZE.
 * NOTE(review): parts of the non-dynamic path are missing from this
 * extraction - confirm against the full source.
 */
296 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
298 if (cfg
->method
->dynamic
) {
/* load the (to-be-patched) target address from the next word */
299 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
/* placeholder literal; filled in when the call target is known */
301 *(gpointer
*)code
= NULL
;
303 code
= emit_call_reg (code
, ARMREG_IP
);
/* reserve space for an out-of-line branch thunk for this call */
307 cfg
->thunk_area
+= THUNK_SIZE
;
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch with a zero displacement, to be fixed up
 * later via arm_patch () once the target is known.
 */
312 mono_arm_patchable_b (guint8
*code
, int cond
)
314 ARM_B_COND (code
, cond
, 0);
/*
 * mono_arm_patchable_bl:
 * Emit a conditional branch-and-link with a zero displacement, to be
 * fixed up later via arm_patch () once the call target is known.
 */
319 mono_arm_patchable_bl (guint8
*code
, int cond
)
321 ARM_BL_COND (code
, cond
, 0);
325 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
326 #define HAVE_AEABI_READ_TP 1
329 #ifdef HAVE_AEABI_READ_TP
330 gpointer
__aeabi_read_tp (void);
/*
 * mono_arch_have_fast_tls:
 * Report whether TLS values can be read directly from the CP15 thread
 * register instead of going through pthread APIs. Probes once (cached
 * in a static): compares __aeabi_read_tp () against a direct
 * "mrc p15, 0, rX, c13, c0, 3" read; fast TLS is usable only when both
 * agree and are non-NULL. Always disabled when the debug option
 * use_fallback_tls is set. Without HAVE_AEABI_READ_TP the function
 * (on lines not visible here) presumably returns FALSE.
 */
334 mono_arch_have_fast_tls (void)
336 #ifdef HAVE_AEABI_READ_TP
337 static gboolean have_fast_tls
= FALSE
;
338 static gboolean inited
= FALSE
;
/* debug option forces the slow pthread-based TLS path */
341 if (mini_get_debug_options ()->use_fallback_tls
)
/* already probed: return the cached result */
345 return have_fast_tls
;
347 tp1
= __aeabi_read_tp ();
/* read the user-mode thread pointer (TPIDRURO) directly */
348 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2
));
/* fast TLS is safe only if both mechanisms yield the same pointer */
350 have_fast_tls
= tp1
&& tp1
== tp2
;
352 return have_fast_tls
;
/*
 * emit_tls_get:
 * Emit code loading the TLS slot at `tls_offset` into dreg:
 * first read the thread pointer (CP15 c13 c0 3, TPIDRURO) into dreg,
 * then load dreg = [dreg + tls_offset].
 */
359 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
361 ARM_MRC (code
, 15, 0, dreg
, 13, 0, 3);
362 ARM_LDR_IMM (code
, dreg
, dreg
, tls_offset
);
/*
 * emit_tls_set:
 * Emit code storing sreg into the TLS slot at `tls_offset`.
 * The thread pointer is read into a scratch register chosen so it never
 * collides with sreg (r0 unless sreg is r0, then r1), then the value is
 * stored at [tp_reg + tls_offset].
 */
367 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
/* pick a thread-pointer scratch distinct from the value register */
369 int tp_reg
= (sreg
!= ARMREG_R0
) ? ARMREG_R0
: ARMREG_R1
;
370 ARM_MRC (code
, 15, 0, tp_reg
, 13, 0, 3);
371 ARM_STR_IMM (code
, sreg
, tp_reg
, tls_offset
);
378 * Emit code to push an LMF structure on the LMF stack.
379 * On arm, this is intermixed with the initialization of other fields of the structure.
382 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
386 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
) != -1) {
387 code
= emit_tls_get (code
, ARMREG_R0
, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
));
389 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
390 (gpointer
)"mono_tls_get_lmf_addr");
391 code
= emit_call_seq (cfg
, code
);
393 /* we build the MonoLMF structure on the stack - see mini-arm.h */
394 /* lmf_offset is the offset from the previous stack pointer,
395 * alloc_size is the total stack space allocated, so the offset
396 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
397 * The pointer to the struct is put in r1 (new_lmf).
398 * ip is used as scratch
399 * The callee-saved registers are already in the MonoLMF structure
401 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
402 /* r0 is the result from mono_get_lmf_addr () */
403 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
404 /* new_lmf->previous_lmf = *lmf_addr */
405 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
406 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
407 /* *(lmf_addr) = r1 */
408 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
409 /* Skip method (only needed for trampoline LMF frames) */
410 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
411 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
412 /* save the current IP */
413 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
414 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
416 for (i
= 0; i
< sizeof (MonoLMF
); i
+= sizeof (mgreg_t
))
417 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
428 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
432 for (list
= inst
->float_args
; list
; list
= list
->next
) {
433 FloatArgData
*fad
= list
->data
;
434 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
435 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
437 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
443 if (*offset
+ *max_len
> cfg
->code_size
) {
444 cfg
->code_size
+= *max_len
;
445 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
447 code
= cfg
->native_code
+ *offset
;
451 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
452 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
454 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
456 *offset
= code
- cfg
->native_code
;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill the VFP scratch double register `reg` (must be vfp_scratch1 or
 * vfp_scratch2) to its reserved stack slot. Needed on armhf where the
 * scratches are d14/d15, which are callee-saved (see the comment near
 * the vfp_scratch1/2 definitions above). If the slot offset does not
 * fit the 8-bit-scaled FSTD immediate, the address is computed into LR
 * first and the store uses offset 0.
 */
463 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
467 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
/* look up the stack slot reserved for this scratch register */
469 inst
= (MonoInst
*) cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
/* offset too large for FSTD immediate: rebase through LR */
472 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
473 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
474 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
476 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload the VFP scratch double register `reg` (must be vfp_scratch1 or
 * vfp_scratch2) from the stack slot written by
 * mono_arm_emit_vfp_scratch_save (). Mirrors the save path exactly,
 * using FLDD instead of FSTD, with the same LR rebase when the slot
 * offset exceeds the 8-bit-scaled FLDD immediate range.
 */
483 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
487 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
/* same slot the save path used for this scratch register */
489 inst
= (MonoInst
*) cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
/* offset too large for FLDD immediate: rebase through LR */
492 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
493 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
494 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
496 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
505 * Emit code to pop an LMF structure from the LMF stack.
508 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
512 if (lmf_offset
< 32) {
513 basereg
= cfg
->frame_reg
;
518 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
521 /* ip = previous_lmf */
522 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
524 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
525 /* *(lmf_addr) = previous_lmf */
526 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
531 #endif /* #ifndef DISABLE_JIT */
534 * mono_arch_get_argument_info:
535 * @csig: a method signature
536 * @param_count: the number of parameters to consider
537 * @arg_info: an array to store the result infos
539 * Gathers information on parameters such as size, alignment and
540 * padding. arg_info should be large enough to hold param_count + 1 entries.
542 * Returns the size of the activation frame.
545 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
547 int k
, frame_size
= 0;
548 guint32 size
, align
, pad
;
552 t
= mini_get_underlying_type (csig
->ret
);
553 if (MONO_TYPE_ISSTRUCT (t
)) {
554 frame_size
+= sizeof (gpointer
);
558 arg_info
[0].offset
= offset
;
561 frame_size
+= sizeof (gpointer
);
565 arg_info
[0].size
= frame_size
;
567 for (k
= 0; k
< param_count
; k
++) {
568 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
570 /* ignore alignment for now */
573 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
574 arg_info
[k
].pad
= pad
;
576 arg_info
[k
+ 1].pad
= 0;
577 arg_info
[k
+ 1].size
= size
;
579 arg_info
[k
+ 1].offset
= offset
;
583 align
= MONO_ARCH_FRAME_ALIGNMENT
;
584 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
585 arg_info
[k
].pad
= pad
;
590 #define MAX_ARCH_DELEGATE_PARAMS 3
593 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
595 guint8
*code
, *start
;
596 GSList
*unwind_ops
= mono_arch_get_cie_program ();
599 start
= code
= mono_global_codeman_reserve (12);
601 /* Replace the this argument with the target */
602 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
603 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
604 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
606 g_assert ((code
- start
) <= 12);
608 mono_arch_flush_icache (start
, 12);
612 size
= 8 + param_count
* 4;
613 start
= code
= mono_global_codeman_reserve (size
);
615 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
616 /* slide down the arguments */
617 for (i
= 0; i
< param_count
; ++i
) {
618 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
620 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
622 g_assert ((code
- start
) <= size
);
624 mono_arch_flush_icache (start
, size
);
628 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
630 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
631 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
635 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
);
641 * mono_arch_get_delegate_invoke_impls:
643 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
647 mono_arch_get_delegate_invoke_impls (void)
653 get_delegate_invoke_impl (&info
, TRUE
, 0);
654 res
= g_slist_prepend (res
, info
);
656 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
657 get_delegate_invoke_impl (&info
, FALSE
, i
);
658 res
= g_slist_prepend (res
, info
);
665 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
667 guint8
*code
, *start
;
670 /* FIXME: Support more cases */
671 sig_ret
= mini_get_underlying_type (sig
->ret
);
672 if (MONO_TYPE_ISSTRUCT (sig_ret
))
676 static guint8
* cached
= NULL
;
677 mono_mini_arch_lock ();
679 mono_mini_arch_unlock ();
684 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
687 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
688 mono_tramp_info_register (info
, NULL
);
691 mono_mini_arch_unlock ();
694 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
697 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
699 for (i
= 0; i
< sig
->param_count
; ++i
)
700 if (!mono_is_regsize_var (sig
->params
[i
]))
703 mono_mini_arch_lock ();
704 code
= cache
[sig
->param_count
];
706 mono_mini_arch_unlock ();
711 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
712 start
= mono_aot_get_trampoline (name
);
716 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
717 mono_tramp_info_register (info
, NULL
);
719 cache
[sig
->param_count
] = start
;
720 mono_mini_arch_unlock ();
728 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the managed `this` argument from a saved register dump:
 * on ARM `this` is always passed in r0 (see get_call_info, which emits
 * the vret arg after the first argument precisely so `this` stays in
 * the first argument register).
 */
734 mono_arch_get_this_arg_from_call (mgreg_t
*regs
, guint8
*code
)
736 return (gpointer
)regs
[ARMREG_R0
];
740 * Initialize the cpu to execute managed code.
743 mono_arch_cpu_init (void)
745 i8_align
= MONO_ABI_ALIGNOF (gint64
);
746 #ifdef MONO_CROSS_COMPILE
747 /* Need to set the alignment of i8 since it can be different on the target */
748 #ifdef TARGET_ANDROID
750 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
756 * Initialize architecture specific code.
759 mono_arch_init (void)
761 const char *cpu_arch
;
763 #ifdef TARGET_WATCHOS
764 mini_get_debug_options ()->soft_breakpoints
= TRUE
;
767 mono_os_mutex_init_recursive (&mini_arch_mutex
);
768 if (mini_get_debug_options ()->soft_breakpoints
) {
770 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
772 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
773 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
774 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
777 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception
);
778 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token
);
779 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind
);
780 #if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
781 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call
);
783 mono_aot_register_jit_icall ("mono_arm_unaligned_stack", mono_arm_unaligned_stack
);
785 #if defined(__ARM_EABI__)
786 eabi_supported
= TRUE
;
789 #if defined(ARM_FPU_VFP_HARD)
790 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
792 arm_fpu
= MONO_ARM_FPU_VFP
;
794 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
796 * If we're compiling with a soft float fallback and it
797 * turns out that no VFP unit is available, we need to
798 * switch to soft float. We don't do this for iOS, since
799 * iOS devices always have a VFP unit.
801 if (!mono_hwcap_arm_has_vfp
)
802 arm_fpu
= MONO_ARM_FPU_NONE
;
805 * This environment variable can be useful in testing
806 * environments to make sure the soft float fallback
807 * works. Most ARM devices have VFP units these days, so
808 * normally soft float code would not be exercised much.
810 const char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
812 if (soft
&& !strncmp (soft
, "1", 1))
813 arm_fpu
= MONO_ARM_FPU_NONE
;
817 v5_supported
= mono_hwcap_arm_is_v5
;
818 v6_supported
= mono_hwcap_arm_is_v6
;
819 v7_supported
= mono_hwcap_arm_is_v7
;
822 * On weird devices, the hwcap code may fail to detect
823 * the ARM version. In that case, we can at least safely
824 * assume the version the runtime was compiled for.
836 #if defined(__APPLE__)
837 /* iOS is special-cased here because we don't yet
838 have a way to properly detect CPU features on it. */
839 thumb_supported
= TRUE
;
842 thumb_supported
= mono_hwcap_arm_has_thumb
;
843 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
846 /* Format: armv(5|6|7[s])[-thumb[2]] */
847 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
849 /* Do this here so it overrides any detection. */
851 if (strncmp (cpu_arch
, "armv", 4) == 0) {
852 v5_supported
= cpu_arch
[4] >= '5';
853 v6_supported
= cpu_arch
[4] >= '6';
854 v7_supported
= cpu_arch
[4] >= '7';
855 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
856 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
859 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
860 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
865 * Cleanup architecture specific code.
868 mono_arch_cleanup (void)
873 * This function returns the optimizations supported on this cpu.
876 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
878 /* no arm-specific optimizations yet */
884 * This function test for all SIMD functions supported.
886 * Returns a bitmask corresponding to all supported versions.
890 mono_arch_cpu_enumerate_simd_versions (void)
892 /* SIMD is currently unimplemented */
/*
 * mono_arm_is_hard_float:
 * TRUE when generating code for the hard-float (armhf) ABI, i.e. the
 * FPU selected at startup in mono_arch_init () is VFP with the
 * hardware floating-point calling convention.
 */
897 mono_arm_is_hard_float (void)
899 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
905 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
907 if (v7s_supported
|| v7k_supported
) {
921 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/*
 * mono_arch_is_soft_float:
 * TRUE when no FPU is in use and all floating point must be emulated
 * in software (arm_fpu set to MONO_ARM_FPU_NONE in mono_arch_init (),
 * either because no VFP unit was detected or because
 * MONO_ARM_FORCE_SOFT_FLOAT was set).
 */
923 mono_arch_is_soft_float (void)
925 return arm_fpu
== MONO_ARM_FPU_NONE
;
930 is_regsize_var (MonoType
*t
)
934 t
= mini_get_underlying_type (t
);
941 case MONO_TYPE_FNPTR
:
943 case MONO_TYPE_OBJECT
:
944 case MONO_TYPE_STRING
:
945 case MONO_TYPE_CLASS
:
946 case MONO_TYPE_SZARRAY
:
947 case MONO_TYPE_ARRAY
:
949 case MONO_TYPE_GENERICINST
:
950 if (!mono_type_generic_inst_is_valuetype (t
))
953 case MONO_TYPE_VALUETYPE
:
960 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
965 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
966 MonoInst
*ins
= cfg
->varinfo
[i
];
967 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
970 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
973 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
976 /* we can only allocate 32 bit values */
977 if (is_regsize_var (ins
->inst_vtype
)) {
978 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
979 g_assert (i
== vmv
->idx
);
980 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
988 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
992 mono_arch_compute_omit_fp (cfg
);
995 * FIXME: Interface calls might go through a static rgctx trampoline which
996 * sets V5, but it doesn't save it, so we need to save it ourselves, and
999 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1000 cfg
->uses_rgctx_reg
= TRUE
;
1002 if (cfg
->arch
.omit_fp
)
1003 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1004 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1005 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1006 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1008 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1009 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1011 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1012 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1013 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1014 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1015 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1016 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1022 * mono_arch_regalloc_cost:
1024 * Return the cost, in number of memory references, of the action of
1025 * allocating the variable VMV into a register during global register
1029 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1035 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_flush_icache:
 * Flush/invalidate the instruction cache for [code, code + size) after
 * emitting or patching machine code, so the CPU does not execute stale
 * instructions. Cross-compile builds skip the flush; one platform
 * branch uses Darwin's sys_icache_invalidate (), the generic branch
 * uses the compiler builtin __builtin___clear_cache ().
 * NOTE(review): the #elif/#else structure selecting between the two
 * real branches is on dropped lines - confirm against the full source.
 */
1038 mono_arch_flush_icache (guint8
*code
, gint size
)
1040 #if defined(MONO_CROSS_COMPILE)
1042 sys_icache_invalidate (code
, size
);
1044 __builtin___clear_cache (code
, code
+ size
);
1051 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1054 if (*gr
> ARMREG_R3
) {
1056 ainfo
->offset
= *stack_size
;
1057 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1058 ainfo
->storage
= RegTypeBase
;
1061 ainfo
->storage
= RegTypeGeneral
;
1068 split
= i8_align
== 4;
1073 if (*gr
== ARMREG_R3
&& split
) {
1074 /* first word in r3 and the second on the stack */
1075 ainfo
->offset
= *stack_size
;
1076 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1077 ainfo
->storage
= RegTypeBaseGen
;
1079 } else if (*gr
>= ARMREG_R3
) {
1080 if (eabi_supported
) {
1081 /* darwin aligns longs to 4 byte only */
1082 if (i8_align
== 8) {
1087 ainfo
->offset
= *stack_size
;
1088 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1089 ainfo
->storage
= RegTypeBase
;
1092 if (eabi_supported
) {
1093 if (i8_align
== 8 && ((*gr
) & 1))
1096 ainfo
->storage
= RegTypeIRegPair
;
1105 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1108 * If we're calling a function like this:
1110 * void foo(float a, double b, float c)
1112 * We pass a in s0 and b in d1. That leaves us
1113 * with s1 being unused. The armhf ABI recognizes
1114 * this and requires register assignment to then
1115 * use that for the next single-precision arg,
1116 * i.e. c in this example. So float_spare either
1117 * tells us which reg to use for the next single-
1118 * precision arg, or it's -1, meaning use *fpr.
1120 * Note that even though most of the JIT speaks
1121 * double-precision, fpr represents single-
1122 * precision registers.
1124 * See parts 5.5 and 6.1.2 of the AAPCS for how
1128 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1129 ainfo
->storage
= RegTypeFP
;
1133 * If we're passing a double-precision value
1134 * and *fpr is odd (e.g. it's s1, s3, ...)
1135 * we need to use the next even register. So
1136 * we mark the current *fpr as a spare that
1137 * can be used for the next single-precision
1141 *float_spare
= *fpr
;
1146 * At this point, we have an even register
1147 * so we assign that and move along.
1151 } else if (*float_spare
>= 0) {
1153 * We're passing a single-precision value
1154 * and it looks like a spare single-
1155 * precision register is available. Let's
1159 ainfo
->reg
= *float_spare
;
1163 * If we hit this branch, we're passing a
1164 * single-precision value and we can simply
1165 * use the next available register.
1173 * We've exhausted available floating point
1174 * regs, so pass the rest on the stack.
1182 ainfo
->offset
= *stack_size
;
1183 ainfo
->reg
= ARMREG_SP
;
1184 ainfo
->storage
= RegTypeBase
;
1191 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1195 MonoClassField
*field
;
1196 MonoType
*ftype
, *prev_ftype
= NULL
;
1199 klass
= mono_class_from_mono_type (t
);
1201 while ((field
= mono_class_get_fields (klass
, &iter
))) {
1202 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1204 ftype
= mono_field_get_type (field
);
1205 ftype
= mini_get_underlying_type (ftype
);
1207 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1208 int nested_nfields
, nested_esize
;
1210 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1212 if (nested_esize
== 4)
1213 ftype
= &mono_defaults
.single_class
->byval_arg
;
1215 ftype
= &mono_defaults
.double_class
->byval_arg
;
1216 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1219 nfields
+= nested_nfields
;
1221 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1223 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1229 if (nfields
== 0 || nfields
> 4)
1231 *out_nfields
= nfields
;
1232 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1237 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1239 guint i
, gr
, fpr
, pstart
;
1241 int n
= sig
->hasthis
+ sig
->param_count
;
1245 guint32 stack_size
= 0;
1247 gboolean is_pinvoke
= sig
->pinvoke
;
1248 gboolean vtype_retaddr
= FALSE
;
1251 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1253 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1260 t
= mini_get_underlying_type (sig
->ret
);
1271 case MONO_TYPE_FNPTR
:
1272 case MONO_TYPE_CLASS
:
1273 case MONO_TYPE_OBJECT
:
1274 case MONO_TYPE_SZARRAY
:
1275 case MONO_TYPE_ARRAY
:
1276 case MONO_TYPE_STRING
:
1277 cinfo
->ret
.storage
= RegTypeGeneral
;
1278 cinfo
->ret
.reg
= ARMREG_R0
;
1282 cinfo
->ret
.storage
= RegTypeIRegPair
;
1283 cinfo
->ret
.reg
= ARMREG_R0
;
1287 cinfo
->ret
.storage
= RegTypeFP
;
1289 if (t
->type
== MONO_TYPE_R4
)
1290 cinfo
->ret
.size
= 4;
1292 cinfo
->ret
.size
= 8;
1294 if (IS_HARD_FLOAT
) {
1295 cinfo
->ret
.reg
= ARM_VFP_F0
;
1297 cinfo
->ret
.reg
= ARMREG_R0
;
1300 case MONO_TYPE_GENERICINST
:
1301 if (!mono_type_generic_inst_is_valuetype (t
)) {
1302 cinfo
->ret
.storage
= RegTypeGeneral
;
1303 cinfo
->ret
.reg
= ARMREG_R0
;
1306 if (mini_is_gsharedvt_variable_type (t
)) {
1307 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1311 case MONO_TYPE_VALUETYPE
:
1312 case MONO_TYPE_TYPEDBYREF
:
1313 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1314 cinfo
->ret
.storage
= RegTypeHFA
;
1316 cinfo
->ret
.nregs
= nfields
;
1317 cinfo
->ret
.esize
= esize
;
1320 int native_size
= mono_class_native_size (mono_class_from_mono_type (t
), &align
);
1323 #ifdef TARGET_WATCHOS
1328 if (native_size
<= max_size
) {
1329 cinfo
->ret
.storage
= RegTypeStructByVal
;
1330 cinfo
->ret
.struct_size
= native_size
;
1331 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1333 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1336 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1341 case MONO_TYPE_MVAR
:
1342 g_assert (mini_is_gsharedvt_type (t
));
1343 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1345 case MONO_TYPE_VOID
:
1348 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1351 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1356 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1357 * the first argument, allowing 'this' to be always passed in the first arg reg.
1358 * Also do this if the first argument is a reference type, since virtual calls
1359 * are sometimes made using calli without sig->hasthis set, like in the delegate
1362 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1364 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1366 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1370 cinfo
->ret
.reg
= gr
;
1372 cinfo
->vret_arg_index
= 1;
1376 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1379 if (vtype_retaddr
) {
1380 cinfo
->ret
.reg
= gr
;
1385 DEBUG(g_print("params: %d\n", sig
->param_count
));
1386 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1387 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1389 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1390 /* Prevent implicit arguments and sig_cookie from
1391 being passed in registers */
1394 /* Emit the signature cookie just before the implicit arguments */
1395 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1397 DEBUG(g_print("param %d: ", i
));
1398 if (sig
->params
[i
]->byref
) {
1399 DEBUG(g_print("byref\n"));
1400 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1404 t
= mini_get_underlying_type (sig
->params
[i
]);
1408 cinfo
->args
[n
].size
= 1;
1409 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1413 cinfo
->args
[n
].size
= 2;
1414 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1418 cinfo
->args
[n
].size
= 4;
1419 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1424 case MONO_TYPE_FNPTR
:
1425 case MONO_TYPE_CLASS
:
1426 case MONO_TYPE_OBJECT
:
1427 case MONO_TYPE_STRING
:
1428 case MONO_TYPE_SZARRAY
:
1429 case MONO_TYPE_ARRAY
:
1430 cinfo
->args
[n
].size
= sizeof (gpointer
);
1431 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1433 case MONO_TYPE_GENERICINST
:
1434 if (!mono_type_generic_inst_is_valuetype (t
)) {
1435 cinfo
->args
[n
].size
= sizeof (gpointer
);
1436 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1439 if (mini_is_gsharedvt_variable_type (t
)) {
1440 /* gsharedvt arguments are passed by ref */
1441 g_assert (mini_is_gsharedvt_type (t
));
1442 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1443 switch (ainfo
->storage
) {
1444 case RegTypeGeneral
:
1445 ainfo
->storage
= RegTypeGSharedVtInReg
;
1448 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1451 g_assert_not_reached ();
1456 case MONO_TYPE_TYPEDBYREF
:
1457 case MONO_TYPE_VALUETYPE
: {
1460 int nwords
, nfields
, esize
;
1463 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1464 if (fpr
+ nfields
< ARM_VFP_F16
) {
1465 ainfo
->storage
= RegTypeHFA
;
1467 ainfo
->nregs
= nfields
;
1468 ainfo
->esize
= esize
;
1479 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1480 size
= sizeof (MonoTypedRef
);
1481 align
= sizeof (gpointer
);
1483 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
1485 size
= mono_class_native_size (klass
, &align
);
1487 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1489 DEBUG(g_print ("load %d bytes struct\n", size
));
1491 #ifdef TARGET_WATCHOS
1492 /* Watchos pass large structures by ref */
1493 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1494 if (sig
->pinvoke
&& size
> 16) {
1495 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1496 switch (ainfo
->storage
) {
1497 case RegTypeGeneral
:
1498 ainfo
->storage
= RegTypeStructByAddr
;
1501 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1504 g_assert_not_reached ();
1513 align_size
+= (sizeof (gpointer
) - 1);
1514 align_size
&= ~(sizeof (gpointer
) - 1);
1515 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
1516 ainfo
->storage
= RegTypeStructByVal
;
1517 ainfo
->struct_size
= size
;
1518 /* FIXME: align stack_size if needed */
1519 if (eabi_supported
) {
1520 if (align
>= 8 && (gr
& 1))
1523 if (gr
> ARMREG_R3
) {
1525 ainfo
->vtsize
= nwords
;
1527 int rest
= ARMREG_R3
- gr
+ 1;
1528 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1530 ainfo
->size
= n_in_regs
;
1531 ainfo
->vtsize
= nwords
- n_in_regs
;
1534 nwords
-= n_in_regs
;
1536 if (sig
->call_convention
== MONO_CALL_VARARG
)
1537 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1538 stack_size
= ALIGN_TO (stack_size
, align
);
1539 ainfo
->offset
= stack_size
;
1540 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1541 stack_size
+= nwords
* sizeof (gpointer
);
1547 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1553 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1555 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1561 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1563 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1566 case MONO_TYPE_MVAR
:
1567 /* gsharedvt arguments are passed by ref */
1568 g_assert (mini_is_gsharedvt_type (t
));
1569 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1570 switch (ainfo
->storage
) {
1571 case RegTypeGeneral
:
1572 ainfo
->storage
= RegTypeGSharedVtInReg
;
1575 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1578 g_assert_not_reached ();
1582 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1587 /* Handle the case where there are no implicit arguments */
1588 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1589 /* Prevent implicit arguments and sig_cookie from
1590 being passed in registers */
1593 /* Emit the signature cookie just before the implicit arguments */
1594 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1597 /* align stack size to 8 */
1598 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1599 stack_size
= (stack_size
+ 7) & ~7;
1601 cinfo
->stack_usage
= stack_size
;
1607 mono_arch_tail_call_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
)
1609 MonoType
*callee_ret
;
1613 c1
= get_call_info (NULL
, caller_sig
);
1614 c2
= get_call_info (NULL
, callee_sig
);
1617 * Tail calls with more callee stack usage than the caller cannot be supported, since
1618 * the extra stack space would be left on the stack after the tail call.
1620 res
= c1
->stack_usage
>= c2
->stack_usage
;
1621 callee_ret
= mini_get_underlying_type (callee_sig
->ret
);
1622 if (callee_ret
&& MONO_TYPE_ISSTRUCT (callee_ret
) && c2
->ret
.storage
!= RegTypeStructByVal
)
1623 /* An address on the callee's stack is passed as the first argument */
1626 if (c2
->stack_usage
> 16 * 4)
1638 debug_omit_fp (void)
1641 return mono_debug_count ();
1648 * mono_arch_compute_omit_fp:
1650 * Determine whenever the frame pointer can be eliminated.
1653 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1655 MonoMethodSignature
*sig
;
1656 MonoMethodHeader
*header
;
1660 if (cfg
->arch
.omit_fp_computed
)
1663 header
= cfg
->header
;
1665 sig
= mono_method_signature (cfg
->method
);
1667 if (!cfg
->arch
.cinfo
)
1668 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1669 cinfo
= cfg
->arch
.cinfo
;
1672 * FIXME: Remove some of the restrictions.
1674 cfg
->arch
.omit_fp
= TRUE
;
1675 cfg
->arch
.omit_fp_computed
= TRUE
;
1677 if (cfg
->disable_omit_fp
)
1678 cfg
->arch
.omit_fp
= FALSE
;
1679 if (!debug_omit_fp ())
1680 cfg
->arch
.omit_fp
= FALSE
;
1682 if (cfg->method->save_lmf)
1683 cfg->arch.omit_fp = FALSE;
1685 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1686 cfg
->arch
.omit_fp
= FALSE
;
1687 if (header
->num_clauses
)
1688 cfg
->arch
.omit_fp
= FALSE
;
1689 if (cfg
->param_area
)
1690 cfg
->arch
.omit_fp
= FALSE
;
1691 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1692 cfg
->arch
.omit_fp
= FALSE
;
1693 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
1694 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
))
1695 cfg
->arch
.omit_fp
= FALSE
;
1696 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1697 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1699 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1701 * The stack offset can only be determined when the frame
1704 cfg
->arch
.omit_fp
= FALSE
;
1709 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1710 MonoInst
*ins
= cfg
->varinfo
[i
];
1713 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
1718 * Set var information according to the calling convention. arm version.
1719 * The locals var stuff should most likely be split in another method.
1722 mono_arch_allocate_vars (MonoCompile
*cfg
)
1724 MonoMethodSignature
*sig
;
1725 MonoMethodHeader
*header
;
1728 int i
, offset
, size
, align
, curinst
;
1733 sig
= mono_method_signature (cfg
->method
);
1735 if (!cfg
->arch
.cinfo
)
1736 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1737 cinfo
= cfg
->arch
.cinfo
;
1738 sig_ret
= mini_get_underlying_type (sig
->ret
);
1740 mono_arch_compute_omit_fp (cfg
);
1742 if (cfg
->arch
.omit_fp
)
1743 cfg
->frame_reg
= ARMREG_SP
;
1745 cfg
->frame_reg
= ARMREG_FP
;
1747 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1749 /* allow room for the vararg method args: void* and long/double */
1750 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1751 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
1753 header
= cfg
->header
;
1755 /* See mono_arch_get_global_int_regs () */
1756 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1757 cfg
->uses_rgctx_reg
= TRUE
;
1759 if (cfg
->frame_reg
!= ARMREG_SP
)
1760 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1762 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1763 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1764 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1768 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1769 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1770 cfg
->ret
->opcode
= OP_REGVAR
;
1771 cfg
->ret
->inst_c0
= ARMREG_R0
;
1774 /* local vars are at a positive offset from the stack pointer */
1776 * also note that if the function uses alloca, we use FP
1777 * to point at the local variables.
1779 offset
= 0; /* linkage area */
1780 /* align the offset to 16 bytes: not sure this is needed here */
1782 //offset &= ~(8 - 1);
1784 /* add parameter area size for called functions */
1785 offset
+= cfg
->param_area
;
1788 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1791 /* allow room to save the return value */
1792 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1795 switch (cinfo
->ret
.storage
) {
1796 case RegTypeStructByVal
:
1798 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1799 offset
= ALIGN_TO (offset
, 8);
1800 cfg
->ret
->opcode
= OP_REGOFFSET
;
1801 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1802 cfg
->ret
->inst_offset
= offset
;
1803 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1804 offset
+= cinfo
->ret
.nregs
* sizeof (gpointer
);
1808 case RegTypeStructByAddr
:
1809 ins
= cfg
->vret_addr
;
1810 offset
+= sizeof(gpointer
) - 1;
1811 offset
&= ~(sizeof(gpointer
) - 1);
1812 ins
->inst_offset
= offset
;
1813 ins
->opcode
= OP_REGOFFSET
;
1814 ins
->inst_basereg
= cfg
->frame_reg
;
1815 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1816 g_print ("vret_addr =");
1817 mono_print_ins (cfg
->vret_addr
);
1819 offset
+= sizeof(gpointer
);
1825 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1826 if (cfg
->arch
.seq_point_info_var
) {
1829 ins
= cfg
->arch
.seq_point_info_var
;
1833 offset
+= align
- 1;
1834 offset
&= ~(align
- 1);
1835 ins
->opcode
= OP_REGOFFSET
;
1836 ins
->inst_basereg
= cfg
->frame_reg
;
1837 ins
->inst_offset
= offset
;
1840 if (cfg
->arch
.ss_trigger_page_var
) {
1843 ins
= cfg
->arch
.ss_trigger_page_var
;
1846 offset
+= align
- 1;
1847 offset
&= ~(align
- 1);
1848 ins
->opcode
= OP_REGOFFSET
;
1849 ins
->inst_basereg
= cfg
->frame_reg
;
1850 ins
->inst_offset
= offset
;
1854 if (cfg
->arch
.seq_point_ss_method_var
) {
1857 ins
= cfg
->arch
.seq_point_ss_method_var
;
1860 offset
+= align
- 1;
1861 offset
&= ~(align
- 1);
1862 ins
->opcode
= OP_REGOFFSET
;
1863 ins
->inst_basereg
= cfg
->frame_reg
;
1864 ins
->inst_offset
= offset
;
1867 if (cfg
->arch
.seq_point_bp_method_var
) {
1870 ins
= cfg
->arch
.seq_point_bp_method_var
;
1873 offset
+= align
- 1;
1874 offset
&= ~(align
- 1);
1875 ins
->opcode
= OP_REGOFFSET
;
1876 ins
->inst_basereg
= cfg
->frame_reg
;
1877 ins
->inst_offset
= offset
;
1881 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
1882 /* Allocate a temporary used by the atomic ops */
1886 /* Allocate a local slot to hold the sig cookie address */
1887 offset
+= align
- 1;
1888 offset
&= ~(align
- 1);
1889 cfg
->arch
.atomic_tmp_offset
= offset
;
1892 cfg
->arch
.atomic_tmp_offset
= -1;
1895 cfg
->locals_min_stack_offset
= offset
;
1897 curinst
= cfg
->locals_start
;
1898 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
1901 ins
= cfg
->varinfo
[i
];
1902 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
1905 t
= ins
->inst_vtype
;
1906 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1909 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1910 * pinvoke wrappers when they call functions returning structure */
1911 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1912 size
= mono_class_native_size (mono_class_from_mono_type (t
), &ualign
);
1916 size
= mono_type_size (t
, &align
);
1918 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1919 * since it loads/stores misaligned words, which don't do the right thing.
1921 if (align
< 4 && size
>= 4)
1923 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
1924 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
1925 offset
+= align
- 1;
1926 offset
&= ~(align
- 1);
1927 ins
->opcode
= OP_REGOFFSET
;
1928 ins
->inst_offset
= offset
;
1929 ins
->inst_basereg
= cfg
->frame_reg
;
1931 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1934 cfg
->locals_max_stack_offset
= offset
;
1938 ins
= cfg
->args
[curinst
];
1939 if (ins
->opcode
!= OP_REGVAR
) {
1940 ins
->opcode
= OP_REGOFFSET
;
1941 ins
->inst_basereg
= cfg
->frame_reg
;
1942 offset
+= sizeof (gpointer
) - 1;
1943 offset
&= ~(sizeof (gpointer
) - 1);
1944 ins
->inst_offset
= offset
;
1945 offset
+= sizeof (gpointer
);
1950 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1954 /* Allocate a local slot to hold the sig cookie address */
1955 offset
+= align
- 1;
1956 offset
&= ~(align
- 1);
1957 cfg
->sig_cookie
= offset
;
1961 for (i
= 0; i
< sig
->param_count
; ++i
) {
1962 ainfo
= cinfo
->args
+ i
;
1964 ins
= cfg
->args
[curinst
];
1966 switch (ainfo
->storage
) {
1968 offset
= ALIGN_TO (offset
, 8);
1969 ins
->opcode
= OP_REGOFFSET
;
1970 ins
->inst_basereg
= cfg
->frame_reg
;
1971 /* These arguments are saved to the stack in the prolog */
1972 ins
->inst_offset
= offset
;
1973 if (cfg
->verbose_level
>= 2)
1974 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
1982 if (ins
->opcode
!= OP_REGVAR
) {
1983 ins
->opcode
= OP_REGOFFSET
;
1984 ins
->inst_basereg
= cfg
->frame_reg
;
1985 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
1987 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1988 * since it loads/stores misaligned words, which don't do the right thing.
1990 if (align
< 4 && size
>= 4)
1992 /* The code in the prolog () stores words when storing vtypes received in a register */
1993 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
1995 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
1996 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
1997 offset
+= align
- 1;
1998 offset
&= ~(align
- 1);
1999 ins
->inst_offset
= offset
;
2005 /* align the offset to 8 bytes */
2006 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2007 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2012 cfg
->stack_offset
= offset
;
2016 mono_arch_create_vars (MonoCompile
*cfg
)
2018 MonoMethodSignature
*sig
;
2022 sig
= mono_method_signature (cfg
->method
);
2024 if (!cfg
->arch
.cinfo
)
2025 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2026 cinfo
= cfg
->arch
.cinfo
;
2028 if (IS_HARD_FLOAT
) {
2029 for (i
= 0; i
< 2; i
++) {
2030 MonoInst
*inst
= mono_compile_create_var (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
);
2031 inst
->flags
|= MONO_INST_VOLATILE
;
2033 cfg
->arch
.vfp_scratch_slots
[i
] = (gpointer
) inst
;
2037 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2038 cfg
->ret_var_is_local
= TRUE
;
2040 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2041 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
2042 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2043 g_print ("vret_addr = ");
2044 mono_print_ins (cfg
->vret_addr
);
2048 if (cfg
->gen_sdb_seq_points
) {
2049 if (cfg
->compile_aot
) {
2050 MonoInst
*ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2051 ins
->flags
|= MONO_INST_VOLATILE
;
2052 cfg
->arch
.seq_point_info_var
= ins
;
2054 if (!cfg
->soft_breakpoints
) {
2055 /* Allocate a separate variable for this to save 1 load per seq point */
2056 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2057 ins
->flags
|= MONO_INST_VOLATILE
;
2058 cfg
->arch
.ss_trigger_page_var
= ins
;
2061 if (cfg
->soft_breakpoints
) {
2064 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2065 ins
->flags
|= MONO_INST_VOLATILE
;
2066 cfg
->arch
.seq_point_ss_method_var
= ins
;
2068 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2069 ins
->flags
|= MONO_INST_VOLATILE
;
2070 cfg
->arch
.seq_point_bp_method_var
= ins
;
2076 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2078 MonoMethodSignature
*tmp_sig
;
2081 if (call
->tail_call
)
2084 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2087 * mono_ArgIterator_Setup assumes the signature cookie is
2088 * passed first and all the arguments which were before it are
2089 * passed on the stack after the signature. So compensate by
2090 * passing a different signature.
2092 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2093 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2094 tmp_sig
->sentinelpos
= 0;
2095 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2097 sig_reg
= mono_alloc_ireg (cfg
);
2098 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2100 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2105 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2110 LLVMCallInfo
*linfo
;
2112 n
= sig
->param_count
+ sig
->hasthis
;
2114 cinfo
= get_call_info (cfg
->mempool
, sig
);
2116 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2119 * LLVM always uses the native ABI while we use our own ABI, the
2120 * only difference is the handling of vtypes:
2121 * - we only pass/receive them in registers in some cases, and only
2122 * in 1 or 2 integer registers.
2124 switch (cinfo
->ret
.storage
) {
2125 case RegTypeGeneral
:
2128 case RegTypeIRegPair
:
2130 case RegTypeStructByAddr
:
2131 /* Vtype returned using a hidden argument */
2132 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2133 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2136 case RegTypeStructByVal
:
2137 /* LLVM models this by returning an int array */
2138 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2139 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2143 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2144 cfg
->disable_llvm
= TRUE
;
2148 for (i
= 0; i
< n
; ++i
) {
2149 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2150 ainfo
= cinfo
->args
+ i
;
2152 lainfo
->storage
= LLVMArgNone
;
2154 switch (ainfo
->storage
) {
2155 case RegTypeGeneral
:
2156 case RegTypeIRegPair
:
2158 case RegTypeBaseGen
:
2160 lainfo
->storage
= LLVMArgNormal
;
2162 case RegTypeStructByVal
:
2163 lainfo
->storage
= LLVMArgAsIArgs
;
2164 lainfo
->nslots
= ainfo
->struct_size
/ sizeof (gpointer
);
2166 case RegTypeStructByAddr
:
2167 case RegTypeStructByAddrOnStack
:
2168 lainfo
->storage
= LLVMArgVtypeByRef
;
2171 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2172 cfg
->disable_llvm
= TRUE
;
2182 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2185 MonoMethodSignature
*sig
;
2189 sig
= call
->signature
;
2190 n
= sig
->param_count
+ sig
->hasthis
;
2192 cinfo
= get_call_info (cfg
->mempool
, sig
);
2194 switch (cinfo
->ret
.storage
) {
2195 case RegTypeStructByVal
:
2197 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2198 /* The JIT will transform this into a normal call */
2199 call
->vret_in_reg
= TRUE
;
2202 if (call
->inst
.opcode
== OP_TAILCALL
)
2205 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2206 * the location pointed to by it after call in emit_move_return_value ().
2208 if (!cfg
->arch
.vret_addr_loc
) {
2209 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2210 /* Prevent it from being register allocated or optimized away */
2211 ((MonoInst
*)cfg
->arch
.vret_addr_loc
)->flags
|= MONO_INST_VOLATILE
;
2214 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, ((MonoInst
*)cfg
->arch
.vret_addr_loc
)->dreg
, call
->vret_var
->dreg
);
2216 case RegTypeStructByAddr
: {
2218 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2219 vtarg
->sreg1
= call
->vret_var
->dreg
;
2220 vtarg
->dreg
= mono_alloc_preg (cfg
);
2221 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2223 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2230 for (i
= 0; i
< n
; ++i
) {
2231 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2234 if (i
>= sig
->hasthis
)
2235 t
= sig
->params
[i
- sig
->hasthis
];
2237 t
= &mono_defaults
.int_class
->byval_arg
;
2238 t
= mini_get_underlying_type (t
);
2240 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2241 /* Emit the signature cookie just before the implicit arguments */
2242 emit_sig_cookie (cfg
, call
, cinfo
);
2245 in
= call
->args
[i
];
2247 switch (ainfo
->storage
) {
2248 case RegTypeGeneral
:
2249 case RegTypeIRegPair
:
2250 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2251 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2252 ins
->dreg
= mono_alloc_ireg (cfg
);
2253 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2254 MONO_ADD_INS (cfg
->cbb
, ins
);
2255 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2257 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2258 ins
->dreg
= mono_alloc_ireg (cfg
);
2259 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2260 MONO_ADD_INS (cfg
->cbb
, ins
);
2261 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2262 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2263 if (ainfo
->size
== 4) {
2264 if (IS_SOFT_FLOAT
) {
2265 /* mono_emit_call_args () have already done the r8->r4 conversion */
2266 /* The converted value is in an int vreg */
2267 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2268 ins
->dreg
= mono_alloc_ireg (cfg
);
2269 ins
->sreg1
= in
->dreg
;
2270 MONO_ADD_INS (cfg
->cbb
, ins
);
2271 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2275 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2276 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2277 creg
= mono_alloc_ireg (cfg
);
2278 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2279 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2282 if (IS_SOFT_FLOAT
) {
2283 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2284 ins
->dreg
= mono_alloc_ireg (cfg
);
2285 ins
->sreg1
= in
->dreg
;
2286 MONO_ADD_INS (cfg
->cbb
, ins
);
2287 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2289 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2290 ins
->dreg
= mono_alloc_ireg (cfg
);
2291 ins
->sreg1
= in
->dreg
;
2292 MONO_ADD_INS (cfg
->cbb
, ins
);
2293 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2297 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2298 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2299 creg
= mono_alloc_ireg (cfg
);
2300 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2301 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2302 creg
= mono_alloc_ireg (cfg
);
2303 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2304 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2307 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2309 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2310 ins
->dreg
= mono_alloc_ireg (cfg
);
2311 ins
->sreg1
= in
->dreg
;
2312 MONO_ADD_INS (cfg
->cbb
, ins
);
2314 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2317 case RegTypeStructByVal
:
2318 case RegTypeGSharedVtInReg
:
2319 case RegTypeGSharedVtOnStack
:
2321 case RegTypeStructByAddr
:
2322 case RegTypeStructByAddrOnStack
:
2323 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2324 ins
->opcode
= OP_OUTARG_VT
;
2325 ins
->sreg1
= in
->dreg
;
2326 ins
->klass
= in
->klass
;
2327 ins
->inst_p0
= call
;
2328 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2329 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2330 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2331 MONO_ADD_INS (cfg
->cbb
, ins
);
2334 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2335 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2336 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2337 if (t
->type
== MONO_TYPE_R8
) {
2338 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2341 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2343 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2346 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2349 case RegTypeBaseGen
:
2350 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2351 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2352 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2353 ins
->dreg
= mono_alloc_ireg (cfg
);
2354 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2355 MONO_ADD_INS (cfg
->cbb
, ins
);
2356 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2357 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2360 /* This should work for soft-float as well */
2362 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2363 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2364 creg
= mono_alloc_ireg (cfg
);
2365 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2366 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2367 creg
= mono_alloc_ireg (cfg
);
2368 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2369 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2370 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2372 g_assert_not_reached ();
2376 int fdreg
= mono_alloc_freg (cfg
);
2378 if (ainfo
->size
== 8) {
2379 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2380 ins
->sreg1
= in
->dreg
;
2382 MONO_ADD_INS (cfg
->cbb
, ins
);
2384 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2389 * Mono's register allocator doesn't speak single-precision registers that
2390 * overlap double-precision registers (i.e. armhf). So we have to work around
2391 * the register allocator and load the value from memory manually.
2393 * So we create a variable for the float argument and an instruction to store
2394 * the argument into the variable. We then store the list of these arguments
2395 * in call->float_args. This list is then used by emit_float_args later to
2396 * pass the arguments in the various call opcodes.
2398 * This is not very nice, and we should really try to fix the allocator.
2401 MonoInst
*float_arg
= mono_compile_create_var (cfg
, &mono_defaults
.single_class
->byval_arg
, OP_LOCAL
);
2403 /* Make sure the instruction isn't seen as pointless and removed.
2405 float_arg
->flags
|= MONO_INST_VOLATILE
;
2407 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2409 /* We use the dreg to look up the instruction later. The hreg is used to
2410 * emit the instruction that loads the value into the FP reg.
2412 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2413 fad
->vreg
= float_arg
->dreg
;
2414 fad
->hreg
= ainfo
->reg
;
2416 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2419 call
->used_iregs
|= 1 << ainfo
->reg
;
2420 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2424 g_assert_not_reached ();
2428 /* Handle the case where there are no implicit arguments */
2429 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2430 emit_sig_cookie (cfg
, call
, cinfo
);
2432 call
->call_info
= cinfo
;
2433 call
->stack_usage
= cinfo
->stack_usage
;
2437 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2443 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2444 ins
->dreg
= mono_alloc_freg (cfg
);
2445 ins
->sreg1
= arg
->dreg
;
2446 MONO_ADD_INS (cfg
->cbb
, ins
);
2447 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2450 g_assert_not_reached ();
2456 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2458 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2460 ArgInfo
*ainfo
= ins
->inst_p1
;
2461 int ovf_size
= ainfo
->vtsize
;
2462 int doffset
= ainfo
->offset
;
2463 int struct_size
= ainfo
->struct_size
;
2464 int i
, soffset
, dreg
, tmpreg
;
2466 switch (ainfo
->storage
) {
2467 case RegTypeGSharedVtInReg
:
2468 case RegTypeStructByAddr
:
2470 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2472 case RegTypeGSharedVtOnStack
:
2473 case RegTypeStructByAddrOnStack
:
2474 /* Pass by addr on stack */
2475 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2478 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2479 if (ainfo
->esize
== 4)
2480 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2482 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2483 load
->dreg
= mono_alloc_freg (cfg
);
2484 load
->inst_basereg
= src
->dreg
;
2485 load
->inst_offset
= i
* ainfo
->esize
;
2486 MONO_ADD_INS (cfg
->cbb
, load
);
2488 if (ainfo
->esize
== 4) {
2491 /* See RegTypeFP in mono_arch_emit_call () */
2492 MonoInst
*float_arg
= mono_compile_create_var (cfg
, &mono_defaults
.single_class
->byval_arg
, OP_LOCAL
);
2493 float_arg
->flags
|= MONO_INST_VOLATILE
;
2494 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2496 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2497 fad
->vreg
= float_arg
->dreg
;
2498 fad
->hreg
= ainfo
->reg
+ i
;
2500 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2502 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2508 for (i
= 0; i
< ainfo
->size
; ++i
) {
2509 dreg
= mono_alloc_ireg (cfg
);
2510 switch (struct_size
) {
2512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2518 tmpreg
= mono_alloc_ireg (cfg
);
2519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2521 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2522 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2523 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2524 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2525 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2528 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2531 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2532 soffset
+= sizeof (gpointer
);
2533 struct_size
-= sizeof (gpointer
);
2535 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2537 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (gpointer
), struct_size
), struct_size
< 4 ? 1 : 4);
2543 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2545 MonoType
*ret
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
2548 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2551 if (COMPILE_LLVM (cfg
)) {
2552 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2554 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2555 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2556 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2557 MONO_ADD_INS (cfg
->cbb
, ins
);
2562 case MONO_ARM_FPU_NONE
:
2563 if (ret
->type
== MONO_TYPE_R8
) {
2566 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2567 ins
->dreg
= cfg
->ret
->dreg
;
2568 ins
->sreg1
= val
->dreg
;
2569 MONO_ADD_INS (cfg
->cbb
, ins
);
2572 if (ret
->type
== MONO_TYPE_R4
) {
2573 /* Already converted to an int in method_to_ir () */
2574 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2578 case MONO_ARM_FPU_VFP
:
2579 case MONO_ARM_FPU_VFP_HARD
:
2580 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2583 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2584 ins
->dreg
= cfg
->ret
->dreg
;
2585 ins
->sreg1
= val
->dreg
;
2586 MONO_ADD_INS (cfg
->cbb
, ins
);
2591 g_assert_not_reached ();
2595 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2598 #endif /* #ifndef DISABLE_JIT */
2601 mono_arch_is_inst_imm (gint64 imm
)
2607 MonoMethodSignature
*sig
;
2610 MonoType
**param_types
;
2614 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2618 if (sig
->hasthis
+ sig
->param_count
> PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
2621 switch (cinfo
->ret
.storage
) {
2623 case RegTypeGeneral
:
2624 case RegTypeIRegPair
:
2625 case RegTypeStructByAddr
:
2636 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2637 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2640 switch (ainfo
->storage
) {
2641 case RegTypeGeneral
:
2642 case RegTypeIRegPair
:
2643 case RegTypeBaseGen
:
2647 if (ainfo
->offset
>= (DYN_CALL_STACK_ARGS
* sizeof (gpointer
)))
2650 case RegTypeStructByVal
:
2651 if (ainfo
->size
== 0)
2652 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2654 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2655 if (last_slot
>= PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
2663 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2664 for (i
= 0; i
< sig
->param_count
; ++i
) {
2665 MonoType
*t
= sig
->params
[i
];
2670 t
= mini_get_underlying_type (t
);
2693 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2695 ArchDynCallInfo
*info
;
2699 cinfo
= get_call_info (NULL
, sig
);
2701 if (!dyn_call_supported (cinfo
, sig
)) {
2706 info
= g_new0 (ArchDynCallInfo
, 1);
2707 // FIXME: Preprocess the info to speed up start_dyn_call ()
2709 info
->cinfo
= cinfo
;
2710 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2711 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2712 for (i
= 0; i
< sig
->param_count
; ++i
)
2713 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2715 return (MonoDynCallInfo
*)info
;
2719 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2721 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2723 g_free (ainfo
->cinfo
);
2728 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
, int buf_len
)
2730 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2731 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2732 int arg_index
, greg
, i
, j
, pindex
;
2733 MonoMethodSignature
*sig
= dinfo
->sig
;
2735 g_assert (buf_len
>= sizeof (DynCallArgs
));
2745 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2746 p
->regs
[greg
++] = (mgreg_t
)*(args
[arg_index
++]);
2751 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2752 p
->regs
[greg
++] = (mgreg_t
)ret
;
2754 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2755 MonoType
*t
= dinfo
->param_types
[i
];
2756 gpointer
*arg
= args
[arg_index
++];
2757 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2760 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2762 } else if (ainfo
->storage
== RegTypeFP
) {
2763 } else if (ainfo
->storage
== RegTypeBase
) {
2764 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2765 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2766 /* slot + 1 is the first stack slot, so the code below will work */
2769 g_assert_not_reached ();
2773 p
->regs
[slot
] = (mgreg_t
)*arg
;
2778 case MONO_TYPE_STRING
:
2779 case MONO_TYPE_CLASS
:
2780 case MONO_TYPE_ARRAY
:
2781 case MONO_TYPE_SZARRAY
:
2782 case MONO_TYPE_OBJECT
:
2786 p
->regs
[slot
] = (mgreg_t
)*arg
;
2789 p
->regs
[slot
] = *(guint8
*)arg
;
2792 p
->regs
[slot
] = *(gint8
*)arg
;
2795 p
->regs
[slot
] = *(gint16
*)arg
;
2798 p
->regs
[slot
] = *(guint16
*)arg
;
2801 p
->regs
[slot
] = *(gint32
*)arg
;
2804 p
->regs
[slot
] = *(guint32
*)arg
;
2808 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
2809 p
->regs
[slot
] = (mgreg_t
)arg
[1];
2812 if (ainfo
->storage
== RegTypeFP
) {
2813 float f
= *(float*)arg
;
2814 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
2817 p
->regs
[slot
] = *(mgreg_t
*)arg
;
2821 if (ainfo
->storage
== RegTypeFP
) {
2822 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
2825 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
2826 p
->regs
[slot
] = (mgreg_t
)arg
[1];
2829 case MONO_TYPE_GENERICINST
:
2830 if (MONO_TYPE_IS_REFERENCE (t
)) {
2831 p
->regs
[slot
] = (mgreg_t
)*arg
;
2834 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type (t
))) {
2835 MonoClass
*klass
= mono_class_from_mono_type (t
);
2836 guint8
*nullable_buf
;
2839 size
= mono_class_value_size (klass
, NULL
);
2840 nullable_buf
= g_alloca (size
);
2841 g_assert (nullable_buf
);
2843 /* The argument pointed to by arg is either a boxed vtype or null */
2844 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
2846 arg
= (gpointer
*)nullable_buf
;
2852 case MONO_TYPE_VALUETYPE
:
2853 g_assert (ainfo
->storage
== RegTypeStructByVal
);
2855 if (ainfo
->size
== 0)
2856 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2860 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
2861 p
->regs
[slot
++] = ((mgreg_t
*)arg
) [j
];
2864 g_assert_not_reached ();
2870 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
2872 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2873 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2874 MonoType
*ptype
= ainfo
->rtype
;
2875 guint8
*ret
= p
->ret
;
2876 mgreg_t res
= p
->res
;
2877 mgreg_t res2
= p
->res2
;
2879 switch (ptype
->type
) {
2880 case MONO_TYPE_VOID
:
2881 *(gpointer
*)ret
= NULL
;
2883 case MONO_TYPE_STRING
:
2884 case MONO_TYPE_CLASS
:
2885 case MONO_TYPE_ARRAY
:
2886 case MONO_TYPE_SZARRAY
:
2887 case MONO_TYPE_OBJECT
:
2891 *(gpointer
*)ret
= (gpointer
)res
;
2897 *(guint8
*)ret
= res
;
2900 *(gint16
*)ret
= res
;
2903 *(guint16
*)ret
= res
;
2906 *(gint32
*)ret
= res
;
2909 *(guint32
*)ret
= res
;
2913 /* This handles endianness as well */
2914 ((gint32
*)ret
) [0] = res
;
2915 ((gint32
*)ret
) [1] = res2
;
2917 case MONO_TYPE_GENERICINST
:
2918 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
2919 *(gpointer
*)ret
= (gpointer
)res
;
2924 case MONO_TYPE_VALUETYPE
:
2925 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
2931 *(float*)ret
= *(float*)&p
->fpregs
[0];
2933 *(float*)ret
= *(float*)&res
;
2935 case MONO_TYPE_R8
: {
2939 if (IS_HARD_FLOAT
) {
2940 *(double*)ret
= p
->fpregs
[0];
2945 *(double*)ret
= *(double*)®s
;
2950 g_assert_not_reached ();
2957 * Allow tracing to work with this interface (with an optional argument)
2961 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
2965 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
2966 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
2967 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
2968 code
= emit_call_reg (code
, ARMREG_R2
);
2982 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
2985 int save_mode
= SAVE_NONE
;
2987 MonoMethod
*method
= cfg
->method
;
2988 MonoType
*ret_type
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
2989 int rtype
= ret_type
->type
;
2990 int save_offset
= cfg
->param_area
;
2994 offset
= code
- cfg
->native_code
;
2995 /* we need about 16 instructions */
2996 if (offset
> (cfg
->code_size
- 16 * 4)) {
2997 cfg
->code_size
*= 2;
2998 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2999 code
= cfg
->native_code
+ offset
;
3002 case MONO_TYPE_VOID
:
3003 /* special case string .ctor icall */
3004 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
3005 save_mode
= SAVE_ONE
;
3007 save_mode
= SAVE_NONE
;
3011 save_mode
= SAVE_TWO
;
3015 save_mode
= SAVE_ONE_FP
;
3017 save_mode
= SAVE_ONE
;
3021 save_mode
= SAVE_TWO_FP
;
3023 save_mode
= SAVE_TWO
;
3025 case MONO_TYPE_GENERICINST
:
3026 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
3027 save_mode
= SAVE_ONE
;
3031 case MONO_TYPE_VALUETYPE
:
3032 save_mode
= SAVE_STRUCT
;
3035 save_mode
= SAVE_ONE
;
3039 switch (save_mode
) {
3041 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3042 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
3043 if (enable_arguments
) {
3044 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
3045 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3049 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3050 if (enable_arguments
) {
3051 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3055 ARM_FSTS (code
, ARM_VFP_F0
, cfg
->frame_reg
, save_offset
);
3056 if (enable_arguments
) {
3057 ARM_FMRS (code
, ARMREG_R1
, ARM_VFP_F0
);
3061 ARM_FSTD (code
, ARM_VFP_D0
, cfg
->frame_reg
, save_offset
);
3062 if (enable_arguments
) {
3063 ARM_FMDRR (code
, ARMREG_R1
, ARMREG_R2
, ARM_VFP_D0
);
3067 if (enable_arguments
) {
3068 /* FIXME: get the actual address */
3069 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3077 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
3078 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
3079 code
= emit_call_reg (code
, ARMREG_IP
);
3081 switch (save_mode
) {
3083 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3084 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
3087 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3090 ARM_FLDS (code
, ARM_VFP_F0
, cfg
->frame_reg
, save_offset
);
3093 ARM_FLDD (code
, ARM_VFP_D0
, cfg
->frame_reg
, save_offset
);
3104 * The immediate field for cond branches is big enough for all reasonable methods
3106 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3107 if (0 && ins->inst_true_bb->native_offset) { \
3108 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3110 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3111 ARM_B_COND (code, (condcode), 0); \
3114 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3116 /* emit an exception if condition is fail
3118 * We assign the extra code used to throw the implicit exceptions
3119 * to cfg->bb_exit as far as the big branch handling is concerned
3121 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3123 mono_add_patch_info (cfg, code - cfg->native_code, \
3124 MONO_PATCH_INFO_EXC, exc_name); \
3125 ARM_BL_COND (code, (condcode), 0); \
3128 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3131 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3136 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3140 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3141 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3143 switch (ins
->opcode
) {
3146 /* Already done by an arch-independent pass */
3148 case OP_LOAD_MEMBASE
:
3149 case OP_LOADI4_MEMBASE
:
3151 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3152 * OP_LOAD_MEMBASE offset(basereg), reg
3154 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3155 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3156 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3157 ins
->inst_offset
== last_ins
->inst_offset
) {
3158 if (ins
->dreg
== last_ins
->sreg1
) {
3159 MONO_DELETE_INS (bb
, ins
);
3162 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3163 ins
->opcode
= OP_MOVE
;
3164 ins
->sreg1
= last_ins
->sreg1
;
3168 * Note: reg1 must be different from the basereg in the second load
3169 * OP_LOAD_MEMBASE offset(basereg), reg1
3170 * OP_LOAD_MEMBASE offset(basereg), reg2
3172 * OP_LOAD_MEMBASE offset(basereg), reg1
3173 * OP_MOVE reg1, reg2
3175 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3176 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3177 ins
->inst_basereg
!= last_ins
->dreg
&&
3178 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3179 ins
->inst_offset
== last_ins
->inst_offset
) {
3181 if (ins
->dreg
== last_ins
->dreg
) {
3182 MONO_DELETE_INS (bb
, ins
);
3185 ins
->opcode
= OP_MOVE
;
3186 ins
->sreg1
= last_ins
->dreg
;
3189 //g_assert_not_reached ();
3193 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3194 * OP_LOAD_MEMBASE offset(basereg), reg
3196 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3197 * OP_ICONST reg, imm
3199 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3200 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3201 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3202 ins
->inst_offset
== last_ins
->inst_offset
) {
3203 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3204 ins
->opcode
= OP_ICONST
;
3205 ins
->inst_c0
= last_ins
->inst_imm
;
3206 g_assert_not_reached (); // check this rule
3210 case OP_LOADU1_MEMBASE
:
3211 case OP_LOADI1_MEMBASE
:
3212 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3213 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3214 ins
->inst_offset
== last_ins
->inst_offset
) {
3215 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3216 ins
->sreg1
= last_ins
->sreg1
;
3219 case OP_LOADU2_MEMBASE
:
3220 case OP_LOADI2_MEMBASE
:
3221 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3222 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3223 ins
->inst_offset
== last_ins
->inst_offset
) {
3224 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3225 ins
->sreg1
= last_ins
->sreg1
;
3229 ins
->opcode
= OP_MOVE
;
3233 if (ins
->dreg
== ins
->sreg1
) {
3234 MONO_DELETE_INS (bb
, ins
);
3238 * OP_MOVE sreg, dreg
3239 * OP_MOVE dreg, sreg
3241 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3242 ins
->sreg1
== last_ins
->dreg
&&
3243 ins
->dreg
== last_ins
->sreg1
) {
3244 MONO_DELETE_INS (bb
, ins
);
3253 * the branch_cc_table should maintain the order of these
3267 branch_cc_table
[] = {
3281 #define ADD_NEW_INS(cfg,dest,op) do { \
3282 MONO_INST_NEW ((cfg), (dest), (op)); \
3283 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3287 map_to_reg_reg_op (int op
)
3296 case OP_COMPARE_IMM
:
3298 case OP_ICOMPARE_IMM
:
3312 case OP_LOAD_MEMBASE
:
3313 return OP_LOAD_MEMINDEX
;
3314 case OP_LOADI4_MEMBASE
:
3315 return OP_LOADI4_MEMINDEX
;
3316 case OP_LOADU4_MEMBASE
:
3317 return OP_LOADU4_MEMINDEX
;
3318 case OP_LOADU1_MEMBASE
:
3319 return OP_LOADU1_MEMINDEX
;
3320 case OP_LOADI2_MEMBASE
:
3321 return OP_LOADI2_MEMINDEX
;
3322 case OP_LOADU2_MEMBASE
:
3323 return OP_LOADU2_MEMINDEX
;
3324 case OP_LOADI1_MEMBASE
:
3325 return OP_LOADI1_MEMINDEX
;
3326 case OP_STOREI1_MEMBASE_REG
:
3327 return OP_STOREI1_MEMINDEX
;
3328 case OP_STOREI2_MEMBASE_REG
:
3329 return OP_STOREI2_MEMINDEX
;
3330 case OP_STOREI4_MEMBASE_REG
:
3331 return OP_STOREI4_MEMINDEX
;
3332 case OP_STORE_MEMBASE_REG
:
3333 return OP_STORE_MEMINDEX
;
3334 case OP_STORER4_MEMBASE_REG
:
3335 return OP_STORER4_MEMINDEX
;
3336 case OP_STORER8_MEMBASE_REG
:
3337 return OP_STORER8_MEMINDEX
;
3338 case OP_STORE_MEMBASE_IMM
:
3339 return OP_STORE_MEMBASE_REG
;
3340 case OP_STOREI1_MEMBASE_IMM
:
3341 return OP_STOREI1_MEMBASE_REG
;
3342 case OP_STOREI2_MEMBASE_IMM
:
3343 return OP_STOREI2_MEMBASE_REG
;
3344 case OP_STOREI4_MEMBASE_IMM
:
3345 return OP_STOREI4_MEMBASE_REG
;
3347 g_assert_not_reached ();
3351 * Remove from the instruction list the instructions that can't be
3352 * represented with very simple instructions with no register
3356 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3358 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3359 int rot_amount
, imm8
, low_imm
;
3361 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3363 switch (ins
->opcode
) {
3367 case OP_COMPARE_IMM
:
3368 case OP_ICOMPARE_IMM
:
3382 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3383 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3384 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3385 temp
->inst_c0
= ins
->inst_imm
;
3386 temp
->dreg
= mono_alloc_ireg (cfg
);
3387 ins
->sreg2
= temp
->dreg
;
3389 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3390 ins
->opcode
= opcode2
;
3392 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3398 if (ins
->inst_imm
== 1) {
3399 ins
->opcode
= OP_MOVE
;
3402 if (ins
->inst_imm
== 0) {
3403 ins
->opcode
= OP_ICONST
;
3407 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3409 ins
->opcode
= OP_SHL_IMM
;
3410 ins
->inst_imm
= imm8
;
3413 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3414 temp
->inst_c0
= ins
->inst_imm
;
3415 temp
->dreg
= mono_alloc_ireg (cfg
);
3416 ins
->sreg2
= temp
->dreg
;
3417 ins
->opcode
= OP_IMUL
;
3423 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
3424 /* ARM sets the C flag to 1 if there was _no_ overflow */
3425 ins
->next
->opcode
= OP_COND_EXC_NC
;
3428 case OP_IDIV_UN_IMM
:
3430 case OP_IREM_UN_IMM
: {
3431 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3432 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3433 temp
->inst_c0
= ins
->inst_imm
;
3434 temp
->dreg
= mono_alloc_ireg (cfg
);
3435 ins
->sreg2
= temp
->dreg
;
3437 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3438 ins
->opcode
= opcode2
;
3441 case OP_LOCALLOC_IMM
:
3442 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3443 temp
->inst_c0
= ins
->inst_imm
;
3444 temp
->dreg
= mono_alloc_ireg (cfg
);
3445 ins
->sreg1
= temp
->dreg
;
3446 ins
->opcode
= OP_LOCALLOC
;
3448 case OP_LOAD_MEMBASE
:
3449 case OP_LOADI4_MEMBASE
:
3450 case OP_LOADU4_MEMBASE
:
3451 case OP_LOADU1_MEMBASE
:
3452 /* we can do two things: load the immed in a register
3453 * and use an indexed load, or see if the immed can be
3454 * represented as an ad_imm + a load with a smaller offset
3455 * that fits. We just do the first for now, optimize later.
3457 if (arm_is_imm12 (ins
->inst_offset
))
3459 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3460 temp
->inst_c0
= ins
->inst_offset
;
3461 temp
->dreg
= mono_alloc_ireg (cfg
);
3462 ins
->sreg2
= temp
->dreg
;
3463 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3465 case OP_LOADI2_MEMBASE
:
3466 case OP_LOADU2_MEMBASE
:
3467 case OP_LOADI1_MEMBASE
:
3468 if (arm_is_imm8 (ins
->inst_offset
))
3470 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3471 temp
->inst_c0
= ins
->inst_offset
;
3472 temp
->dreg
= mono_alloc_ireg (cfg
);
3473 ins
->sreg2
= temp
->dreg
;
3474 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3476 case OP_LOADR4_MEMBASE
:
3477 case OP_LOADR8_MEMBASE
:
3478 if (arm_is_fpimm8 (ins
->inst_offset
))
3480 low_imm
= ins
->inst_offset
& 0x1ff;
3481 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3482 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3483 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3484 temp
->sreg1
= ins
->inst_basereg
;
3485 temp
->dreg
= mono_alloc_ireg (cfg
);
3486 ins
->inst_basereg
= temp
->dreg
;
3487 ins
->inst_offset
= low_imm
;
3491 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3492 temp
->inst_c0
= ins
->inst_offset
;
3493 temp
->dreg
= mono_alloc_ireg (cfg
);
3495 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3496 add_ins
->sreg1
= ins
->inst_basereg
;
3497 add_ins
->sreg2
= temp
->dreg
;
3498 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3500 ins
->inst_basereg
= add_ins
->dreg
;
3501 ins
->inst_offset
= 0;
3504 case OP_STORE_MEMBASE_REG
:
3505 case OP_STOREI4_MEMBASE_REG
:
3506 case OP_STOREI1_MEMBASE_REG
:
3507 if (arm_is_imm12 (ins
->inst_offset
))
3509 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3510 temp
->inst_c0
= ins
->inst_offset
;
3511 temp
->dreg
= mono_alloc_ireg (cfg
);
3512 ins
->sreg2
= temp
->dreg
;
3513 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3515 case OP_STOREI2_MEMBASE_REG
:
3516 if (arm_is_imm8 (ins
->inst_offset
))
3518 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3519 temp
->inst_c0
= ins
->inst_offset
;
3520 temp
->dreg
= mono_alloc_ireg (cfg
);
3521 ins
->sreg2
= temp
->dreg
;
3522 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3524 case OP_STORER4_MEMBASE_REG
:
3525 case OP_STORER8_MEMBASE_REG
:
3526 if (arm_is_fpimm8 (ins
->inst_offset
))
3528 low_imm
= ins
->inst_offset
& 0x1ff;
3529 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3530 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3531 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3532 temp
->sreg1
= ins
->inst_destbasereg
;
3533 temp
->dreg
= mono_alloc_ireg (cfg
);
3534 ins
->inst_destbasereg
= temp
->dreg
;
3535 ins
->inst_offset
= low_imm
;
3539 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3540 temp
->inst_c0
= ins
->inst_offset
;
3541 temp
->dreg
= mono_alloc_ireg (cfg
);
3543 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3544 add_ins
->sreg1
= ins
->inst_destbasereg
;
3545 add_ins
->sreg2
= temp
->dreg
;
3546 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3548 ins
->inst_destbasereg
= add_ins
->dreg
;
3549 ins
->inst_offset
= 0;
3552 case OP_STORE_MEMBASE_IMM
:
3553 case OP_STOREI1_MEMBASE_IMM
:
3554 case OP_STOREI2_MEMBASE_IMM
:
3555 case OP_STOREI4_MEMBASE_IMM
:
3556 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3557 temp
->inst_c0
= ins
->inst_imm
;
3558 temp
->dreg
= mono_alloc_ireg (cfg
);
3559 ins
->sreg1
= temp
->dreg
;
3560 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3562 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3565 gboolean swap
= FALSE
;
3569 /* Optimized away */
3574 /* Some fp compares require swapped operands */
3575 switch (ins
->next
->opcode
) {
3577 ins
->next
->opcode
= OP_FBLT
;
3581 ins
->next
->opcode
= OP_FBLT_UN
;
3585 ins
->next
->opcode
= OP_FBGE
;
3589 ins
->next
->opcode
= OP_FBGE_UN
;
3597 ins
->sreg1
= ins
->sreg2
;
3606 bb
->last_ins
= last_ins
;
3607 bb
->max_vreg
= cfg
->next_vreg
;
3611 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3615 if (long_ins
->opcode
== OP_LNEG
) {
3617 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3618 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
3624 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3626 /* sreg is a float, dreg is an integer reg */
3628 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3630 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
3632 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3633 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3634 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3638 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3639 else if (size
== 2) {
3640 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3641 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3645 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3646 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3647 } else if (size
== 2) {
3648 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3649 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3656 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3658 /* sreg is a float, dreg is an integer reg */
3660 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3662 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
3664 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3665 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3666 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3670 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3671 else if (size
== 2) {
3672 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3673 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3677 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3678 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3679 } else if (size
== 2) {
3680 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3681 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3687 #endif /* #ifndef DISABLE_JIT */
3689 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3692 emit_thunk (guint8
*code
, gconstpointer target
)
3696 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3697 if (thumb_supported
)
3698 ARM_BX (code
, ARMREG_IP
);
3700 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3701 *(guint32
*)code
= (guint32
)target
;
3703 mono_arch_flush_icache (p
, code
- p
);
3707 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3709 MonoJitInfo
*ji
= NULL
;
3710 MonoThunkJitInfo
*info
;
3713 guint8
*orig_target
;
3714 guint8
*target_thunk
;
3717 domain
= mono_domain_get ();
3721 * This can be called multiple times during JITting,
3722 * save the current position in cfg->arch to avoid
3723 * doing a O(n^2) search.
3725 if (!cfg
->arch
.thunks
) {
3726 cfg
->arch
.thunks
= cfg
->thunks
;
3727 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3729 thunks
= cfg
->arch
.thunks
;
3730 thunks_size
= cfg
->arch
.thunks_size
;
3732 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3733 g_assert_not_reached ();
3736 g_assert (*(guint32
*)thunks
== 0);
3737 emit_thunk (thunks
, target
);
3738 arm_patch (code
, thunks
);
3740 cfg
->arch
.thunks
+= THUNK_SIZE
;
3741 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
3743 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3745 info
= mono_jit_info_get_thunk_info (ji
);
3748 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3749 thunks_size
= info
->thunks_size
;
3751 orig_target
= mono_arch_get_call_target (code
+ 4);
3753 mono_mini_arch_lock ();
3755 target_thunk
= NULL
;
3756 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3757 /* The call already points to a thunk, because of trampolines etc. */
3758 target_thunk
= orig_target
;
3760 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3761 if (((guint32
*)p
) [0] == 0) {
3765 } else if (((guint32
*)p
) [2] == (guint32
)target
) {
3766 /* Thunk already points to target */
3773 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3775 if (!target_thunk
) {
3776 mono_mini_arch_unlock ();
3777 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3778 g_assert_not_reached ();
3781 emit_thunk (target_thunk
, target
);
3782 arm_patch (code
, target_thunk
);
3783 mono_arch_flush_icache (code
, 4);
3785 mono_mini_arch_unlock ();
3790 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3792 guint32
*code32
= (void*)code
;
3793 guint32 ins
= *code32
;
3794 guint32 prim
= (ins
>> 25) & 7;
3795 guint32 tval
= GPOINTER_TO_UINT (target
);
3797 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3798 if (prim
== 5) { /* 101b */
3799 /* the diff starts 8 bytes from the branch opcode */
3800 gint diff
= target
- code
- 8;
3802 gint tmask
= 0xffffffff;
3803 if (tval
& 1) { /* entering thumb mode */
3804 diff
= target
- 1 - code
- 8;
3805 g_assert (thumb_supported
);
3806 tbits
= 0xf << 28; /* bl->blx bit pattern */
3807 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3808 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3812 tmask
= ~(1 << 24); /* clear the link bit */
3813 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3818 if (diff
<= 33554431) {
3820 ins
= (ins
& 0xff000000) | diff
;
3822 *code32
= ins
| tbits
;
3826 /* diff between 0 and -33554432 */
3827 if (diff
>= -33554432) {
3829 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3831 *code32
= ins
| tbits
;
3836 handle_thunk (cfg
, domain
, code
, target
);
3841 * The alternative call sequences looks like this:
3843 * ldr ip, [pc] // loads the address constant
3844 * b 1f // jumps around the constant
3845 * address constant embedded in the code
3850 * There are two cases for patching:
3851 * a) at the end of method emission: in this case code points to the start
3852 * of the call sequence
3853 * b) during runtime patching of the call site: in this case code points
3854 * to the mov pc, ip instruction
3856 * We have to handle also the thunk jump code sequence:
3860 * address constant // execution never reaches here
3862 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3863 /* Branch and exchange: the address is constructed in a reg
3864 * We can patch BX when the code sequence is the following:
3865 * ldr ip, [pc, #0] ; 0x8
3872 guint8
*emit
= (guint8
*)ccode
;
3873 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3875 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3876 ARM_BX (emit
, ARMREG_IP
);
3878 /*patching from magic trampoline*/
3879 if (ins
== ccode
[3]) {
3880 g_assert (code32
[-4] == ccode
[0]);
3881 g_assert (code32
[-3] == ccode
[1]);
3882 g_assert (code32
[-1] == ccode
[2]);
3883 code32
[-2] = (guint32
)target
;
3886 /*patching from JIT*/
3887 if (ins
== ccode
[0]) {
3888 g_assert (code32
[1] == ccode
[1]);
3889 g_assert (code32
[3] == ccode
[2]);
3890 g_assert (code32
[4] == ccode
[3]);
3891 code32
[2] = (guint32
)target
;
3894 g_assert_not_reached ();
3895 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
3903 guint8
*emit
= (guint8
*)ccode
;
3904 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3906 ARM_BLX_REG (emit
, ARMREG_IP
);
3908 g_assert (code32
[-3] == ccode
[0]);
3909 g_assert (code32
[-2] == ccode
[1]);
3910 g_assert (code32
[0] == ccode
[2]);
3912 code32
[-1] = (guint32
)target
;
3915 guint32
*tmp
= ccode
;
3916 guint8
*emit
= (guint8
*)tmp
;
3917 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3918 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3919 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3920 ARM_BX (emit
, ARMREG_IP
);
3921 if (ins
== ccode
[2]) {
3922 g_assert_not_reached (); // should be -2 ...
3923 code32
[-1] = (guint32
)target
;
3926 if (ins
== ccode
[0]) {
3927 /* handles both thunk jump code and the far call sequence */
3928 code32
[2] = (guint32
)target
;
3931 g_assert_not_reached ();
3933 // g_print ("patched with 0x%08x\n", ins);
3937 arm_patch (guchar
*code
, const guchar
*target
)
3939 arm_patch_general (NULL
, NULL
, code
, target
);
3943 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3944 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3945 * to be used with the emit macros.
3946 * Return -1 otherwise.
3949 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
3952 for (i
= 0; i
< 31; i
+= 2) {
3953 res
= (val
<< (32 - i
)) | (val
>> i
);
3956 *rot_amount
= i
? 32 - i
: 0;
3963 * Emits in code a sequence of instructions that load the value 'val'
3964 * into the dreg register. Uses at most 4 instructions.
3967 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
3969 int imm8
, rot_amount
;
3971 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
3972 /* skip the constant pool */
3978 if (mini_get_debug_options()->single_imm_size
&& v7_supported
) {
3979 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
3980 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
3984 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
3985 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
3986 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
3987 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
3990 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
3992 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
3996 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
3998 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4000 if (val
& 0xFF0000) {
4001 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4003 if (val
& 0xFF000000) {
4004 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4006 } else if (val
& 0xFF00) {
4007 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4008 if (val
& 0xFF0000) {
4009 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4011 if (val
& 0xFF000000) {
4012 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4014 } else if (val
& 0xFF0000) {
4015 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4016 if (val
& 0xFF000000) {
4017 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4020 //g_assert_not_reached ();
4026 mono_arm_thumb_supported (void)
4028 return thumb_supported
;
4034 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4039 call
= (MonoCallInst
*)ins
;
4040 cinfo
= call
->call_info
;
4042 switch (cinfo
->ret
.storage
) {
4043 case RegTypeStructByVal
:
4045 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4048 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4049 /* The JIT treats this as a normal call */
4053 /* Load the destination address */
4054 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
4056 if (arm_is_imm12 (loc
->inst_offset
)) {
4057 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
4059 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4060 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4063 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4064 int rsize
= cinfo
->ret
.struct_size
;
4066 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4067 g_assert (rsize
>= 0);
4072 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4075 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4078 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
4084 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4085 if (cinfo
->ret
.esize
== 4)
4086 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4088 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
4097 switch (ins
->opcode
) {
4100 case OP_FCALL_MEMBASE
:
4102 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4103 if (sig_ret
->type
== MONO_TYPE_R4
) {
4104 if (IS_HARD_FLOAT
) {
4105 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4107 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4108 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4111 if (IS_HARD_FLOAT
) {
4112 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
4114 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4121 case OP_RCALL_MEMBASE
: {
4126 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4127 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4128 if (IS_HARD_FLOAT
) {
4129 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4131 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4132 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4144 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4149 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4150 MonoInst
*last_ins
= NULL
;
4151 guint last_offset
= 0;
4153 int imm8
, rot_amount
;
4155 /* we don't align basic blocks of loops on arm */
4157 if (cfg
->verbose_level
> 2)
4158 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4160 cpos
= bb
->max_offset
;
4162 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
4163 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4164 //g_assert (!mono_compile_aot);
4167 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4168 /* this is not thread save, but good enough */
4169 /* fixme: howto handle overflows? */
4170 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4173 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4174 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4175 (gpointer
)"mono_break");
4176 code
= emit_call_seq (cfg
, code
);
4179 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4180 offset
= code
- cfg
->native_code
;
4182 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
4184 if (offset
> (cfg
->code_size
- max_len
- 16)) {
4185 cfg
->code_size
*= 2;
4186 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4187 code
= cfg
->native_code
+ offset
;
4189 // if (ins->cil_code)
4190 // g_print ("cil code\n");
4191 mono_debug_record_line_number (cfg
, ins
, offset
);
4193 switch (ins
->opcode
) {
4194 case OP_MEMORY_BARRIER
:
4196 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4197 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4201 code
= emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4204 code
= emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
4206 case OP_ATOMIC_EXCHANGE_I4
:
4207 case OP_ATOMIC_CAS_I4
:
4208 case OP_ATOMIC_ADD_I4
: {
4212 g_assert (v7_supported
);
4215 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4217 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4219 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4223 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4224 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4226 switch (ins
->opcode
) {
4227 case OP_ATOMIC_EXCHANGE_I4
:
4229 ARM_DMB (code
, ARM_DMB_SY
);
4230 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4231 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4232 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4234 ARM_B_COND (code
, ARMCOND_NE
, 0);
4235 arm_patch (buf
[1], buf
[0]);
4237 case OP_ATOMIC_CAS_I4
:
4238 ARM_DMB (code
, ARM_DMB_SY
);
4240 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4241 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4243 ARM_B_COND (code
, ARMCOND_NE
, 0);
4244 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4245 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4247 ARM_B_COND (code
, ARMCOND_NE
, 0);
4248 arm_patch (buf
[2], buf
[0]);
4249 arm_patch (buf
[1], code
);
4251 case OP_ATOMIC_ADD_I4
:
4253 ARM_DMB (code
, ARM_DMB_SY
);
4254 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4255 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4256 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4257 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4259 ARM_B_COND (code
, ARMCOND_NE
, 0);
4260 arm_patch (buf
[1], buf
[0]);
4263 g_assert_not_reached ();
4266 ARM_DMB (code
, ARM_DMB_SY
);
4267 if (tmpreg
!= ins
->dreg
)
4268 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4269 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4272 case OP_ATOMIC_LOAD_I1
:
4273 case OP_ATOMIC_LOAD_U1
:
4274 case OP_ATOMIC_LOAD_I2
:
4275 case OP_ATOMIC_LOAD_U2
:
4276 case OP_ATOMIC_LOAD_I4
:
4277 case OP_ATOMIC_LOAD_U4
:
4278 case OP_ATOMIC_LOAD_R4
:
4279 case OP_ATOMIC_LOAD_R8
: {
4280 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4281 ARM_DMB (code
, ARM_DMB_SY
);
4283 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4285 switch (ins
->opcode
) {
4286 case OP_ATOMIC_LOAD_I1
:
4287 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4289 case OP_ATOMIC_LOAD_U1
:
4290 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4292 case OP_ATOMIC_LOAD_I2
:
4293 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4295 case OP_ATOMIC_LOAD_U2
:
4296 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4298 case OP_ATOMIC_LOAD_I4
:
4299 case OP_ATOMIC_LOAD_U4
:
4300 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4302 case OP_ATOMIC_LOAD_R4
:
4304 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4305 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4307 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4308 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4309 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4310 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4311 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4314 case OP_ATOMIC_LOAD_R8
:
4315 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4316 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4320 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4321 ARM_DMB (code
, ARM_DMB_SY
);
4324 case OP_ATOMIC_STORE_I1
:
4325 case OP_ATOMIC_STORE_U1
:
4326 case OP_ATOMIC_STORE_I2
:
4327 case OP_ATOMIC_STORE_U2
:
4328 case OP_ATOMIC_STORE_I4
:
4329 case OP_ATOMIC_STORE_U4
:
4330 case OP_ATOMIC_STORE_R4
:
4331 case OP_ATOMIC_STORE_R8
: {
4332 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4333 ARM_DMB (code
, ARM_DMB_SY
);
4335 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4337 switch (ins
->opcode
) {
4338 case OP_ATOMIC_STORE_I1
:
4339 case OP_ATOMIC_STORE_U1
:
4340 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4342 case OP_ATOMIC_STORE_I2
:
4343 case OP_ATOMIC_STORE_U2
:
4344 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4346 case OP_ATOMIC_STORE_I4
:
4347 case OP_ATOMIC_STORE_U4
:
4348 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4350 case OP_ATOMIC_STORE_R4
:
4352 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4353 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4355 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4356 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4357 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4358 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4359 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4362 case OP_ATOMIC_STORE_R8
:
4363 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4364 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4368 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4369 ARM_DMB (code
, ARM_DMB_SY
);
4373 ARM_SMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4376 ARM_UMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4378 case OP_STOREI1_MEMBASE_IMM
:
4379 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4380 g_assert (arm_is_imm12 (ins
->inst_offset
));
4381 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4383 case OP_STOREI2_MEMBASE_IMM
:
4384 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4385 g_assert (arm_is_imm8 (ins
->inst_offset
));
4386 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4388 case OP_STORE_MEMBASE_IMM
:
4389 case OP_STOREI4_MEMBASE_IMM
:
4390 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4391 g_assert (arm_is_imm12 (ins
->inst_offset
));
4392 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4394 case OP_STOREI1_MEMBASE_REG
:
4395 g_assert (arm_is_imm12 (ins
->inst_offset
));
4396 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4398 case OP_STOREI2_MEMBASE_REG
:
4399 g_assert (arm_is_imm8 (ins
->inst_offset
));
4400 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4402 case OP_STORE_MEMBASE_REG
:
4403 case OP_STOREI4_MEMBASE_REG
:
4404 /* this case is special, since it happens for spill code after lowering has been called */
4405 if (arm_is_imm12 (ins
->inst_offset
)) {
4406 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4408 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4409 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4412 case OP_STOREI1_MEMINDEX
:
4413 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4415 case OP_STOREI2_MEMINDEX
:
4416 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4418 case OP_STORE_MEMINDEX
:
4419 case OP_STOREI4_MEMINDEX
:
4420 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4423 g_assert_not_reached ();
4425 case OP_LOAD_MEMINDEX
:
4426 case OP_LOADI4_MEMINDEX
:
4427 case OP_LOADU4_MEMINDEX
:
4428 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4430 case OP_LOADI1_MEMINDEX
:
4431 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4433 case OP_LOADU1_MEMINDEX
:
4434 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4436 case OP_LOADI2_MEMINDEX
:
4437 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4439 case OP_LOADU2_MEMINDEX
:
4440 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4442 case OP_LOAD_MEMBASE
:
4443 case OP_LOADI4_MEMBASE
:
4444 case OP_LOADU4_MEMBASE
:
4445 /* this case is special, since it happens for spill code after lowering has been called */
4446 if (arm_is_imm12 (ins
->inst_offset
)) {
4447 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4449 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4450 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4453 case OP_LOADI1_MEMBASE
:
4454 g_assert (arm_is_imm8 (ins
->inst_offset
));
4455 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4457 case OP_LOADU1_MEMBASE
:
4458 g_assert (arm_is_imm12 (ins
->inst_offset
));
4459 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4461 case OP_LOADU2_MEMBASE
:
4462 g_assert (arm_is_imm8 (ins
->inst_offset
));
4463 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4465 case OP_LOADI2_MEMBASE
:
4466 g_assert (arm_is_imm8 (ins
->inst_offset
));
4467 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4469 case OP_ICONV_TO_I1
:
4470 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4471 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4473 case OP_ICONV_TO_I2
:
4474 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4475 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4477 case OP_ICONV_TO_U1
:
4478 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4480 case OP_ICONV_TO_U2
:
4481 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4482 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4486 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4488 case OP_COMPARE_IMM
:
4489 case OP_ICOMPARE_IMM
:
4490 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4491 g_assert (imm8
>= 0);
4492 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4496 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4497 * So instead of emitting a trap, we emit a call a C function and place a
4500 //*(int*)code = 0xef9f0001;
4503 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4504 (gpointer
)"mono_break");
4505 code
= emit_call_seq (cfg
, code
);
4507 case OP_RELAXED_NOP
:
4512 case OP_DUMMY_STORE
:
4513 case OP_DUMMY_ICONST
:
4514 case OP_DUMMY_R8CONST
:
4515 case OP_NOT_REACHED
:
4518 case OP_IL_SEQ_POINT
:
4519 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4521 case OP_SEQ_POINT
: {
4523 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4524 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4525 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4526 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4528 int dreg
= ARMREG_LR
;
4531 if (cfg
->soft_breakpoints
) {
4532 g_assert (!cfg
->compile_aot
);
4537 * For AOT, we use one got slot per method, which will point to a
4538 * SeqPointInfo structure, containing all the information required
4539 * by the code below.
4541 if (cfg
->compile_aot
) {
4542 g_assert (info_var
);
4543 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4544 g_assert (arm_is_imm12 (info_var
->inst_offset
));
4547 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4549 * Read from the single stepping trigger page. This will cause a
4550 * SIGSEGV when single stepping is enabled.
4551 * We do this _before_ the breakpoint, so single stepping after
4552 * a breakpoint is hit will step to the next IL offset.
4554 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4557 /* Single step check */
4558 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4559 if (cfg
->soft_breakpoints
) {
4560 /* Load the address of the sequence point method variable. */
4561 var
= ss_method_var
;
4563 g_assert (var
->opcode
== OP_REGOFFSET
);
4564 g_assert (arm_is_imm12 (var
->inst_offset
));
4565 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4567 /* Read the value and check whether it is non-zero. */
4568 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4569 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4570 /* Call it conditionally. */
4571 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4573 if (cfg
->compile_aot
) {
4574 /* Load the trigger page addr from the variable initialized in the prolog */
4575 var
= ss_trigger_page_var
;
4577 g_assert (var
->opcode
== OP_REGOFFSET
);
4578 g_assert (arm_is_imm12 (var
->inst_offset
));
4579 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4581 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4583 *(int*)code
= (int)ss_trigger_page
;
4586 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4590 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4592 /* Breakpoint check */
4593 if (cfg
->compile_aot
) {
4594 guint32 offset
= code
- cfg
->native_code
;
4597 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
4598 /* Add the offset */
4599 val
= ((offset
/ 4) * sizeof (guint8
*)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4600 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4601 if (arm_is_imm12 ((int)val
)) {
4602 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4604 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4606 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4608 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4609 g_assert (!(val
& 0xFF000000));
4611 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4613 /* What is faster, a branch or a load ? */
4614 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4615 /* The breakpoint instruction */
4616 if (cfg
->soft_breakpoints
)
4617 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4619 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4620 } else if (cfg
->soft_breakpoints
) {
4621 /* Load the address of the breakpoint method into ip. */
4622 var
= bp_method_var
;
4624 g_assert (var
->opcode
== OP_REGOFFSET
);
4625 g_assert (arm_is_imm12 (var
->inst_offset
));
4626 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4629 * A placeholder for a possible breakpoint inserted by
4630 * mono_arch_set_breakpoint ().
4635 * A placeholder for a possible breakpoint inserted by
4636 * mono_arch_set_breakpoint ().
4638 for (i
= 0; i
< 4; ++i
)
4643 * Add an additional nop so skipping the bp doesn't cause the ip to point
4644 * to another IL offset.
4652 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4655 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4659 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4662 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4663 g_assert (imm8
>= 0);
4664 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4668 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4669 g_assert (imm8
>= 0);
4670 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4674 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4675 g_assert (imm8
>= 0);
4676 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4679 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4680 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4682 case OP_IADD_OVF_UN
:
4683 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4684 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4687 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4688 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4690 case OP_ISUB_OVF_UN
:
4691 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4692 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4694 case OP_ADD_OVF_CARRY
:
4695 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4696 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4698 case OP_ADD_OVF_UN_CARRY
:
4699 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4700 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4702 case OP_SUB_OVF_CARRY
:
4703 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4704 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4706 case OP_SUB_OVF_UN_CARRY
:
4707 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4708 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4712 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4715 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4716 g_assert (imm8
>= 0);
4717 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4720 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4724 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4728 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4729 g_assert (imm8
>= 0);
4730 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4734 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4735 g_assert (imm8
>= 0);
4736 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4738 case OP_ARM_RSBS_IMM
:
4739 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4740 g_assert (imm8
>= 0);
4741 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4743 case OP_ARM_RSC_IMM
:
4744 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4745 g_assert (imm8
>= 0);
4746 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4749 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4753 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4754 g_assert (imm8
>= 0);
4755 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4758 g_assert (v7s_supported
|| v7k_supported
);
4759 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4762 g_assert (v7s_supported
|| v7k_supported
);
4763 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4766 g_assert (v7s_supported
|| v7k_supported
);
4767 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4768 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4771 g_assert (v7s_supported
|| v7k_supported
);
4772 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4773 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4777 g_assert_not_reached ();
4779 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4783 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4784 g_assert (imm8
>= 0);
4785 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4788 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4792 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4793 g_assert (imm8
>= 0);
4794 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4797 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4802 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4803 else if (ins
->dreg
!= ins
->sreg1
)
4804 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4807 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4812 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4813 else if (ins
->dreg
!= ins
->sreg1
)
4814 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4817 case OP_ISHR_UN_IMM
:
4819 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4820 else if (ins
->dreg
!= ins
->sreg1
)
4821 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4824 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4827 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4830 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4833 if (ins
->dreg
== ins
->sreg2
)
4834 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4836 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4839 g_assert_not_reached ();
4842 /* FIXME: handle ovf/ sreg2 != dreg */
4843 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4844 /* FIXME: MUL doesn't set the C/O flags on ARM */
4846 case OP_IMUL_OVF_UN
:
4847 /* FIXME: handle ovf/ sreg2 != dreg */
4848 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4849 /* FIXME: MUL doesn't set the C/O flags on ARM */
4852 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4855 /* Load the GOT offset */
4856 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
4857 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4859 *(gpointer
*)code
= NULL
;
4861 /* Load the value from the GOT */
4862 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4864 case OP_OBJC_GET_SELECTOR
:
4865 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4866 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4868 *(gpointer
*)code
= NULL
;
4870 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4872 case OP_ICONV_TO_I4
:
4873 case OP_ICONV_TO_U4
:
4875 if (ins
->dreg
!= ins
->sreg1
)
4876 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4879 int saved
= ins
->sreg2
;
4880 if (ins
->sreg2
== ARM_LSW_REG
) {
4881 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4884 if (ins
->sreg1
!= ARM_LSW_REG
)
4885 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4886 if (saved
!= ARM_MSW_REG
)
4887 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4891 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4892 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4895 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4896 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4898 case OP_MOVE_F_TO_I4
:
4900 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4902 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4903 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4904 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4905 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4908 case OP_MOVE_I4_TO_F
:
4910 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4912 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4913 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4916 case OP_FCONV_TO_R4
:
4919 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4921 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4922 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4927 MonoCallInst
*call
= (MonoCallInst
*)ins
;
4930 * The stack looks like the following:
4931 * <caller argument area>
4934 * <callee argument area>
4935 * Need to copy the arguments from the callee argument area to
4936 * the caller argument area, and pop the frame.
4938 if (call
->stack_usage
) {
4939 int i
, prev_sp_offset
= 0;
4941 /* Compute size of saved registers restored below */
4943 prev_sp_offset
= 2 * 4;
4945 prev_sp_offset
= 1 * 4;
4946 for (i
= 0; i
< 16; ++i
) {
4947 if (cfg
->used_int_regs
& (1 << i
))
4948 prev_sp_offset
+= 4;
4951 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
4953 /* Copy arguments on the stack to our argument area */
4954 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (mgreg_t
)) {
4955 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
);
4956 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
);
4961 * Keep in sync with mono_arch_emit_epilog
4963 g_assert (!cfg
->method
->save_lmf
);
4965 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
4967 if (cfg
->used_int_regs
)
4968 ARM_POP (code
, cfg
->used_int_regs
);
4969 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
4971 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
4974 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
4975 if (cfg
->compile_aot
) {
4976 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
4978 *(gpointer
*)code
= NULL
;
4980 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
4982 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
4983 cfg
->thunk_area
+= THUNK_SIZE
;
4988 /* ensure ins->sreg1 is not NULL */
4989 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
4992 g_assert (cfg
->sig_cookie
< 128);
4993 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
4994 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5004 call
= (MonoCallInst
*)ins
;
5007 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5009 if (ins
->flags
& MONO_INST_HAS_METHOD
)
5010 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
5012 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
5013 code
= emit_call_seq (cfg
, code
);
5014 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5015 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5016 code
= emit_move_return_value (cfg
, ins
, code
);
5023 case OP_VOIDCALL_REG
:
5026 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5028 code
= emit_call_reg (code
, ins
->sreg1
);
5029 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5030 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5031 code
= emit_move_return_value (cfg
, ins
, code
);
5033 case OP_FCALL_MEMBASE
:
5034 case OP_RCALL_MEMBASE
:
5035 case OP_LCALL_MEMBASE
:
5036 case OP_VCALL_MEMBASE
:
5037 case OP_VCALL2_MEMBASE
:
5038 case OP_VOIDCALL_MEMBASE
:
5039 case OP_CALL_MEMBASE
: {
5040 g_assert (ins
->sreg1
!= ARMREG_LR
);
5041 call
= (MonoCallInst
*)ins
;
5044 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5045 if (!arm_is_imm12 (ins
->inst_offset
)) {
5046 /* sreg1 might be IP */
5047 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5048 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5049 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5050 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5051 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5053 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5054 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5056 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5057 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5058 code
= emit_move_return_value (cfg
, ins
, code
);
5061 case OP_GENERIC_CLASS_INIT
: {
5065 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
5067 g_assert (arm_is_imm8 (byte_offset
));
5068 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5069 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5071 ARM_B_COND (code
, ARMCOND_NE
, 0);
5073 /* Uninitialized case */
5074 g_assert (ins
->sreg1
== ARMREG_R0
);
5076 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5077 (gpointer
)"mono_generic_class_init");
5078 code
= emit_call_seq (cfg
, code
);
5080 /* Initialized case */
5081 arm_patch (jump
, code
);
5085 /* round the size to 8 bytes */
5086 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5087 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5088 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5089 /* memzero the area: dreg holds the size, sp is the pointer */
5090 if (ins
->flags
& MONO_INST_INIT
) {
5091 guint8
*start_loop
, *branch_to_cond
;
5092 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5093 branch_to_cond
= code
;
5096 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5097 arm_patch (branch_to_cond
, code
);
5098 /* decrement by 4 and set flags */
5099 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (mgreg_t
));
5100 ARM_B_COND (code
, ARMCOND_GE
, 0);
5101 arm_patch (code
- 4, start_loop
);
5103 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5104 if (cfg
->param_area
)
5105 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5110 MonoInst
*var
= cfg
->dyn_call_var
;
5113 g_assert (var
->opcode
== OP_REGOFFSET
);
5114 g_assert (arm_is_imm12 (var
->inst_offset
));
5116 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5117 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5119 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5121 /* Save args buffer */
5122 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5124 /* Set stack slots using R0 as scratch reg */
5125 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5126 for (i
= 0; i
< DYN_CALL_STACK_ARGS
; ++i
) {
5127 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, (PARAM_REGS
+ i
) * sizeof (mgreg_t
));
5128 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_SP
, i
* sizeof (mgreg_t
));
5131 /* Set fp argument registers */
5132 if (IS_HARD_FLOAT
) {
5133 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5134 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5136 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5137 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5138 int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5139 g_assert (arm_is_fpimm8 (offset
));
5140 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5142 arm_patch (buf
[0], code
);
5145 /* Set argument registers */
5146 for (i
= 0; i
< PARAM_REGS
; ++i
)
5147 ARM_LDR_IMM (code
, i
, ARMREG_LR
, i
* sizeof (mgreg_t
));
5150 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5151 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5154 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5155 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5156 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5158 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5162 if (ins
->sreg1
!= ARMREG_R0
)
5163 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5164 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5165 (gpointer
)"mono_arch_throw_exception");
5166 code
= emit_call_seq (cfg
, code
);
5170 if (ins
->sreg1
!= ARMREG_R0
)
5171 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5172 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5173 (gpointer
)"mono_arch_rethrow_exception");
5174 code
= emit_call_seq (cfg
, code
);
5177 case OP_START_HANDLER
: {
5178 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5179 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5182 /* Reserve a param area, see filter-stack.exe */
5184 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5185 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5187 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5188 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5192 if (arm_is_imm12 (spvar
->inst_offset
)) {
5193 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5195 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5196 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5200 case OP_ENDFILTER
: {
5201 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5202 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5205 /* Free the param area */
5207 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5208 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5210 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5211 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5215 if (ins
->sreg1
!= ARMREG_R0
)
5216 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5217 if (arm_is_imm12 (spvar
->inst_offset
)) {
5218 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5220 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5221 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5222 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5224 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5227 case OP_ENDFINALLY
: {
5228 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5229 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5232 /* Free the param area */
5234 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5235 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5237 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5238 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5242 if (arm_is_imm12 (spvar
->inst_offset
)) {
5243 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5245 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5246 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5247 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5249 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5252 case OP_CALL_HANDLER
:
5253 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5254 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5255 cfg
->thunk_area
+= THUNK_SIZE
;
5256 mono_cfg_add_try_hole (cfg
, ins
->inst_eh_block
, code
, bb
);
5259 if (ins
->dreg
!= ARMREG_R0
)
5260 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5264 ins
->inst_c0
= code
- cfg
->native_code
;
5267 /*if (ins->inst_target_bb->native_offset) {
5269 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5271 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5272 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5276 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5280 * In the normal case we have:
5281 * ldr pc, [pc, ins->sreg1 << 2]
5284 * ldr lr, [pc, ins->sreg1 << 2]
5286 * After follows the data.
5287 * FIXME: add aot support.
5289 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5290 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5291 if (offset
+ max_len
> (cfg
->code_size
- 16)) {
5292 cfg
->code_size
+= max_len
;
5293 cfg
->code_size
*= 2;
5294 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
5295 code
= cfg
->native_code
+ offset
;
5297 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5299 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5303 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5304 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5308 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5309 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5313 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5314 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5318 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5319 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5323 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5324 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5327 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5328 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5331 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5332 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5335 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5336 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5339 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5340 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5343 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5344 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5346 case OP_COND_EXC_EQ
:
5347 case OP_COND_EXC_NE_UN
:
5348 case OP_COND_EXC_LT
:
5349 case OP_COND_EXC_LT_UN
:
5350 case OP_COND_EXC_GT
:
5351 case OP_COND_EXC_GT_UN
:
5352 case OP_COND_EXC_GE
:
5353 case OP_COND_EXC_GE_UN
:
5354 case OP_COND_EXC_LE
:
5355 case OP_COND_EXC_LE_UN
:
5356 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5358 case OP_COND_EXC_IEQ
:
5359 case OP_COND_EXC_INE_UN
:
5360 case OP_COND_EXC_ILT
:
5361 case OP_COND_EXC_ILT_UN
:
5362 case OP_COND_EXC_IGT
:
5363 case OP_COND_EXC_IGT_UN
:
5364 case OP_COND_EXC_IGE
:
5365 case OP_COND_EXC_IGE_UN
:
5366 case OP_COND_EXC_ILE
:
5367 case OP_COND_EXC_ILE_UN
:
5368 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5371 case OP_COND_EXC_IC
:
5372 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5374 case OP_COND_EXC_OV
:
5375 case OP_COND_EXC_IOV
:
5376 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5378 case OP_COND_EXC_NC
:
5379 case OP_COND_EXC_INC
:
5380 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5382 case OP_COND_EXC_NO
:
5383 case OP_COND_EXC_INO
:
5384 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5396 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5399 /* floating point opcodes */
5401 if (cfg
->compile_aot
) {
5402 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5404 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5406 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5409 /* FIXME: we can optimize the imm load by dealing with part of
5410 * the displacement in LDFD (aligning to 512).
5412 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
5413 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5417 if (cfg
->compile_aot
) {
5418 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5420 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5423 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5425 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
5426 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5428 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5431 case OP_STORER8_MEMBASE_REG
:
5432 /* This is generated by the local regalloc pass which runs after the lowering pass */
5433 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5434 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5435 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5436 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5438 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5441 case OP_LOADR8_MEMBASE
:
5442 /* This is generated by the local regalloc pass which runs after the lowering pass */
5443 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5444 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5445 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5446 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5448 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5451 case OP_STORER4_MEMBASE_REG
:
5452 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5454 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5456 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5457 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5458 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5459 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5462 case OP_LOADR4_MEMBASE
:
5464 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5466 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5467 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5468 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5469 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5470 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5473 case OP_ICONV_TO_R_UN
: {
5474 g_assert_not_reached ();
5477 case OP_ICONV_TO_R4
:
5479 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5480 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5482 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5483 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5484 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5485 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5486 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5489 case OP_ICONV_TO_R8
:
5490 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5491 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5492 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5493 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5497 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature (cfg
->method
)->ret
);
5498 if (sig_ret
->type
== MONO_TYPE_R4
) {
5500 if (IS_HARD_FLOAT
) {
5501 if (ins
->sreg1
!= ARM_VFP_D0
)
5502 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5504 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5507 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5510 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5514 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5516 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5520 case OP_FCONV_TO_I1
:
5521 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5523 case OP_FCONV_TO_U1
:
5524 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5526 case OP_FCONV_TO_I2
:
5527 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5529 case OP_FCONV_TO_U2
:
5530 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5532 case OP_FCONV_TO_I4
:
5534 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5536 case OP_FCONV_TO_U4
:
5538 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5540 case OP_FCONV_TO_I8
:
5541 case OP_FCONV_TO_U8
:
5542 g_assert_not_reached ();
5543 /* Implemented as helper calls */
5545 case OP_LCONV_TO_R_UN
:
5546 g_assert_not_reached ();
5547 /* Implemented as helper calls */
5549 case OP_LCONV_TO_OVF_I4_2
: {
5550 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5552 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5555 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5556 high_bit_not_set
= code
;
5557 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5559 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5560 valid_negative
= code
;
5561 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5562 invalid_negative
= code
;
5563 ARM_B_COND (code
, ARMCOND_AL
, 0);
5565 arm_patch (high_bit_not_set
, code
);
5567 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5568 valid_positive
= code
;
5569 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5571 arm_patch (invalid_negative
, code
);
5572 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5574 arm_patch (valid_negative
, code
);
5575 arm_patch (valid_positive
, code
);
5577 if (ins
->dreg
!= ins
->sreg1
)
5578 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5582 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5585 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5588 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5591 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5594 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5598 g_assert_not_reached ();
5602 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5608 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5613 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5616 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5617 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5621 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5624 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5625 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5629 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5632 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5633 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5634 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5638 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5641 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5642 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5646 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5649 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5650 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5651 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5655 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5658 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5659 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5663 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5666 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5667 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5671 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5674 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5675 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5678 /* ARM FPA flags table:
5679 * N Less than ARMCOND_MI
5680 * Z Equal ARMCOND_EQ
5681 * C Greater Than or Equal ARMCOND_CS
5682 * V Unordered ARMCOND_VS
5685 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5688 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5691 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5694 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5695 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5701 g_assert_not_reached ();
5705 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5707 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5708 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5709 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5713 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5714 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5719 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5720 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5722 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5723 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5725 *(guint32
*)code
= 0xffffffff;
5727 *(guint32
*)code
= 0x7fefffff;
5729 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5731 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5732 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5734 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5735 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5737 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5738 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5743 case OP_RCONV_TO_I1
:
5744 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5746 case OP_RCONV_TO_U1
:
5747 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5749 case OP_RCONV_TO_I2
:
5750 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5752 case OP_RCONV_TO_U2
:
5753 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5755 case OP_RCONV_TO_I4
:
5756 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5758 case OP_RCONV_TO_U4
:
5759 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5761 case OP_RCONV_TO_R4
:
5763 if (ins
->dreg
!= ins
->sreg1
)
5764 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5766 case OP_RCONV_TO_R8
:
5768 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5771 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5774 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5777 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5780 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5783 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5787 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5790 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5791 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5795 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5798 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5799 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5803 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5806 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5807 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5808 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5812 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5815 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5816 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5820 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5823 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5824 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5825 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5829 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5832 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5833 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5837 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5840 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5841 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5845 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5848 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5849 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5852 case OP_GC_LIVENESS_DEF
:
5853 case OP_GC_LIVENESS_USE
:
5854 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
5855 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5857 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
5858 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5859 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
5861 case OP_GC_SAFE_POINT
: {
5864 g_assert (mono_threads_is_coop_enabled ());
5866 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5867 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5869 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5870 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
, "mono_threads_state_poll");
5871 code
= emit_call_seq (cfg
, code
);
5872 arm_patch (buf
[0], code
);
5877 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
5878 g_assert_not_reached ();
5881 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
5882 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5883 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
5884 g_assert_not_reached ();
5890 last_offset
= offset
;
5893 cfg
->code_len
= code
- cfg
->native_code
;
5896 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level exception/diagnostic helpers as JIT
 * icalls so patch sites emitted by this backend (e.g. the throw sequences and
 * the stack-alignment check in the prolog) can be resolved by name.
 */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
	mono_register_jit_icall (mono_arm_unaligned_stack, "mono_arm_unaligned_stack", mono_create_icall_signature ("void"), TRUE);
}
/*
 * patch_lis_ori:
 *
 *   Patch the 32-bit immediate of a lis/ori instruction pair at IP with VAL.
 * NOTE(review): lis/ori are PowerPC instructions, not ARM — this macro looks
 * like leftover code copied from the PPC backend; all of its uses below sit
 * after g_assert_not_reached (), so it should never execute on ARM.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
	} while (0)
/*
 * mono_arch_patch_code_new:
 *
 *   Apply the patch described by JI to the native code of a compiled method.
 * CODE is the start of the method's native code; JI->ip.i is the offset of
 * the patch site within it; TARGET is the resolved value to patch in.
 * Only inlined switch jump tables are filled in here — every other live
 * patch kind is forwarded to arm_patch_general () in the default case.
 */
void
mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
	/* Absolute address of the patch site inside the method's code. */
	unsigned char *ip = ji->ip.i + code;

	/* NOTE(review): empty block — appears to be a leftover debugging hook. */
	if (ji->type == MONO_PATCH_INFO_SWITCH) {
	}

	switch (ji->type) {
	case MONO_PATCH_INFO_SWITCH: {
		gpointer *jt = (gpointer*)(ip + 8);
		int i;
		/* jt is the inlined jump table, 2 instructions after ip
		 * In the normal case we store the absolute addresses,
		 * otherwise the displacements.
		 */
		for (i = 0; i < ji->data.table->table_size; i++)
			jt [i] = code + (int)ji->data.table->table [i];
		break;
	}
	/*
	 * The cases below all assert before doing any work: they are dead
	 * PowerPC-era patching code (patch_lis_ori) kept only for reference.
	 */
	case MONO_PATCH_INFO_IP:
		g_assert_not_reached ();
		patch_lis_ori (ip, ip);
		break;
	case MONO_PATCH_INFO_METHOD_REL:
		g_assert_not_reached ();
		*((gpointer *)(ip)) = target;
		break;
	case MONO_PATCH_INFO_METHODCONST:
	case MONO_PATCH_INFO_CLASS:
	case MONO_PATCH_INFO_IMAGE:
	case MONO_PATCH_INFO_FIELD:
	case MONO_PATCH_INFO_VTABLE:
	case MONO_PATCH_INFO_IID:
	case MONO_PATCH_INFO_SFLDA:
	case MONO_PATCH_INFO_LDSTR:
	case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
	case MONO_PATCH_INFO_LDTOKEN:
		g_assert_not_reached ();
		/* from OP_AOTCONST : lis + ori */
		patch_lis_ori (ip, target);
		break;
	case MONO_PATCH_INFO_R4:
	case MONO_PATCH_INFO_R8:
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 2)) = target;
		break;
	case MONO_PATCH_INFO_EXC_NAME:
		g_assert_not_reached ();
		*((gconstpointer *)(ip + 1)) = target;
		break;
	case MONO_PATCH_INFO_NONE:
	case MONO_PATCH_INFO_BB_OVF:
	case MONO_PATCH_INFO_EXC_OVF:
		/* everything is dealt with at epilog output time */
		break;
	default:
		/* All remaining (live) patch kinds share the generic ARM patcher. */
		arm_patch_general (cfg, domain, ip, target);
		break;
	}
}
/*
 * mono_arm_unaligned_stack:
 *
 *   Diagnostic icall invoked by the stack-alignment check emitted in
 * mono_arch_emit_prolog () when SP is not aligned to
 * MONO_ARCH_FRAME_ALIGNMENT on method entry. Reaching it indicates a
 * JIT bug, so it aborts unconditionally.
 */
void
mono_arm_unaligned_stack (MonoMethod *method)
{
	g_assert_not_reached ();
}
/*
 * Stack frame layout:
 *
 *   ------------------- fp
 *   	MonoLMF structure or saved registers
 *   -------------------
 *   	locals
 *   -------------------
 *   	spilled regs
 *   -------------------
 *   	optional 8 bytes for tracing
 *   -------------------
 *   	param area             size is cfg->param_area
 *   ------------------- sp
 */
5999 mono_arch_emit_prolog (MonoCompile
*cfg
)
6001 MonoMethod
*method
= cfg
->method
;
6003 MonoMethodSignature
*sig
;
6005 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6010 int prev_sp_offset
, reg_offset
;
6012 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
6015 sig
= mono_method_signature (method
);
6016 cfg
->code_size
= 256 + sig
->param_count
* 64;
6017 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6019 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6021 alloc_size
= cfg
->stack_offset
;
6027 * The iphone uses R7 as the frame pointer, and it points at the saved
6032 * We can't use r7 as a frame pointer since it points into the middle of
6033 * the frame, so we keep using our own frame pointer.
6034 * FIXME: Optimize this.
6036 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6037 prev_sp_offset
+= 8; /* r7 and lr */
6038 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6039 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6040 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6043 if (!method
->save_lmf
) {
6045 /* No need to push LR again */
6046 if (cfg
->used_int_regs
)
6047 ARM_PUSH (code
, cfg
->used_int_regs
);
6049 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6050 prev_sp_offset
+= 4;
6052 for (i
= 0; i
< 16; ++i
) {
6053 if (cfg
->used_int_regs
& (1 << i
))
6054 prev_sp_offset
+= 4;
6056 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6058 for (i
= 0; i
< 16; ++i
) {
6059 if ((cfg
->used_int_regs
& (1 << i
))) {
6060 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6061 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6065 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6066 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6068 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6069 ARM_PUSH (code
, 0x5ff0);
6070 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6071 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6073 for (i
= 0; i
< 16; ++i
) {
6074 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6075 /* The original r7 is saved at the start */
6076 if (!(iphone_abi
&& i
== ARMREG_R7
))
6077 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6081 g_assert (reg_offset
== 4 * 10);
6082 pos
+= sizeof (MonoLMF
) - (4 * 10);
6086 orig_alloc_size
= alloc_size
;
6087 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6088 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6089 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6090 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6093 /* the stack used in the pushed regs */
6094 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6095 cfg
->stack_usage
= alloc_size
;
6097 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6098 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6100 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6101 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6103 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6105 if (cfg
->frame_reg
!= ARMREG_SP
) {
6106 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6107 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6109 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6110 prev_sp_offset
+= alloc_size
;
6112 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6113 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6115 /* compute max_offset in order to use short forward jumps
6116 * we could skip do it on arm because the immediate displacement
6117 * for jumps is large enough, it may be useful later for constant pools
6120 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6121 MonoInst
*ins
= bb
->code
;
6122 bb
->max_offset
= max_offset
;
6124 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
6127 MONO_BB_FOR_EACH_INS (bb
, ins
)
6128 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
6131 /* stack alignment check */
6135 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6136 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6137 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6138 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6140 ARM_B_COND (code, ARMCOND_EQ, 0);
6141 if (cfg->compile_aot)
6142 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6144 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6145 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_arm_unaligned_stack");
6146 code = emit_call_seq (cfg, code);
6147 arm_patch (buf [0], code);
6151 /* store runtime generic context */
6152 if (cfg
->rgctx_var
) {
6153 MonoInst
*ins
= cfg
->rgctx_var
;
6155 g_assert (ins
->opcode
== OP_REGOFFSET
);
6157 if (arm_is_imm12 (ins
->inst_offset
)) {
6158 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6160 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6161 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6165 /* load arguments allocated to register from the stack */
6168 cinfo
= get_call_info (NULL
, sig
);
6170 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6171 ArgInfo
*ainfo
= &cinfo
->ret
;
6172 inst
= cfg
->vret_addr
;
6173 g_assert (arm_is_imm12 (inst
->inst_offset
));
6174 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6177 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6178 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6180 /* Save the sig cookie address */
6181 g_assert (cookie
->storage
== RegTypeBase
);
6183 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6184 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6185 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6186 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6189 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6190 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6191 inst
= cfg
->args
[pos
];
6193 if (cfg
->verbose_level
> 2)
6194 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6196 if (inst
->opcode
== OP_REGVAR
) {
6197 if (ainfo
->storage
== RegTypeGeneral
)
6198 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6199 else if (ainfo
->storage
== RegTypeFP
) {
6200 g_assert_not_reached ();
6201 } else if (ainfo
->storage
== RegTypeBase
) {
6202 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6203 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6205 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6206 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6209 g_assert_not_reached ();
6211 if (cfg
->verbose_level
> 2)
6212 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
6214 switch (ainfo
->storage
) {
6216 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6217 if (ainfo
->esize
== 4)
6218 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6220 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6223 case RegTypeGeneral
:
6224 case RegTypeIRegPair
:
6225 case RegTypeGSharedVtInReg
:
6226 case RegTypeStructByAddr
:
6227 switch (ainfo
->size
) {
6229 if (arm_is_imm12 (inst
->inst_offset
))
6230 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6232 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6233 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6237 if (arm_is_imm8 (inst
->inst_offset
)) {
6238 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6240 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6241 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6245 if (arm_is_imm12 (inst
->inst_offset
)) {
6246 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6248 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6249 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6251 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6252 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6254 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6255 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6259 if (arm_is_imm12 (inst
->inst_offset
)) {
6260 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6262 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6263 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6268 case RegTypeBaseGen
:
6269 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6270 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6272 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6273 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6275 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6276 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6277 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6279 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6280 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6281 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6282 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6286 case RegTypeGSharedVtOnStack
:
6287 case RegTypeStructByAddrOnStack
:
6288 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6289 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6291 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6292 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6295 switch (ainfo
->size
) {
6297 if (arm_is_imm8 (inst
->inst_offset
)) {
6298 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6300 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6301 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6305 if (arm_is_imm8 (inst
->inst_offset
)) {
6306 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6308 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6309 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6313 if (arm_is_imm12 (inst
->inst_offset
)) {
6314 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6316 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6317 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6319 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6320 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6322 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6323 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6325 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6326 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6328 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6329 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6333 if (arm_is_imm12 (inst
->inst_offset
)) {
6334 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6336 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6337 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6343 int imm8
, rot_amount
;
6345 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6346 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6347 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6349 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6351 if (ainfo
->size
== 8)
6352 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6354 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6357 case RegTypeStructByVal
: {
6358 int doffset
= inst
->inst_offset
;
6362 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6363 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6364 if (arm_is_imm12 (doffset
)) {
6365 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6367 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6368 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6370 soffset
+= sizeof (gpointer
);
6371 doffset
+= sizeof (gpointer
);
6373 if (ainfo
->vtsize
) {
6374 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6375 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6376 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6381 g_assert_not_reached ();
6388 if (method
->save_lmf
)
6389 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6392 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
6394 if (cfg
->arch
.seq_point_info_var
) {
6395 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6397 /* Initialize the variable from a GOT slot */
6398 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6399 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6401 *(gpointer
*)code
= NULL
;
6403 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6405 g_assert (ins
->opcode
== OP_REGOFFSET
);
6407 if (arm_is_imm12 (ins
->inst_offset
)) {
6408 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6410 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6411 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6415 /* Initialize ss_trigger_page_var */
6416 if (!cfg
->soft_breakpoints
) {
6417 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6418 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6419 int dreg
= ARMREG_LR
;
6422 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6423 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6425 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6426 /* Load the trigger page addr */
6427 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6428 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6432 if (cfg
->arch
.seq_point_ss_method_var
) {
6433 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6434 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6436 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6437 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6439 if (cfg
->compile_aot
) {
6440 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6441 int dreg
= ARMREG_LR
;
6443 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6444 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6446 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6447 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
6448 ARM_STR_IMM (code
, dreg
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6450 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6451 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6453 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6455 *(gpointer
*)code
= &single_step_tramp
;
6457 *(gpointer
*)code
= breakpoint_tramp
;
6460 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6461 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6462 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6463 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6467 cfg
->code_len
= code
- cfg
->native_code
;
6468 g_assert (cfg
->code_len
< cfg
->code_size
);
6475 mono_arch_emit_epilog (MonoCompile
*cfg
)
6477 MonoMethod
*method
= cfg
->method
;
6478 int pos
, i
, rot_amount
;
6479 int max_epilog_size
= 16 + 20*4;
6483 if (cfg
->method
->save_lmf
)
6484 max_epilog_size
+= 128;
6486 if (mono_jit_trace_calls
!= NULL
)
6487 max_epilog_size
+= 50;
6489 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
6490 max_epilog_size
+= 50;
6492 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
6493 cfg
->code_size
*= 2;
6494 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
6495 cfg
->stat_code_reallocs
++;
6499 * Keep in sync with OP_JMP
6501 code
= cfg
->native_code
+ cfg
->code_len
;
6503 /* Save the uwind state which is needed by the out-of-line code */
6504 mono_emit_unwind_op_remember_state (cfg
, code
);
6506 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
)) {
6507 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
6511 /* Load returned vtypes into registers if needed */
6512 cinfo
= cfg
->arch
.cinfo
;
6513 switch (cinfo
->ret
.storage
) {
6514 case RegTypeStructByVal
: {
6515 MonoInst
*ins
= cfg
->ret
;
6517 if (cinfo
->ret
.nregs
== 1) {
6518 if (arm_is_imm12 (ins
->inst_offset
)) {
6519 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6521 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6522 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6525 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6526 int offset
= ins
->inst_offset
+ (i
* 4);
6527 if (arm_is_imm12 (offset
)) {
6528 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6530 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6531 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6538 MonoInst
*ins
= cfg
->ret
;
6540 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6541 if (cinfo
->ret
.esize
== 4)
6542 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6544 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6552 if (method
->save_lmf
) {
6553 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6554 /* all but r0-r3, sp and pc */
6555 pos
+= sizeof (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (mgreg_t
));
6558 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6560 /* This points to r4 inside MonoLMF->iregs */
6561 sp_adj
= (sizeof (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (mgreg_t
));
6563 regmask
= 0x9ff0; /* restore lr to pc */
6564 /* Skip caller saved registers not used by the method */
6565 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6566 regmask
&= ~(1 << reg
);
6571 /* Restored later */
6572 regmask
&= ~(1 << ARMREG_PC
);
6573 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6574 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6575 for (i
= 0; i
< 16; i
++) {
6576 if (regmask
& (1 << i
))
6579 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6581 ARM_POP (code
, regmask
);
6583 for (i
= 0; i
< 16; i
++) {
6584 if (regmask
& (1 << i
))
6585 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6587 /* Restore saved r7, restore LR to PC */
6588 /* Skip lr from the lmf */
6589 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6590 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (gpointer
), 0);
6591 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6592 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6595 int i
, nused_int_regs
= 0;
6597 for (i
= 0; i
< 16; i
++) {
6598 if (cfg
->used_int_regs
& (1 << i
))
6602 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6603 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6605 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6606 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6609 if (cfg
->frame_reg
!= ARMREG_SP
) {
6610 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6614 /* Restore saved gregs */
6615 if (cfg
->used_int_regs
) {
6616 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6617 ARM_POP (code
, cfg
->used_int_regs
);
6618 for (i
= 0; i
< 16; i
++) {
6619 if (cfg
->used_int_regs
& (1 << i
))
6620 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6623 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6624 /* Restore saved r7, restore LR to PC */
6625 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6627 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6628 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6632 /* Restore the unwind state to be the same as before the epilog */
6633 mono_emit_unwind_op_restore_state (cfg
, code
);
6635 cfg
->code_len
= code
- cfg
->native_code
;
6637 g_assert (cfg
->code_len
< cfg
->code_size
);
6642 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6644 MonoJumpInfo
*patch_info
;
6647 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6648 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6649 int max_epilog_size
= 50;
6651 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6652 exc_throw_pos
[i
] = NULL
;
6653 exc_throw_found
[i
] = 0;
6656 /* count the number of exception infos */
6659 * make sure we have enough space for exceptions
6661 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6662 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6663 i
= mini_exception_id_by_name (patch_info
->data
.target
);
6664 if (!exc_throw_found
[i
]) {
6665 max_epilog_size
+= 32;
6666 exc_throw_found
[i
] = TRUE
;
6671 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
6672 cfg
->code_size
*= 2;
6673 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
6674 cfg
->stat_code_reallocs
++;
6677 code
= cfg
->native_code
+ cfg
->code_len
;
6679 /* add code to raise exceptions */
6680 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6681 switch (patch_info
->type
) {
6682 case MONO_PATCH_INFO_EXC
: {
6683 MonoClass
*exc_class
;
6684 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6686 i
= mini_exception_id_by_name (patch_info
->data
.target
);
6687 if (exc_throw_pos
[i
]) {
6688 arm_patch (ip
, exc_throw_pos
[i
]);
6689 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6692 exc_throw_pos
[i
] = code
;
6694 arm_patch (ip
, code
);
6696 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6698 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6699 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6700 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
6701 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
6702 patch_info
->ip
.i
= code
- cfg
->native_code
;
6704 cfg
->thunk_area
+= THUNK_SIZE
;
6705 *(guint32
*)(gpointer
)code
= exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
;
6715 cfg
->code_len
= code
- cfg
->native_code
;
6717 g_assert (cfg
->code_len
< cfg
->code_size
);
6721 #endif /* #ifndef DISABLE_JIT */
6724 mono_arch_finish_init (void)
6729 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
6734 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
6741 mono_arch_print_tree (MonoInst
*tree
, int arity
)
6751 mono_arch_get_patch_offset (guint8
*code
)
6758 mono_arch_flush_register_windows (void)
6763 mono_arch_find_imt_method (mgreg_t
*regs
, guint8
*code
)
6765 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6769 mono_arch_find_static_call_vtable (mgreg_t
*regs
, guint8
*code
)
6771 return (MonoVTable
*) regs
[MONO_ARCH_RGCTX_REG
];
6775 mono_arch_get_cie_program (void)
6779 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
6784 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6785 #define BASE_SIZE (6 * 4)
6786 #define BSEARCH_ENTRY_SIZE (4 * 4)
6787 #define CMP_SIZE (3 * 4)
6788 #define BRANCH_SIZE (1 * 4)
6789 #define CALL_SIZE (2 * 4)
6790 #define WMC_SIZE (8 * 4)
6791 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6794 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6796 guint32 delta
= DISTANCE (target
, code
);
6798 g_assert (delta
>= 0 && delta
<= 0xFFF);
6799 *target
= *target
| delta
;
6804 #ifdef ENABLE_WRONG_METHOD_CHECK
/*
 * mini_dump_bad_imt:
 *
 *   Debug helper (only built with ENABLE_WRONG_METHOD_CHECK): report an
 * IMT lookup that compared unequal, with the offending instruction address.
 */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
}
6814 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6815 gpointer fail_tramp
)
6818 arminstr_t
*code
, *start
;
6819 gboolean large_offsets
= FALSE
;
6820 guint32
**constant_pool_starts
;
6821 arminstr_t
*vtable_target
= NULL
;
6822 int extra_space
= 0;
6823 #ifdef ENABLE_WRONG_METHOD_CHECK
6829 constant_pool_starts
= g_new0 (guint32
*, count
);
6831 for (i
= 0; i
< count
; ++i
) {
6832 MonoIMTCheckItem
*item
= imt_entries
[i
];
6833 if (item
->is_equals
) {
6834 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6836 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6837 item
->chunk_size
+= 32;
6838 large_offsets
= TRUE
;
6841 if (item
->check_target_idx
|| fail_case
) {
6842 if (!item
->compare_done
|| fail_case
)
6843 item
->chunk_size
+= CMP_SIZE
;
6844 item
->chunk_size
+= BRANCH_SIZE
;
6846 #ifdef ENABLE_WRONG_METHOD_CHECK
6847 item
->chunk_size
+= WMC_SIZE
;
6851 item
->chunk_size
+= 16;
6852 large_offsets
= TRUE
;
6854 item
->chunk_size
+= CALL_SIZE
;
6856 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6857 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6859 size
+= item
->chunk_size
;
6863 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6866 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
6868 code
= mono_domain_code_reserve (domain
, size
);
6871 unwind_ops
= mono_arch_get_cie_program ();
6874 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable
->klass
->name_space
, vtable
->klass
->name
, count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6875 for (i
= 0; i
< count
; ++i
) {
6876 MonoIMTCheckItem
*item
= imt_entries
[i
];
6877 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
6881 if (large_offsets
) {
6882 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6883 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (mgreg_t
));
6885 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
6886 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (mgreg_t
));
6888 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
6889 vtable_target
= code
;
6890 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
6891 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
6893 for (i
= 0; i
< count
; ++i
) {
6894 MonoIMTCheckItem
*item
= imt_entries
[i
];
6895 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
6896 gint32 vtable_offset
;
6898 item
->code_target
= (guint8
*)code
;
6900 if (item
->is_equals
) {
6901 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6903 if (item
->check_target_idx
|| fail_case
) {
6904 if (!item
->compare_done
|| fail_case
) {
6906 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6907 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
6909 item
->jmp_code
= (guint8
*)code
;
6910 ARM_B_COND (code
, ARMCOND_NE
, 0);
6912 /*Enable the commented code to assert on wrong method*/
6913 #ifdef ENABLE_WRONG_METHOD_CHECK
6915 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6916 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
6918 ARM_B_COND (code
, ARMCOND_EQ
, 0);
6920 /* Define this if your system is so bad that gdb is failing. */
6921 #ifdef BROKEN_DEV_ENV
6922 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
6924 arm_patch (code
- 1, mini_dump_bad_imt
);
6928 arm_patch (cond
, code
);
6932 if (item
->has_target_code
) {
6933 /* Load target address */
6934 target_code_ins
= code
;
6935 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6936 /* Save it to the fourth slot */
6937 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
6938 /* Restore registers and branch */
6939 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6941 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
6943 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
6944 if (!arm_is_imm12 (vtable_offset
)) {
6946 * We need to branch to a computed address but we don't have
6947 * a free register to store it, since IP must contain the
6948 * vtable address. So we push the two values to the stack, and
6949 * load them both using LDM.
6951 /* Compute target address */
6952 vtable_offset_ins
= code
;
6953 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6954 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
6955 /* Save it to the fourth slot */
6956 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
6957 /* Restore registers and branch */
6958 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6960 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
6962 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
6963 if (large_offsets
) {
6964 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (mgreg_t
));
6965 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (gpointer
));
6967 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
6968 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
6973 arm_patch (item
->jmp_code
, (guchar
*)code
);
6975 target_code_ins
= code
;
6976 /* Load target address */
6977 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6978 /* Save it to the fourth slot */
6979 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
6980 /* Restore registers and branch */
6981 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6983 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
6984 item
->jmp_code
= NULL
;
6988 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)item
->key
);
6990 /*must emit after unconditional branch*/
6991 if (vtable_target
) {
6992 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)vtable
);
6993 item
->chunk_size
+= 4;
6994 vtable_target
= NULL
;
6997 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6998 constant_pool_starts
[i
] = code
;
7000 code
+= extra_space
;
7004 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7005 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7007 item
->jmp_code
= (guint8
*)code
;
7008 ARM_B_COND (code
, ARMCOND_HS
, 0);
7013 for (i
= 0; i
< count
; ++i
) {
7014 MonoIMTCheckItem
*item
= imt_entries
[i
];
7015 if (item
->jmp_code
) {
7016 if (item
->check_target_idx
)
7017 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7019 if (i
> 0 && item
->is_equals
) {
7021 arminstr_t
*space_start
= constant_pool_starts
[i
];
7022 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7023 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)imt_entries
[j
]->key
);
7030 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
7031 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7036 g_free (constant_pool_starts
);
7038 mono_arch_flush_icache ((guint8
*)start
, size
);
7039 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
);
7040 mono_stats
.imt_trampolines_size
+= code
- start
;
7042 g_assert (DISTANCE (start
, code
) <= size
);
7044 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
7050 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
7052 return ctx
->regs
[reg
];
7056 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, mgreg_t val
)
7058 ctx
->regs
[reg
] = val
;
7062 * mono_arch_get_trampolines:
7064 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7068 mono_arch_get_trampolines (gboolean aot
)
7070 return mono_arm_get_exception_trampolines (aot
);
7074 mono_arch_install_handler_block_guard (MonoJitInfo
*ji
, MonoJitExceptionInfo
*clause
, MonoContext
*ctx
, gpointer new_value
)
7081 bp
= MONO_CONTEXT_GET_BP (ctx
);
7082 lr_loc
= (gpointer
*)(bp
+ clause
->exvar_offset
);
7084 old_value
= *lr_loc
;
7085 if ((char*)old_value
< (char*)ji
->code_start
|| (char*)old_value
> ((char*)ji
->code_start
+ ji
->code_size
))
7088 *lr_loc
= new_value
;
7093 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
7095 * mono_arch_set_breakpoint:
7097 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7098 * The location should contain code emitted by OP_SEQ_POINT.
7101 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
7104 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7105 MonoDebugOptions
*opt
= mini_get_debug_options ();
7108 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
7110 if (!breakpoint_tramp
)
7111 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
7113 g_assert (native_offset
% 4 == 0);
7114 g_assert (info
->bp_addrs
[native_offset
/ 4] == 0);
7115 info
->bp_addrs
[native_offset
/ 4] = opt
->soft_breakpoints
? breakpoint_tramp
: bp_trigger_page
;
7116 } else if (opt
->soft_breakpoints
) {
7118 ARM_BLX_REG (code
, ARMREG_LR
);
7119 mono_arch_flush_icache (code
- 4, 4);
7121 int dreg
= ARMREG_LR
;
7123 /* Read from another trigger page */
7124 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
7126 *(int*)code
= (int)bp_trigger_page
;
7128 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
7130 mono_arch_flush_icache (code
- 16, 16);
7133 /* This is currently implemented by emitting an SWI instruction, which
7134 * qemu/linux seems to convert to a SIGILL.
7136 *(int*)code
= (0xef << 24) | 8;
7138 mono_arch_flush_icache (code
- 4, 4);
7144 * mono_arch_clear_breakpoint:
7146 * Clear the breakpoint at IP.
7149 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
7151 MonoDebugOptions
*opt
= mini_get_debug_options ();
7156 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7157 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
7159 if (!breakpoint_tramp
)
7160 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
7162 g_assert (native_offset
% 4 == 0);
7163 g_assert (info
->bp_addrs
[native_offset
/ 4] == (opt
->soft_breakpoints
? breakpoint_tramp
: bp_trigger_page
));
7164 info
->bp_addrs
[native_offset
/ 4] = 0;
7165 } else if (opt
->soft_breakpoints
) {
7168 mono_arch_flush_icache (code
- 4, 4);
7170 for (i
= 0; i
< 4; ++i
)
7173 mono_arch_flush_icache (ip
, code
- ip
);
7178 * mono_arch_start_single_stepping:
7180 * Start single stepping.
7183 mono_arch_start_single_stepping (void)
7185 if (ss_trigger_page
)
7186 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
7188 single_step_tramp
= mini_get_single_step_trampoline ();
7192 * mono_arch_stop_single_stepping:
7194 * Stop single stepping.
7197 mono_arch_stop_single_stepping (void)
7199 if (ss_trigger_page
)
7200 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
7202 single_step_tramp
= NULL
;
7206 #define DBG_SIGNAL SIGBUS
7208 #define DBG_SIGNAL SIGSEGV
7212 * mono_arch_is_single_step_event:
7214 * Return whenever the machine state in SIGCTX corresponds to a single
7218 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7220 siginfo_t
*sinfo
= info
;
7222 if (!ss_trigger_page
)
7225 /* Sometimes the address is off by 4 */
7226 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7233 * mono_arch_is_breakpoint_event:
7235 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7238 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7240 siginfo_t
*sinfo
= info
;
7242 if (!ss_trigger_page
)
7245 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7246 /* Sometimes the address is off by 4 */
7247 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
7257 * mono_arch_skip_breakpoint:
7259 * See mini-amd64.c for docs.
7262 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
7264 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7268 * mono_arch_skip_single_step:
7270 * See mini-amd64.c for docs.
7273 mono_arch_skip_single_step (MonoContext
*ctx
)
7275 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7278 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7281 * mono_arch_get_seq_point_info:
7283 * See mini-amd64.c for docs.
7286 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
7291 // FIXME: Add a free function
7293 mono_domain_lock (domain
);
7294 info
= g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
7296 mono_domain_unlock (domain
);
7299 ji
= mono_jit_info_table_find (domain
, (char*)code
);
7302 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
7304 info
->ss_trigger_page
= ss_trigger_page
;
7305 info
->bp_trigger_page
= bp_trigger_page
;
7306 info
->ss_tramp_addr
= &single_step_tramp
;
7308 mono_domain_lock (domain
);
7309 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
7311 mono_domain_unlock (domain
);
7318 mono_arch_init_lmf_ext (MonoLMFExt
*ext
, gpointer prev_lmf
)
7320 ext
->lmf
.previous_lmf
= prev_lmf
;
7321 /* Mark that this is a MonoLMFExt */
7322 ext
->lmf
.previous_lmf
= (gpointer
)(((gssize
)ext
->lmf
.previous_lmf
) | 2);
7323 ext
->lmf
.sp
= (gssize
)ext
;
7327 * mono_arch_set_target:
7329 * Set the target architecture the JIT backend should generate code for, in the form
7330 * of a GNU target triplet. Only used in AOT mode.
7333 mono_arch_set_target (char *mtriple
)
7335 /* The GNU target triple format is not very well documented */
7336 if (strstr (mtriple
, "armv7")) {
7337 v5_supported
= TRUE
;
7338 v6_supported
= TRUE
;
7339 v7_supported
= TRUE
;
7341 if (strstr (mtriple
, "armv6")) {
7342 v5_supported
= TRUE
;
7343 v6_supported
= TRUE
;
7345 if (strstr (mtriple
, "armv7s")) {
7346 v7s_supported
= TRUE
;
7348 if (strstr (mtriple
, "armv7k")) {
7349 v7k_supported
= TRUE
;
7351 if (strstr (mtriple
, "thumbv7s")) {
7352 v5_supported
= TRUE
;
7353 v6_supported
= TRUE
;
7354 v7_supported
= TRUE
;
7355 v7s_supported
= TRUE
;
7356 thumb_supported
= TRUE
;
7357 thumb2_supported
= TRUE
;
7359 if (strstr (mtriple
, "darwin") || strstr (mtriple
, "ios")) {
7360 v5_supported
= TRUE
;
7361 v6_supported
= TRUE
;
7362 thumb_supported
= TRUE
;
7365 if (strstr (mtriple
, "gnueabi"))
7366 eabi_supported
= TRUE
;
7370 mono_arch_opcode_supported (int opcode
)
7373 case OP_ATOMIC_ADD_I4
:
7374 case OP_ATOMIC_EXCHANGE_I4
:
7375 case OP_ATOMIC_CAS_I4
:
7376 case OP_ATOMIC_LOAD_I1
:
7377 case OP_ATOMIC_LOAD_I2
:
7378 case OP_ATOMIC_LOAD_I4
:
7379 case OP_ATOMIC_LOAD_U1
:
7380 case OP_ATOMIC_LOAD_U2
:
7381 case OP_ATOMIC_LOAD_U4
:
7382 case OP_ATOMIC_STORE_I1
:
7383 case OP_ATOMIC_STORE_I2
:
7384 case OP_ATOMIC_STORE_I4
:
7385 case OP_ATOMIC_STORE_U1
:
7386 case OP_ATOMIC_STORE_U2
:
7387 case OP_ATOMIC_STORE_U4
:
7388 return v7_supported
;
7389 case OP_ATOMIC_LOAD_R4
:
7390 case OP_ATOMIC_LOAD_R8
:
7391 case OP_ATOMIC_STORE_R4
:
7392 case OP_ATOMIC_STORE_R8
:
7393 return v7_supported
&& IS_VFP
;
7400 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
7402 return get_call_info (mp
, sig
);
7406 mono_arch_get_get_tls_tramp (void)
7412 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
)
7415 mono_add_patch_info (cfg
, code
- cfg
->native_code
, patch_type
, data
);
7416 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
7418 *(gpointer
*)code
= NULL
;
7420 /* Load the value from the GOT */
7421 ARM_LDR_REG_REG (code
, dreg
, ARMREG_PC
, dreg
);