2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
16 #include <mono/metadata/abi-details.h>
17 #include <mono/metadata/appdomain.h>
18 #include <mono/metadata/profiler-private.h>
19 #include <mono/metadata/debug-helpers.h>
20 #include <mono/utils/mono-mmap.h>
21 #include <mono/utils/mono-hwcap-arm.h>
22 #include <mono/utils/mono-memory-model.h>
23 #include <mono/utils/mono-threads-coop.h>
26 #include "mini-arm-tls.h"
30 #include "debugger-agent.h"
32 #include "mono/arch/arm/arm-vfp-codegen.h"
34 #if (defined(HAVE_KW_THREAD) && defined(__linux__) && defined(__ARM_EABI__)) \
35 || defined(TARGET_ANDROID) \
36 || (defined(TARGET_IOS) && !defined(TARGET_WATCHOS))
40 /* Sanity check: This makes no sense */
41 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
42 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
46 * IS_SOFT_FLOAT: Is full software floating point used?
47 * IS_HARD_FLOAT: Is full hardware floating point used?
48 * IS_VFP: Is hardware floating point with software ABI used?
50 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
51 * IS_VFP may delegate to mono_arch_is_soft_float ().
54 #if defined(ARM_FPU_VFP_HARD)
55 #define IS_SOFT_FLOAT (FALSE)
56 #define IS_HARD_FLOAT (TRUE)
58 #elif defined(ARM_FPU_NONE)
59 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
60 #define IS_HARD_FLOAT (FALSE)
61 #define IS_VFP (!mono_arch_is_soft_float ())
63 #define IS_SOFT_FLOAT (FALSE)
64 #define IS_HARD_FLOAT (FALSE)
68 #define THUNK_SIZE (3 * 4)
70 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
73 void sys_icache_invalidate (void *start
, size_t len
);
76 /* This mutex protects architecture specific caches */
77 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
78 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
79 static mono_mutex_t mini_arch_mutex
;
81 static gboolean v5_supported
= FALSE
;
82 static gboolean v6_supported
= FALSE
;
83 static gboolean v7_supported
= FALSE
;
84 static gboolean v7s_supported
= FALSE
;
85 static gboolean v7k_supported
= FALSE
;
86 static gboolean thumb_supported
= FALSE
;
87 static gboolean thumb2_supported
= FALSE
;
89 * Whether to use the ARM EABI
91 static gboolean eabi_supported
= FALSE
;
94 * Whether to use the iphone ABI extensions:
95 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
96 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
97 * This is required for debugging/profiling tools to work, but it has some overhead so it should
98 * only be turned on in debug builds.
100 static gboolean iphone_abi
= FALSE
;
103 * The FPU we are generating code for. This is NOT runtime configurable right now,
104 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
106 static MonoArmFPU arm_fpu
;
108 #if defined(ARM_FPU_VFP_HARD)
110 * On armhf, d0-d7 are used for argument passing and d8-d15
111 * must be preserved across calls, which leaves us no room
112 * for scratch registers. So we use d14-d15 but back up their
113 * previous contents to a stack slot before using them - see
114 * mono_arm_emit_vfp_scratch_save/_restore ().
116 static int vfp_scratch1
= ARM_VFP_D14
;
117 static int vfp_scratch2
= ARM_VFP_D15
;
120 * On armel, d0-d7 do not need to be preserved, so we can
121 * freely make use of them as scratch registers.
123 static int vfp_scratch1
= ARM_VFP_D0
;
124 static int vfp_scratch2
= ARM_VFP_D1
;
129 static gpointer single_step_tramp
, breakpoint_tramp
;
132 * The code generated for sequence points reads from this location, which is
133 * made read-only when single stepping is enabled.
135 static gpointer ss_trigger_page
;
137 /* Enabled breakpoints read from this trigger page */
138 static gpointer bp_trigger_page
;
142 * floating point support: on ARM it is a mess, there are at least 3
143 * different setups, each of which is binary-incompatible with the others.
144 * 1) FPA: old and ugly, but unfortunately what current distros use
145 * the double binary format has the two words swapped. 8 double registers.
146 * Implemented usually by kernel emulation.
147 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
148 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
149 * 3) VFP: the new and actually sensible and useful FP support. Implemented
150 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
152 * We do not care about FPA. We will support soft float and VFP.
154 int mono_exc_esp_offset
= 0;
156 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
157 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
158 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
160 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
161 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
162 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
164 //#define DEBUG_IMT 0
167 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
171 mono_arch_regname (int reg
)
173 static const char * rnames
[] = {
174 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
175 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
176 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
179 if (reg
>= 0 && reg
< 16)
185 mono_arch_fregname (int reg
)
187 static const char * rnames
[] = {
188 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
189 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
190 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
191 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
192 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
193 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
196 if (reg
>= 0 && reg
< 32)
/*
 * emit_big_add: emit ARM machine code computing dreg = sreg + imm,
 * appending instructions at the code-stream cursor 'code'.
 * When imm can be encoded as an ARM rotated 8-bit immediate a single
 * ADD-immediate is emitted; otherwise imm is first materialized in a
 * register and a register-register ADD is used.
 * NOTE(review): this extraction is missing structural lines (braces,
 * else/return lines); the comments below describe only what is visible.
 */
204 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
/* Rotated 8-bit immediate encoding of imm (if representable) and its rotation. */
206 int imm8
, rot_amount
;
207 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
/* Fast path: imm fits an ARM rotated 8-bit immediate — one ADD instruction. */
208 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
/* Fallback: load imm into the IP scratch register, then dreg = sreg + IP
 * (presumably taken when dreg == sreg — guard lines not visible; confirm). */
212 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
213 ARM_ADD_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
/* Alternative fallback: materialize imm directly in dreg, then dreg += sreg
 * (avoids clobbering IP when dreg is free to use as the scratch). */
215 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
216 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
221 /* If dreg == sreg, this clobbers IP */
/*
 * emit_sub_imm: emit ARM machine code computing dreg = sreg - imm,
 * appending instructions at the code-stream cursor 'code'.
 * Mirror of emit_big_add: a single SUB-immediate when imm encodes as a
 * rotated 8-bit immediate, otherwise a load-immediate plus SUB reg,reg.
 * NOTE(review): structural lines (braces, else/return) are missing from
 * this extraction; comments describe only the visible instructions.
 */
223 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
/* Rotated 8-bit immediate encoding of imm (if representable) and its rotation. */
225 int imm8
, rot_amount
;
226 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
/* Fast path: single SUB with an encodable rotated immediate. */
227 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
/* Fallback: load imm into IP, then dreg = sreg - IP (clobbers IP — see
 * the note above this function). */
231 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
232 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
/* Alternative fallback: materialize imm in dreg, then dreg = dreg - sreg.
 * NOTE(review): operand order here differs from the IP path — in the
 * original file this branch compensates elsewhere; lines not visible. */
234 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
235 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
241 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
243 /* we can use r0-r3, since this is called only for incoming args on the stack */
244 if (size
> sizeof (gpointer
) * 4) {
246 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
247 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
248 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
249 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
250 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
251 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
252 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
253 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
254 ARM_B_COND (code
, ARMCOND_NE
, 0);
255 arm_patch (code
- 4, start_loop
);
258 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
259 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
261 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
262 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
268 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
269 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
270 doffset
= soffset
= 0;
272 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
273 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
279 g_assert (size
== 0);
/*
 * emit_call_reg: emit an indirect call through 'reg'.
 * Two sequences are visible: a single BLX (which requires ARMv5+ —
 * presumably guarded by v5_supported; the guard lines are not visible in
 * this extraction, confirm against the original), and the pre-v5 fallback
 * of manually setting LR from PC and branching by writing PC.
 */
284 emit_call_reg (guint8
*code
, int reg
)
/* ARMv5+ path: branch-with-link-and-exchange directly through reg. */
287 ARM_BLX_REG (code
, reg
)
/* Pre-v5 fallback: LR = PC (which reads as current insn + 8), ... */
289 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
/* ... then jump by moving the target register into PC. */
293 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
299 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
301 if (cfg
->method
->dynamic
) {
302 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
304 *(gpointer
*)code
= NULL
;
306 code
= emit_call_reg (code
, ARMREG_IP
);
310 cfg
->thunk_area
+= THUNK_SIZE
;
/*
 * mono_arm_patchable_b: emit a conditional branch (B<cond>) with a zero
 * displacement, to be fixed up later by the patching machinery
 * (arm_patch). 'cond' is an ARMCOND_* condition code.
 */
315 mono_arm_patchable_b (guint8
*code
, int cond
)
317 ARM_B_COND (code
, cond
, 0);
/*
 * mono_arm_patchable_bl: emit a conditional branch-and-link (BL<cond>)
 * with a zero displacement, to be fixed up later by the patching
 * machinery. 'cond' is an ARMCOND_* condition code.
 */
322 mono_arm_patchable_bl (guint8
*code
, int cond
)
324 ARM_BL_COND (code
, cond
, 0);
/*
 * mono_arm_emit_tls_get: emit code loading the TLS slot at 'tls_offset'
 * into 'dreg'. The offset is passed in R0 to an internal-method helper
 * (registered via a MONO_PATCH_INFO_INTERNAL_METHOD patch), whose result
 * comes back in R0 and is moved to dreg if needed.
 * The trailing g_assert_not_reached () is presumably the #else branch for
 * configurations without TLS support — preprocessor lines not visible here.
 */
329 mono_arm_emit_tls_get (MonoCompile
*cfg
, guint8
* code
, int dreg
, int tls_offset
)
/* Argument: R0 = tls_offset for the helper call. */
332 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, tls_offset
);
/* Record a patch so the JIT can bind the helper at this call site. */
333 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
335 code
= emit_call_seq (cfg
, code
);
/* Helper returns the TLS value in R0; move it to the requested dreg. */
336 if (dreg
!= ARMREG_R0
)
337 ARM_MOV_REG_REG (code
, dreg
, ARMREG_R0
);
339 g_assert_not_reached ();
345 mono_arm_emit_tls_get_reg (MonoCompile
*cfg
, guint8
* code
, int dreg
, int tls_offset_reg
)
348 if (tls_offset_reg
!= ARMREG_R0
)
349 ARM_MOV_REG_REG (code
, ARMREG_R0
, tls_offset_reg
);
350 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
352 code
= emit_call_seq (cfg
, code
);
353 if (dreg
!= ARMREG_R0
)
354 ARM_MOV_REG_REG (code
, dreg
, ARMREG_R0
);
356 g_assert_not_reached ();
362 mono_arm_emit_tls_set (MonoCompile
*cfg
, guint8
* code
, int sreg
, int tls_offset
)
365 if (sreg
!= ARMREG_R1
)
366 ARM_MOV_REG_REG (code
, ARMREG_R1
, sreg
);
367 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, tls_offset
);
368 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
370 code
= emit_call_seq (cfg
, code
);
372 g_assert_not_reached ();
378 mono_arm_emit_tls_set_reg (MonoCompile
*cfg
, guint8
* code
, int sreg
, int tls_offset_reg
)
381 /* Get sreg in R1 and tls_offset_reg in R0 */
382 if (tls_offset_reg
== ARMREG_R1
) {
383 if (sreg
== ARMREG_R0
) {
384 /* swap sreg and tls_offset_reg */
385 ARM_EOR_REG_REG (code
, sreg
, sreg
, tls_offset_reg
);
386 ARM_EOR_REG_REG (code
, tls_offset_reg
, sreg
, tls_offset_reg
);
387 ARM_EOR_REG_REG (code
, sreg
, sreg
, tls_offset_reg
);
389 ARM_MOV_REG_REG (code
, ARMREG_R0
, tls_offset_reg
);
390 if (sreg
!= ARMREG_R1
)
391 ARM_MOV_REG_REG (code
, ARMREG_R1
, sreg
);
394 if (sreg
!= ARMREG_R1
)
395 ARM_MOV_REG_REG (code
, ARMREG_R1
, sreg
);
396 if (tls_offset_reg
!= ARMREG_R0
)
397 ARM_MOV_REG_REG (code
, ARMREG_R0
, tls_offset_reg
);
399 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
401 code
= emit_call_seq (cfg
, code
);
403 g_assert_not_reached ();
411 * Emit code to push an LMF structure on the LMF stack.
412 * On arm, this is intermixed with the initialization of other fields of the structure.
415 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
417 gboolean get_lmf_fast
= FALSE
;
420 if (mono_arm_have_tls_get ()) {
422 if (cfg
->compile_aot
) {
424 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_TLS_OFFSET
, (gpointer
)TLS_KEY_LMF_ADDR
);
425 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
427 *(gpointer
*)code
= NULL
;
429 /* Load the value from the GOT */
430 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_PC
, ARMREG_R1
);
431 code
= mono_arm_emit_tls_get_reg (cfg
, code
, ARMREG_R0
, ARMREG_R1
);
433 gint32 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
434 g_assert (lmf_addr_tls_offset
!= -1);
435 code
= mono_arm_emit_tls_get (cfg
, code
, ARMREG_R0
, lmf_addr_tls_offset
);
440 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
441 (gpointer
)"mono_get_lmf_addr");
442 code
= emit_call_seq (cfg
, code
);
444 /* we build the MonoLMF structure on the stack - see mini-arm.h */
445 /* lmf_offset is the offset from the previous stack pointer,
446 * alloc_size is the total stack space allocated, so the offset
447 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
448 * The pointer to the struct is put in r1 (new_lmf).
449 * ip is used as scratch
450 * The callee-saved registers are already in the MonoLMF structure
452 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
453 /* r0 is the result from mono_get_lmf_addr () */
454 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
455 /* new_lmf->previous_lmf = *lmf_addr */
456 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
457 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
458 /* *(lmf_addr) = r1 */
459 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
460 /* Skip method (only needed for trampoline LMF frames) */
461 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
462 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
463 /* save the current IP */
464 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
465 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
467 for (i
= 0; i
< sizeof (MonoLMF
); i
+= sizeof (mgreg_t
))
468 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
479 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
483 for (list
= inst
->float_args
; list
; list
= list
->next
) {
484 FloatArgData
*fad
= list
->data
;
485 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
486 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
488 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
494 if (*offset
+ *max_len
> cfg
->code_size
) {
495 cfg
->code_size
+= *max_len
;
496 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
498 code
= cfg
->native_code
+ *offset
;
502 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
503 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
505 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
507 *offset
= code
- cfg
->native_code
;
514 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
518 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
520 inst
= (MonoInst
*) cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
523 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
524 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
525 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
527 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
534 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
538 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
540 inst
= (MonoInst
*) cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
543 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
544 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
545 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
547 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
556 * Emit code to pop an LMF structure from the LMF stack.
559 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
563 if (lmf_offset
< 32) {
564 basereg
= cfg
->frame_reg
;
569 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
572 /* ip = previous_lmf */
573 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
575 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
576 /* *(lmf_addr) = previous_lmf */
577 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
582 #endif /* #ifndef DISABLE_JIT */
585 * mono_arm_have_tls_get:
587 * Returns whether we have tls access implemented on the current
591 mono_arm_have_tls_get (void)
601 * mono_arch_get_argument_info:
602 * @csig: a method signature
603 * @param_count: the number of parameters to consider
604 * @arg_info: an array to store the result infos
606 * Gathers information on parameters such as size, alignment and
607 * padding. arg_info should be large enough to hold param_count + 1 entries.
609 * Returns the size of the activation frame.
612 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
614 int k
, frame_size
= 0;
615 guint32 size
, align
, pad
;
619 t
= mini_get_underlying_type (csig
->ret
);
620 if (MONO_TYPE_ISSTRUCT (t
)) {
621 frame_size
+= sizeof (gpointer
);
625 arg_info
[0].offset
= offset
;
628 frame_size
+= sizeof (gpointer
);
632 arg_info
[0].size
= frame_size
;
634 for (k
= 0; k
< param_count
; k
++) {
635 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
637 /* ignore alignment for now */
640 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
641 arg_info
[k
].pad
= pad
;
643 arg_info
[k
+ 1].pad
= 0;
644 arg_info
[k
+ 1].size
= size
;
646 arg_info
[k
+ 1].offset
= offset
;
650 align
= MONO_ARCH_FRAME_ALIGNMENT
;
651 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
652 arg_info
[k
].pad
= pad
;
657 #define MAX_ARCH_DELEGATE_PARAMS 3
660 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
662 guint8
*code
, *start
;
663 GSList
*unwind_ops
= mono_arch_get_cie_program ();
666 start
= code
= mono_global_codeman_reserve (12);
668 /* Replace the this argument with the target */
669 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
670 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
671 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
673 g_assert ((code
- start
) <= 12);
675 mono_arch_flush_icache (start
, 12);
679 size
= 8 + param_count
* 4;
680 start
= code
= mono_global_codeman_reserve (size
);
682 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
683 /* slide down the arguments */
684 for (i
= 0; i
< param_count
; ++i
) {
685 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
687 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
689 g_assert ((code
- start
) <= size
);
691 mono_arch_flush_icache (start
, size
);
695 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
697 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
698 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
702 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
);
708 * mono_arch_get_delegate_invoke_impls:
710 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
714 mono_arch_get_delegate_invoke_impls (void)
720 get_delegate_invoke_impl (&info
, TRUE
, 0);
721 res
= g_slist_prepend (res
, info
);
723 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
724 get_delegate_invoke_impl (&info
, FALSE
, i
);
725 res
= g_slist_prepend (res
, info
);
732 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
734 guint8
*code
, *start
;
737 /* FIXME: Support more cases */
738 sig_ret
= mini_get_underlying_type (sig
->ret
);
739 if (MONO_TYPE_ISSTRUCT (sig_ret
))
743 static guint8
* cached
= NULL
;
744 mono_mini_arch_lock ();
746 mono_mini_arch_unlock ();
751 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
754 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
755 mono_tramp_info_register (info
, NULL
);
758 mono_mini_arch_unlock ();
761 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
764 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
766 for (i
= 0; i
< sig
->param_count
; ++i
)
767 if (!mono_is_regsize_var (sig
->params
[i
]))
770 mono_mini_arch_lock ();
771 code
= cache
[sig
->param_count
];
773 mono_mini_arch_unlock ();
778 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
779 start
= mono_aot_get_trampoline (name
);
783 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
784 mono_tramp_info_register (info
, NULL
);
786 cache
[sig
->param_count
] = start
;
787 mono_mini_arch_unlock ();
795 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
801 mono_arch_get_this_arg_from_call (mgreg_t
*regs
, guint8
*code
)
803 return (gpointer
)regs
[ARMREG_R0
];
807 * Initialize the cpu to execute managed code.
810 mono_arch_cpu_init (void)
812 i8_align
= MONO_ABI_ALIGNOF (gint64
);
813 #ifdef MONO_CROSS_COMPILE
814 /* Need to set the alignment of i8 since it can differ on the target */
815 #ifdef TARGET_ANDROID
817 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
823 * Initialize architecture specific code.
826 mono_arch_init (void)
828 const char *cpu_arch
;
830 mono_os_mutex_init_recursive (&mini_arch_mutex
);
831 if (mini_get_debug_options ()->soft_breakpoints
) {
832 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
834 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
835 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
836 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
839 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception
);
840 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token
);
841 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind
);
842 #if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
843 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call
);
845 mono_aot_register_jit_icall ("mono_arm_unaligned_stack", mono_arm_unaligned_stack
);
847 #if defined(__ARM_EABI__)
848 eabi_supported
= TRUE
;
851 #if defined(ARM_FPU_VFP_HARD)
852 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
854 arm_fpu
= MONO_ARM_FPU_VFP
;
856 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
858 * If we're compiling with a soft float fallback and it
859 * turns out that no VFP unit is available, we need to
860 * switch to soft float. We don't do this for iOS, since
861 * iOS devices always have a VFP unit.
863 if (!mono_hwcap_arm_has_vfp
)
864 arm_fpu
= MONO_ARM_FPU_NONE
;
867 * This environment variable can be useful in testing
868 * environments to make sure the soft float fallback
869 * works. Most ARM devices have VFP units these days, so
870 * normally soft float code would not be exercised much.
872 const char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
874 if (soft
&& !strncmp (soft
, "1", 1))
875 arm_fpu
= MONO_ARM_FPU_NONE
;
879 v5_supported
= mono_hwcap_arm_is_v5
;
880 v6_supported
= mono_hwcap_arm_is_v6
;
881 v7_supported
= mono_hwcap_arm_is_v7
;
884 * On weird devices, the hwcap code may fail to detect
885 * the ARM version. In that case, we can at least safely
886 * assume the version the runtime was compiled for.
898 #if defined(__APPLE__)
899 /* iOS is special-cased here because we don't yet
900 have a way to properly detect CPU features on it. */
901 thumb_supported
= TRUE
;
904 thumb_supported
= mono_hwcap_arm_has_thumb
;
905 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
908 /* Format: armv(5|6|7[s])[-thumb[2]] */
909 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
911 /* Do this here so it overrides any detection. */
913 if (strncmp (cpu_arch
, "armv", 4) == 0) {
914 v5_supported
= cpu_arch
[4] >= '5';
915 v6_supported
= cpu_arch
[4] >= '6';
916 v7_supported
= cpu_arch
[4] >= '7';
917 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
918 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
921 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
922 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
927 * Cleanup architecture specific code.
930 mono_arch_cleanup (void)
935 * This function returns the optimizations supported on this cpu.
938 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
940 /* no arm-specific optimizations yet */
946 * This function test for all SIMD functions supported.
948 * Returns a bitmask corresponding to all supported versions.
952 mono_arch_cpu_enumerate_simd_versions (void)
954 /* SIMD is currently unimplemented */
/*
 * mono_arm_is_hard_float: whether code is being generated for the armhf
 * (VFP hard-float) ABI, i.e. the FPU selected at init is VFP_HARD.
 */
959 mono_arm_is_hard_float (void)
961 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
967 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
969 if (v7s_supported
|| v7k_supported
) {
983 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
985 mono_arch_is_soft_float (void)
987 return arm_fpu
== MONO_ARM_FPU_NONE
;
992 is_regsize_var (MonoType
*t
)
996 t
= mini_get_underlying_type (t
);
1003 case MONO_TYPE_FNPTR
:
1005 case MONO_TYPE_OBJECT
:
1006 case MONO_TYPE_STRING
:
1007 case MONO_TYPE_CLASS
:
1008 case MONO_TYPE_SZARRAY
:
1009 case MONO_TYPE_ARRAY
:
1011 case MONO_TYPE_GENERICINST
:
1012 if (!mono_type_generic_inst_is_valuetype (t
))
1015 case MONO_TYPE_VALUETYPE
:
1022 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
1027 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
1028 MonoInst
*ins
= cfg
->varinfo
[i
];
1029 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
1032 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
1035 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
1038 /* we can only allocate 32 bit values */
1039 if (is_regsize_var (ins
->inst_vtype
)) {
1040 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
1041 g_assert (i
== vmv
->idx
);
1042 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
1050 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
1054 mono_arch_compute_omit_fp (cfg
);
1057 * FIXME: Interface calls might go through a static rgctx trampoline which
1058 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1061 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1062 cfg
->uses_rgctx_reg
= TRUE
;
1064 if (cfg
->arch
.omit_fp
)
1065 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1066 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1067 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1068 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1070 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1071 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1073 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1074 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1075 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1076 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1077 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1078 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1084 * mono_arch_regalloc_cost:
1086 * Return the cost, in number of memory references, of the action of
1087 * allocating the variable VMV into a register during global register
1091 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1097 #endif /* #ifndef DISABLE_JIT */
1100 mono_arch_flush_icache (guint8
*code
, gint size
)
1102 #if defined(MONO_CROSS_COMPILE)
1104 sys_icache_invalidate (code
, size
);
1106 __builtin___clear_cache (code
, code
+ size
);
1113 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1116 if (*gr
> ARMREG_R3
) {
1118 ainfo
->offset
= *stack_size
;
1119 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1120 ainfo
->storage
= RegTypeBase
;
1123 ainfo
->storage
= RegTypeGeneral
;
1130 split
= i8_align
== 4;
1135 if (*gr
== ARMREG_R3
&& split
) {
1136 /* first word in r3 and the second on the stack */
1137 ainfo
->offset
= *stack_size
;
1138 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1139 ainfo
->storage
= RegTypeBaseGen
;
1141 } else if (*gr
>= ARMREG_R3
) {
1142 if (eabi_supported
) {
1143 /* darwin aligns longs to 4 byte only */
1144 if (i8_align
== 8) {
1149 ainfo
->offset
= *stack_size
;
1150 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1151 ainfo
->storage
= RegTypeBase
;
1154 if (eabi_supported
) {
1155 if (i8_align
== 8 && ((*gr
) & 1))
1158 ainfo
->storage
= RegTypeIRegPair
;
1167 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1170 * If we're calling a function like this:
1172 * void foo(float a, double b, float c)
1174 * We pass a in s0 and b in d1. That leaves us
1175 * with s1 being unused. The armhf ABI recognizes
1176 * this and requires register assignment to then
1177 * use that for the next single-precision arg,
1178 * i.e. c in this example. So float_spare either
1179 * tells us which reg to use for the next single-
1180 * precision arg, or it's -1, meaning use *fpr.
1182 * Note that even though most of the JIT speaks
1183 * double-precision, fpr represents single-
1184 * precision registers.
1186 * See parts 5.5 and 6.1.2 of the AAPCS for how
1190 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1191 ainfo
->storage
= RegTypeFP
;
1195 * If we're passing a double-precision value
1196 * and *fpr is odd (e.g. it's s1, s3, ...)
1197 * we need to use the next even register. So
1198 * we mark the current *fpr as a spare that
1199 * can be used for the next single-precision
1203 *float_spare
= *fpr
;
1208 * At this point, we have an even register
1209 * so we assign that and move along.
1213 } else if (*float_spare
>= 0) {
1215 * We're passing a single-precision value
1216 * and it looks like a spare single-
1217 * precision register is available. Let's
1221 ainfo
->reg
= *float_spare
;
1225 * If we hit this branch, we're passing a
1226 * single-precision value and we can simply
1227 * use the next available register.
1235 * We've exhausted available floating point
1236 * regs, so pass the rest on the stack.
1244 ainfo
->offset
= *stack_size
;
1245 ainfo
->reg
= ARMREG_SP
;
1246 ainfo
->storage
= RegTypeBase
;
1253 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1257 MonoClassField
*field
;
1258 MonoType
*ftype
, *prev_ftype
= NULL
;
1261 klass
= mono_class_from_mono_type (t
);
1263 while ((field
= mono_class_get_fields (klass
, &iter
))) {
1264 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1266 ftype
= mono_field_get_type (field
);
1267 ftype
= mini_get_underlying_type (ftype
);
1269 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1270 int nested_nfields
, nested_esize
;
1272 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1274 if (nested_esize
== 4)
1275 ftype
= &mono_defaults
.single_class
->byval_arg
;
1277 ftype
= &mono_defaults
.double_class
->byval_arg
;
1278 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1281 nfields
+= nested_nfields
;
1283 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1285 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1291 if (nfields
== 0 || nfields
> 4)
1293 *out_nfields
= nfields
;
1294 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1299 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1301 guint i
, gr
, fpr
, pstart
;
1303 int n
= sig
->hasthis
+ sig
->param_count
;
1307 guint32 stack_size
= 0;
1309 gboolean is_pinvoke
= sig
->pinvoke
;
1310 gboolean vtype_retaddr
= FALSE
;
1313 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1315 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1322 t
= mini_get_underlying_type (sig
->ret
);
1333 case MONO_TYPE_FNPTR
:
1334 case MONO_TYPE_CLASS
:
1335 case MONO_TYPE_OBJECT
:
1336 case MONO_TYPE_SZARRAY
:
1337 case MONO_TYPE_ARRAY
:
1338 case MONO_TYPE_STRING
:
1339 cinfo
->ret
.storage
= RegTypeGeneral
;
1340 cinfo
->ret
.reg
= ARMREG_R0
;
1344 cinfo
->ret
.storage
= RegTypeIRegPair
;
1345 cinfo
->ret
.reg
= ARMREG_R0
;
1349 cinfo
->ret
.storage
= RegTypeFP
;
1351 if (t
->type
== MONO_TYPE_R4
)
1352 cinfo
->ret
.size
= 4;
1354 cinfo
->ret
.size
= 8;
1356 if (IS_HARD_FLOAT
) {
1357 cinfo
->ret
.reg
= ARM_VFP_F0
;
1359 cinfo
->ret
.reg
= ARMREG_R0
;
1362 case MONO_TYPE_GENERICINST
:
1363 if (!mono_type_generic_inst_is_valuetype (t
)) {
1364 cinfo
->ret
.storage
= RegTypeGeneral
;
1365 cinfo
->ret
.reg
= ARMREG_R0
;
1368 if (mini_is_gsharedvt_variable_type (t
)) {
1369 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1373 case MONO_TYPE_VALUETYPE
:
1374 case MONO_TYPE_TYPEDBYREF
:
1375 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1376 cinfo
->ret
.storage
= RegTypeHFA
;
1378 cinfo
->ret
.nregs
= nfields
;
1379 cinfo
->ret
.esize
= esize
;
1382 int native_size
= mono_class_native_size (mono_class_from_mono_type (t
), &align
);
1385 #ifdef TARGET_WATCHOS
1390 if (native_size
<= max_size
) {
1391 cinfo
->ret
.storage
= RegTypeStructByVal
;
1392 cinfo
->ret
.struct_size
= native_size
;
1393 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1395 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1398 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1403 case MONO_TYPE_MVAR
:
1404 g_assert (mini_is_gsharedvt_type (t
));
1405 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1407 case MONO_TYPE_VOID
:
1410 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1413 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1418 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1419 * the first argument, allowing 'this' to be always passed in the first arg reg.
1420 * Also do this if the first argument is a reference type, since virtual calls
1421 * are sometimes made using calli without sig->hasthis set, like in the delegate
1424 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1426 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1428 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1432 cinfo
->ret
.reg
= gr
;
1434 cinfo
->vret_arg_index
= 1;
1438 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1441 if (vtype_retaddr
) {
1442 cinfo
->ret
.reg
= gr
;
1447 DEBUG(g_print("params: %d\n", sig
->param_count
));
1448 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1449 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1451 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1452 /* Prevent implicit arguments and sig_cookie from
1453 being passed in registers */
1456 /* Emit the signature cookie just before the implicit arguments */
1457 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1459 DEBUG(g_print("param %d: ", i
));
1460 if (sig
->params
[i
]->byref
) {
1461 DEBUG(g_print("byref\n"));
1462 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1466 t
= mini_get_underlying_type (sig
->params
[i
]);
1470 cinfo
->args
[n
].size
= 1;
1471 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1475 cinfo
->args
[n
].size
= 2;
1476 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1480 cinfo
->args
[n
].size
= 4;
1481 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1486 case MONO_TYPE_FNPTR
:
1487 case MONO_TYPE_CLASS
:
1488 case MONO_TYPE_OBJECT
:
1489 case MONO_TYPE_STRING
:
1490 case MONO_TYPE_SZARRAY
:
1491 case MONO_TYPE_ARRAY
:
1492 cinfo
->args
[n
].size
= sizeof (gpointer
);
1493 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1495 case MONO_TYPE_GENERICINST
:
1496 if (!mono_type_generic_inst_is_valuetype (t
)) {
1497 cinfo
->args
[n
].size
= sizeof (gpointer
);
1498 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1501 if (mini_is_gsharedvt_variable_type (t
)) {
1502 /* gsharedvt arguments are passed by ref */
1503 g_assert (mini_is_gsharedvt_type (t
));
1504 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1505 switch (ainfo
->storage
) {
1506 case RegTypeGeneral
:
1507 ainfo
->storage
= RegTypeGSharedVtInReg
;
1510 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1513 g_assert_not_reached ();
1518 case MONO_TYPE_TYPEDBYREF
:
1519 case MONO_TYPE_VALUETYPE
: {
1522 int nwords
, nfields
, esize
;
1525 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1526 if (fpr
+ nfields
< ARM_VFP_F16
) {
1527 ainfo
->storage
= RegTypeHFA
;
1529 ainfo
->nregs
= nfields
;
1530 ainfo
->esize
= esize
;
1541 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1542 size
= sizeof (MonoTypedRef
);
1543 align
= sizeof (gpointer
);
1545 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
1547 size
= mono_class_native_size (klass
, &align
);
1549 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1551 DEBUG(g_print ("load %d bytes struct\n", size
));
1553 #ifdef TARGET_WATCHOS
1554 /* Watchos pass large structures by ref */
1555 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1556 if (sig
->pinvoke
&& size
> 16) {
1557 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1558 switch (ainfo
->storage
) {
1559 case RegTypeGeneral
:
1560 ainfo
->storage
= RegTypeStructByAddr
;
1563 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1566 g_assert_not_reached ();
1575 align_size
+= (sizeof (gpointer
) - 1);
1576 align_size
&= ~(sizeof (gpointer
) - 1);
1577 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
1578 ainfo
->storage
= RegTypeStructByVal
;
1579 ainfo
->struct_size
= size
;
1580 /* FIXME: align stack_size if needed */
1581 if (eabi_supported
) {
1582 if (align
>= 8 && (gr
& 1))
1585 if (gr
> ARMREG_R3
) {
1587 ainfo
->vtsize
= nwords
;
1589 int rest
= ARMREG_R3
- gr
+ 1;
1590 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1592 ainfo
->size
= n_in_regs
;
1593 ainfo
->vtsize
= nwords
- n_in_regs
;
1596 nwords
-= n_in_regs
;
1598 if (sig
->call_convention
== MONO_CALL_VARARG
)
1599 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1600 stack_size
= ALIGN_TO (stack_size
, align
);
1601 ainfo
->offset
= stack_size
;
1602 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1603 stack_size
+= nwords
* sizeof (gpointer
);
1609 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1615 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1617 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1623 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1625 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1628 case MONO_TYPE_MVAR
:
1629 /* gsharedvt arguments are passed by ref */
1630 g_assert (mini_is_gsharedvt_type (t
));
1631 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1632 switch (ainfo
->storage
) {
1633 case RegTypeGeneral
:
1634 ainfo
->storage
= RegTypeGSharedVtInReg
;
1637 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1640 g_assert_not_reached ();
1644 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1649 /* Handle the case where there are no implicit arguments */
1650 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1651 /* Prevent implicit arguments and sig_cookie from
1652 being passed in registers */
1655 /* Emit the signature cookie just before the implicit arguments */
1656 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1659 /* align stack size to 8 */
1660 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1661 stack_size
= (stack_size
+ 7) & ~7;
1663 cinfo
->stack_usage
= stack_size
;
1669 mono_arch_tail_call_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
)
1671 MonoType
*callee_ret
;
1675 c1
= get_call_info (NULL
, caller_sig
);
1676 c2
= get_call_info (NULL
, callee_sig
);
1679 * Tail calls with more callee stack usage than the caller cannot be supported, since
1680 * the extra stack space would be left on the stack after the tail call.
1682 res
= c1
->stack_usage
>= c2
->stack_usage
;
1683 callee_ret
= mini_get_underlying_type (callee_sig
->ret
);
1684 if (callee_ret
&& MONO_TYPE_ISSTRUCT (callee_ret
) && c2
->ret
.storage
!= RegTypeStructByVal
)
1685 /* An address on the callee's stack is passed as the first argument */
1688 if (c2
->stack_usage
> 16 * 4)
1700 debug_omit_fp (void)
1703 return mono_debug_count ();
1710 * mono_arch_compute_omit_fp:
1712 * Determine whenever the frame pointer can be eliminated.
1715 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1717 MonoMethodSignature
*sig
;
1718 MonoMethodHeader
*header
;
1722 if (cfg
->arch
.omit_fp_computed
)
1725 header
= cfg
->header
;
1727 sig
= mono_method_signature (cfg
->method
);
1729 if (!cfg
->arch
.cinfo
)
1730 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1731 cinfo
= cfg
->arch
.cinfo
;
1734 * FIXME: Remove some of the restrictions.
1736 cfg
->arch
.omit_fp
= TRUE
;
1737 cfg
->arch
.omit_fp_computed
= TRUE
;
1739 if (cfg
->disable_omit_fp
)
1740 cfg
->arch
.omit_fp
= FALSE
;
1741 if (!debug_omit_fp ())
1742 cfg
->arch
.omit_fp
= FALSE
;
1744 if (cfg->method->save_lmf)
1745 cfg->arch.omit_fp = FALSE;
1747 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1748 cfg
->arch
.omit_fp
= FALSE
;
1749 if (header
->num_clauses
)
1750 cfg
->arch
.omit_fp
= FALSE
;
1751 if (cfg
->param_area
)
1752 cfg
->arch
.omit_fp
= FALSE
;
1753 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1754 cfg
->arch
.omit_fp
= FALSE
;
1755 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)) ||
1756 (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
))
1757 cfg
->arch
.omit_fp
= FALSE
;
1758 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1759 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1761 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1763 * The stack offset can only be determined when the frame
1766 cfg
->arch
.omit_fp
= FALSE
;
1771 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1772 MonoInst
*ins
= cfg
->varinfo
[i
];
1775 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
1780 * Set var information according to the calling convention. arm version.
1781 * The locals var stuff should most likely be split in another method.
1784 mono_arch_allocate_vars (MonoCompile
*cfg
)
1786 MonoMethodSignature
*sig
;
1787 MonoMethodHeader
*header
;
1790 int i
, offset
, size
, align
, curinst
;
1795 sig
= mono_method_signature (cfg
->method
);
1797 if (!cfg
->arch
.cinfo
)
1798 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1799 cinfo
= cfg
->arch
.cinfo
;
1800 sig_ret
= mini_get_underlying_type (sig
->ret
);
1802 mono_arch_compute_omit_fp (cfg
);
1804 if (cfg
->arch
.omit_fp
)
1805 cfg
->frame_reg
= ARMREG_SP
;
1807 cfg
->frame_reg
= ARMREG_FP
;
1809 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1811 /* allow room for the vararg method args: void* and long/double */
1812 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1813 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
1815 header
= cfg
->header
;
1817 /* See mono_arch_get_global_int_regs () */
1818 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1819 cfg
->uses_rgctx_reg
= TRUE
;
1821 if (cfg
->frame_reg
!= ARMREG_SP
)
1822 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1824 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1825 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1826 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1830 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1831 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1832 cfg
->ret
->opcode
= OP_REGVAR
;
1833 cfg
->ret
->inst_c0
= ARMREG_R0
;
1836 /* local vars are at a positive offset from the stack pointer */
1838 * also note that if the function uses alloca, we use FP
1839 * to point at the local variables.
1841 offset
= 0; /* linkage area */
1842 /* align the offset to 16 bytes: not sure this is needed here */
1844 //offset &= ~(8 - 1);
1846 /* add parameter area size for called functions */
1847 offset
+= cfg
->param_area
;
1850 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1853 /* allow room to save the return value */
1854 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1857 switch (cinfo
->ret
.storage
) {
1858 case RegTypeStructByVal
:
1860 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1861 offset
= ALIGN_TO (offset
, 8);
1862 cfg
->ret
->opcode
= OP_REGOFFSET
;
1863 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1864 cfg
->ret
->inst_offset
= offset
;
1865 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1866 offset
+= cinfo
->ret
.nregs
* sizeof (gpointer
);
1870 case RegTypeStructByAddr
:
1871 ins
= cfg
->vret_addr
;
1872 offset
+= sizeof(gpointer
) - 1;
1873 offset
&= ~(sizeof(gpointer
) - 1);
1874 ins
->inst_offset
= offset
;
1875 ins
->opcode
= OP_REGOFFSET
;
1876 ins
->inst_basereg
= cfg
->frame_reg
;
1877 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1878 g_print ("vret_addr =");
1879 mono_print_ins (cfg
->vret_addr
);
1881 offset
+= sizeof(gpointer
);
1887 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1888 if (cfg
->arch
.seq_point_info_var
) {
1891 ins
= cfg
->arch
.seq_point_info_var
;
1895 offset
+= align
- 1;
1896 offset
&= ~(align
- 1);
1897 ins
->opcode
= OP_REGOFFSET
;
1898 ins
->inst_basereg
= cfg
->frame_reg
;
1899 ins
->inst_offset
= offset
;
1902 ins
= cfg
->arch
.ss_trigger_page_var
;
1905 offset
+= align
- 1;
1906 offset
&= ~(align
- 1);
1907 ins
->opcode
= OP_REGOFFSET
;
1908 ins
->inst_basereg
= cfg
->frame_reg
;
1909 ins
->inst_offset
= offset
;
1913 if (cfg
->arch
.seq_point_ss_method_var
) {
1916 ins
= cfg
->arch
.seq_point_ss_method_var
;
1919 offset
+= align
- 1;
1920 offset
&= ~(align
- 1);
1921 ins
->opcode
= OP_REGOFFSET
;
1922 ins
->inst_basereg
= cfg
->frame_reg
;
1923 ins
->inst_offset
= offset
;
1926 ins
= cfg
->arch
.seq_point_bp_method_var
;
1929 offset
+= align
- 1;
1930 offset
&= ~(align
- 1);
1931 ins
->opcode
= OP_REGOFFSET
;
1932 ins
->inst_basereg
= cfg
->frame_reg
;
1933 ins
->inst_offset
= offset
;
1937 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
1938 /* Allocate a temporary used by the atomic ops */
1942 /* Allocate a local slot to hold the sig cookie address */
1943 offset
+= align
- 1;
1944 offset
&= ~(align
- 1);
1945 cfg
->arch
.atomic_tmp_offset
= offset
;
1948 cfg
->arch
.atomic_tmp_offset
= -1;
1951 cfg
->locals_min_stack_offset
= offset
;
1953 curinst
= cfg
->locals_start
;
1954 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
1957 ins
= cfg
->varinfo
[i
];
1958 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
1961 t
= ins
->inst_vtype
;
1962 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
1965 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1966 * pinvoke wrappers when they call functions returning structure */
1967 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
1968 size
= mono_class_native_size (mono_class_from_mono_type (t
), &ualign
);
1972 size
= mono_type_size (t
, &align
);
1974 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1975 * since it loads/stores misaligned words, which don't do the right thing.
1977 if (align
< 4 && size
>= 4)
1979 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
1980 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
1981 offset
+= align
- 1;
1982 offset
&= ~(align
- 1);
1983 ins
->opcode
= OP_REGOFFSET
;
1984 ins
->inst_offset
= offset
;
1985 ins
->inst_basereg
= cfg
->frame_reg
;
1987 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1990 cfg
->locals_max_stack_offset
= offset
;
1994 ins
= cfg
->args
[curinst
];
1995 if (ins
->opcode
!= OP_REGVAR
) {
1996 ins
->opcode
= OP_REGOFFSET
;
1997 ins
->inst_basereg
= cfg
->frame_reg
;
1998 offset
+= sizeof (gpointer
) - 1;
1999 offset
&= ~(sizeof (gpointer
) - 1);
2000 ins
->inst_offset
= offset
;
2001 offset
+= sizeof (gpointer
);
2006 if (sig
->call_convention
== MONO_CALL_VARARG
) {
2010 /* Allocate a local slot to hold the sig cookie address */
2011 offset
+= align
- 1;
2012 offset
&= ~(align
- 1);
2013 cfg
->sig_cookie
= offset
;
2017 for (i
= 0; i
< sig
->param_count
; ++i
) {
2018 ainfo
= cinfo
->args
+ i
;
2020 ins
= cfg
->args
[curinst
];
2022 switch (ainfo
->storage
) {
2024 offset
= ALIGN_TO (offset
, 8);
2025 ins
->opcode
= OP_REGOFFSET
;
2026 ins
->inst_basereg
= cfg
->frame_reg
;
2027 /* These arguments are saved to the stack in the prolog */
2028 ins
->inst_offset
= offset
;
2029 if (cfg
->verbose_level
>= 2)
2030 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2038 if (ins
->opcode
!= OP_REGVAR
) {
2039 ins
->opcode
= OP_REGOFFSET
;
2040 ins
->inst_basereg
= cfg
->frame_reg
;
2041 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
2043 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2044 * since it loads/stores misaligned words, which don't do the right thing.
2046 if (align
< 4 && size
>= 4)
2048 /* The code in the prolog () stores words when storing vtypes received in a register */
2049 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
2051 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2052 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2053 offset
+= align
- 1;
2054 offset
&= ~(align
- 1);
2055 ins
->inst_offset
= offset
;
2061 /* align the offset to 8 bytes */
2062 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2063 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2068 cfg
->stack_offset
= offset
;
2072 mono_arch_create_vars (MonoCompile
*cfg
)
2074 MonoMethodSignature
*sig
;
2078 sig
= mono_method_signature (cfg
->method
);
2080 if (!cfg
->arch
.cinfo
)
2081 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2082 cinfo
= cfg
->arch
.cinfo
;
2084 if (IS_HARD_FLOAT
) {
2085 for (i
= 0; i
< 2; i
++) {
2086 MonoInst
*inst
= mono_compile_create_var (cfg
, &mono_defaults
.double_class
->byval_arg
, OP_LOCAL
);
2087 inst
->flags
|= MONO_INST_VOLATILE
;
2089 cfg
->arch
.vfp_scratch_slots
[i
] = (gpointer
) inst
;
2093 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2094 cfg
->ret_var_is_local
= TRUE
;
2096 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2097 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
2098 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2099 g_print ("vret_addr = ");
2100 mono_print_ins (cfg
->vret_addr
);
2104 if (cfg
->gen_sdb_seq_points
) {
2105 if (cfg
->soft_breakpoints
) {
2108 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2109 ins
->flags
|= MONO_INST_VOLATILE
;
2110 cfg
->arch
.seq_point_ss_method_var
= ins
;
2112 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2113 ins
->flags
|= MONO_INST_VOLATILE
;
2114 cfg
->arch
.seq_point_bp_method_var
= ins
;
2116 g_assert (!cfg
->compile_aot
);
2117 } else if (cfg
->compile_aot
) {
2118 MonoInst
*ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2119 ins
->flags
|= MONO_INST_VOLATILE
;
2120 cfg
->arch
.seq_point_info_var
= ins
;
2122 /* Allocate a separate variable for this to save 1 load per seq point */
2123 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2124 ins
->flags
|= MONO_INST_VOLATILE
;
2125 cfg
->arch
.ss_trigger_page_var
= ins
;
2131 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2133 MonoMethodSignature
*tmp_sig
;
2136 if (call
->tail_call
)
2139 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2142 * mono_ArgIterator_Setup assumes the signature cookie is
2143 * passed first and all the arguments which were before it are
2144 * passed on the stack after the signature. So compensate by
2145 * passing a different signature.
2147 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2148 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2149 tmp_sig
->sentinelpos
= 0;
2150 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2152 sig_reg
= mono_alloc_ireg (cfg
);
2153 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2155 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2160 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2165 LLVMCallInfo
*linfo
;
2167 n
= sig
->param_count
+ sig
->hasthis
;
2169 cinfo
= get_call_info (cfg
->mempool
, sig
);
2171 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2174 * LLVM always uses the native ABI while we use our own ABI, the
2175 * only difference is the handling of vtypes:
2176 * - we only pass/receive them in registers in some cases, and only
2177 * in 1 or 2 integer registers.
2179 switch (cinfo
->ret
.storage
) {
2180 case RegTypeGeneral
:
2183 case RegTypeIRegPair
:
2185 case RegTypeStructByAddr
:
2186 /* Vtype returned using a hidden argument */
2187 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2188 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2191 case RegTypeStructByVal
:
2192 /* LLVM models this by returning an int array */
2193 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2194 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2198 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2199 cfg
->disable_llvm
= TRUE
;
2203 for (i
= 0; i
< n
; ++i
) {
2204 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2205 ainfo
= cinfo
->args
+ i
;
2207 lainfo
->storage
= LLVMArgNone
;
2209 switch (ainfo
->storage
) {
2210 case RegTypeGeneral
:
2211 case RegTypeIRegPair
:
2213 case RegTypeBaseGen
:
2215 lainfo
->storage
= LLVMArgNormal
;
2217 case RegTypeStructByVal
:
2218 lainfo
->storage
= LLVMArgAsIArgs
;
2219 lainfo
->nslots
= ainfo
->struct_size
/ sizeof (gpointer
);
2221 case RegTypeStructByAddr
:
2222 case RegTypeStructByAddrOnStack
:
2223 lainfo
->storage
= LLVMArgVtypeByRef
;
2226 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2227 cfg
->disable_llvm
= TRUE
;
2237 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2240 MonoMethodSignature
*sig
;
2244 sig
= call
->signature
;
2245 n
= sig
->param_count
+ sig
->hasthis
;
2247 cinfo
= get_call_info (cfg
->mempool
, sig
);
2249 switch (cinfo
->ret
.storage
) {
2250 case RegTypeStructByVal
:
2252 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2253 /* The JIT will transform this into a normal call */
2254 call
->vret_in_reg
= TRUE
;
2257 if (call
->inst
.opcode
== OP_TAILCALL
)
2260 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2261 * the location pointed to by it after call in emit_move_return_value ().
2263 if (!cfg
->arch
.vret_addr_loc
) {
2264 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
2265 /* Prevent it from being register allocated or optimized away */
2266 ((MonoInst
*)cfg
->arch
.vret_addr_loc
)->flags
|= MONO_INST_VOLATILE
;
2269 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, ((MonoInst
*)cfg
->arch
.vret_addr_loc
)->dreg
, call
->vret_var
->dreg
);
2271 case RegTypeStructByAddr
: {
2273 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2274 vtarg
->sreg1
= call
->vret_var
->dreg
;
2275 vtarg
->dreg
= mono_alloc_preg (cfg
);
2276 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2278 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2285 for (i
= 0; i
< n
; ++i
) {
2286 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2289 if (i
>= sig
->hasthis
)
2290 t
= sig
->params
[i
- sig
->hasthis
];
2292 t
= &mono_defaults
.int_class
->byval_arg
;
2293 t
= mini_get_underlying_type (t
);
2295 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2296 /* Emit the signature cookie just before the implicit arguments */
2297 emit_sig_cookie (cfg
, call
, cinfo
);
2300 in
= call
->args
[i
];
2302 switch (ainfo
->storage
) {
2303 case RegTypeGeneral
:
2304 case RegTypeIRegPair
:
2305 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2306 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2307 ins
->dreg
= mono_alloc_ireg (cfg
);
2308 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2309 MONO_ADD_INS (cfg
->cbb
, ins
);
2310 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2312 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2313 ins
->dreg
= mono_alloc_ireg (cfg
);
2314 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2315 MONO_ADD_INS (cfg
->cbb
, ins
);
2316 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2317 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2318 if (ainfo
->size
== 4) {
2319 if (IS_SOFT_FLOAT
) {
2320 /* mono_emit_call_args () have already done the r8->r4 conversion */
2321 /* The converted value is in an int vreg */
2322 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2323 ins
->dreg
= mono_alloc_ireg (cfg
);
2324 ins
->sreg1
= in
->dreg
;
2325 MONO_ADD_INS (cfg
->cbb
, ins
);
2326 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2330 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2331 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2332 creg
= mono_alloc_ireg (cfg
);
2333 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2334 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2337 if (IS_SOFT_FLOAT
) {
2338 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2339 ins
->dreg
= mono_alloc_ireg (cfg
);
2340 ins
->sreg1
= in
->dreg
;
2341 MONO_ADD_INS (cfg
->cbb
, ins
);
2342 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2344 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2345 ins
->dreg
= mono_alloc_ireg (cfg
);
2346 ins
->sreg1
= in
->dreg
;
2347 MONO_ADD_INS (cfg
->cbb
, ins
);
2348 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2352 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2353 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2354 creg
= mono_alloc_ireg (cfg
);
2355 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2356 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2357 creg
= mono_alloc_ireg (cfg
);
2358 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2359 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2362 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2364 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2365 ins
->dreg
= mono_alloc_ireg (cfg
);
2366 ins
->sreg1
= in
->dreg
;
2367 MONO_ADD_INS (cfg
->cbb
, ins
);
2369 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2372 case RegTypeStructByVal
:
2373 case RegTypeGSharedVtInReg
:
2374 case RegTypeGSharedVtOnStack
:
2376 case RegTypeStructByAddr
:
2377 case RegTypeStructByAddrOnStack
:
2378 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2379 ins
->opcode
= OP_OUTARG_VT
;
2380 ins
->sreg1
= in
->dreg
;
2381 ins
->klass
= in
->klass
;
2382 ins
->inst_p0
= call
;
2383 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2384 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2385 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2386 MONO_ADD_INS (cfg
->cbb
, ins
);
2389 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2390 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2391 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2392 if (t
->type
== MONO_TYPE_R8
) {
2393 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2396 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2398 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2401 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2404 case RegTypeBaseGen
:
2405 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2406 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2407 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2408 ins
->dreg
= mono_alloc_ireg (cfg
);
2409 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2410 MONO_ADD_INS (cfg
->cbb
, ins
);
2411 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2412 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2415 /* This should work for soft-float as well */
2417 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2418 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2419 creg
= mono_alloc_ireg (cfg
);
2420 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2421 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2422 creg
= mono_alloc_ireg (cfg
);
2423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2424 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2425 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2427 g_assert_not_reached ();
2431 int fdreg
= mono_alloc_freg (cfg
);
2433 if (ainfo
->size
== 8) {
2434 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2435 ins
->sreg1
= in
->dreg
;
2437 MONO_ADD_INS (cfg
->cbb
, ins
);
2439 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2444 * Mono's register allocator doesn't speak single-precision registers that
2445 * overlap double-precision registers (i.e. armhf). So we have to work around
2446 * the register allocator and load the value from memory manually.
2448 * So we create a variable for the float argument and an instruction to store
2449 * the argument into the variable. We then store the list of these arguments
2450 * in call->float_args. This list is then used by emit_float_args later to
2451 * pass the arguments in the various call opcodes.
2453 * This is not very nice, and we should really try to fix the allocator.
2456 MonoInst
*float_arg
= mono_compile_create_var (cfg
, &mono_defaults
.single_class
->byval_arg
, OP_LOCAL
);
2458 /* Make sure the instruction isn't seen as pointless and removed.
2460 float_arg
->flags
|= MONO_INST_VOLATILE
;
2462 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2464 /* We use the dreg to look up the instruction later. The hreg is used to
2465 * emit the instruction that loads the value into the FP reg.
2467 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2468 fad
->vreg
= float_arg
->dreg
;
2469 fad
->hreg
= ainfo
->reg
;
2471 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2474 call
->used_iregs
|= 1 << ainfo
->reg
;
2475 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2479 g_assert_not_reached ();
2483 /* Handle the case where there are no implicit arguments */
2484 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2485 emit_sig_cookie (cfg
, call
, cinfo
);
2487 call
->call_info
= cinfo
;
2488 call
->stack_usage
= cinfo
->stack_usage
;
2492 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2498 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2499 ins
->dreg
= mono_alloc_freg (cfg
);
2500 ins
->sreg1
= arg
->dreg
;
2501 MONO_ADD_INS (cfg
->cbb
, ins
);
2502 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2505 g_assert_not_reached ();
2511 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2513 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2515 ArgInfo
*ainfo
= ins
->inst_p1
;
2516 int ovf_size
= ainfo
->vtsize
;
2517 int doffset
= ainfo
->offset
;
2518 int struct_size
= ainfo
->struct_size
;
2519 int i
, soffset
, dreg
, tmpreg
;
2521 switch (ainfo
->storage
) {
2522 case RegTypeGSharedVtInReg
:
2523 case RegTypeStructByAddr
:
2525 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2527 case RegTypeGSharedVtOnStack
:
2528 case RegTypeStructByAddrOnStack
:
2529 /* Pass by addr on stack */
2530 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2533 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2534 if (ainfo
->esize
== 4)
2535 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2537 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2538 load
->dreg
= mono_alloc_freg (cfg
);
2539 load
->inst_basereg
= src
->dreg
;
2540 load
->inst_offset
= i
* ainfo
->esize
;
2541 MONO_ADD_INS (cfg
->cbb
, load
);
2543 if (ainfo
->esize
== 4) {
2546 /* See RegTypeFP in mono_arch_emit_call () */
2547 MonoInst
*float_arg
= mono_compile_create_var (cfg
, &mono_defaults
.single_class
->byval_arg
, OP_LOCAL
);
2548 float_arg
->flags
|= MONO_INST_VOLATILE
;
2549 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2551 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2552 fad
->vreg
= float_arg
->dreg
;
2553 fad
->hreg
= ainfo
->reg
+ i
;
2555 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2557 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2563 for (i
= 0; i
< ainfo
->size
; ++i
) {
2564 dreg
= mono_alloc_ireg (cfg
);
2565 switch (struct_size
) {
2567 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2570 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2573 tmpreg
= mono_alloc_ireg (cfg
);
2574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2576 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2577 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2578 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2579 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2580 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2583 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2586 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2587 soffset
+= sizeof (gpointer
);
2588 struct_size
-= sizeof (gpointer
);
2590 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2592 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (gpointer
), struct_size
), struct_size
< 4 ? 1 : 4);
2598 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2600 MonoType
*ret
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
2603 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2606 if (COMPILE_LLVM (cfg
)) {
2607 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2609 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2610 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2611 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2612 MONO_ADD_INS (cfg
->cbb
, ins
);
2617 case MONO_ARM_FPU_NONE
:
2618 if (ret
->type
== MONO_TYPE_R8
) {
2621 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2622 ins
->dreg
= cfg
->ret
->dreg
;
2623 ins
->sreg1
= val
->dreg
;
2624 MONO_ADD_INS (cfg
->cbb
, ins
);
2627 if (ret
->type
== MONO_TYPE_R4
) {
2628 /* Already converted to an int in method_to_ir () */
2629 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2633 case MONO_ARM_FPU_VFP
:
2634 case MONO_ARM_FPU_VFP_HARD
:
2635 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2638 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2639 ins
->dreg
= cfg
->ret
->dreg
;
2640 ins
->sreg1
= val
->dreg
;
2641 MONO_ADD_INS (cfg
->cbb
, ins
);
2646 g_assert_not_reached ();
2650 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2653 #endif /* #ifndef DISABLE_JIT */
2656 mono_arch_is_inst_imm (gint64 imm
)
2662 MonoMethodSignature
*sig
;
2665 MonoType
**param_types
;
2669 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2673 if (sig
->hasthis
+ sig
->param_count
> PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
2676 switch (cinfo
->ret
.storage
) {
2678 case RegTypeGeneral
:
2679 case RegTypeIRegPair
:
2680 case RegTypeStructByAddr
:
2691 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2692 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2695 switch (ainfo
->storage
) {
2696 case RegTypeGeneral
:
2697 case RegTypeIRegPair
:
2698 case RegTypeBaseGen
:
2702 if (ainfo
->offset
>= (DYN_CALL_STACK_ARGS
* sizeof (gpointer
)))
2705 case RegTypeStructByVal
:
2706 if (ainfo
->size
== 0)
2707 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2709 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2710 if (last_slot
>= PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
2718 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2719 for (i
= 0; i
< sig
->param_count
; ++i
) {
2720 MonoType
*t
= sig
->params
[i
];
2725 t
= mini_get_underlying_type (t
);
2748 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2750 ArchDynCallInfo
*info
;
2754 cinfo
= get_call_info (NULL
, sig
);
2756 if (!dyn_call_supported (cinfo
, sig
)) {
2761 info
= g_new0 (ArchDynCallInfo
, 1);
2762 // FIXME: Preprocess the info to speed up start_dyn_call ()
2764 info
->cinfo
= cinfo
;
2765 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2766 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2767 for (i
= 0; i
< sig
->param_count
; ++i
)
2768 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2770 return (MonoDynCallInfo
*)info
;
2774 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2776 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2778 g_free (ainfo
->cinfo
);
2783 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
, int buf_len
)
2785 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2786 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2787 int arg_index
, greg
, i
, j
, pindex
;
2788 MonoMethodSignature
*sig
= dinfo
->sig
;
2790 g_assert (buf_len
>= sizeof (DynCallArgs
));
2800 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2801 p
->regs
[greg
++] = (mgreg_t
)*(args
[arg_index
++]);
2806 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2807 p
->regs
[greg
++] = (mgreg_t
)ret
;
2809 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2810 MonoType
*t
= dinfo
->param_types
[i
];
2811 gpointer
*arg
= args
[arg_index
++];
2812 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2815 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2817 } else if (ainfo
->storage
== RegTypeFP
) {
2818 } else if (ainfo
->storage
== RegTypeBase
) {
2819 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2820 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2821 /* slot + 1 is the first stack slot, so the code below will work */
2824 g_assert_not_reached ();
2828 p
->regs
[slot
] = (mgreg_t
)*arg
;
2833 case MONO_TYPE_STRING
:
2834 case MONO_TYPE_CLASS
:
2835 case MONO_TYPE_ARRAY
:
2836 case MONO_TYPE_SZARRAY
:
2837 case MONO_TYPE_OBJECT
:
2841 p
->regs
[slot
] = (mgreg_t
)*arg
;
2844 p
->regs
[slot
] = *(guint8
*)arg
;
2847 p
->regs
[slot
] = *(gint8
*)arg
;
2850 p
->regs
[slot
] = *(gint16
*)arg
;
2853 p
->regs
[slot
] = *(guint16
*)arg
;
2856 p
->regs
[slot
] = *(gint32
*)arg
;
2859 p
->regs
[slot
] = *(guint32
*)arg
;
2863 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
2864 p
->regs
[slot
] = (mgreg_t
)arg
[1];
2867 if (ainfo
->storage
== RegTypeFP
) {
2868 float f
= *(float*)arg
;
2869 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
2872 p
->regs
[slot
] = *(mgreg_t
*)arg
;
2876 if (ainfo
->storage
== RegTypeFP
) {
2877 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
2880 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
2881 p
->regs
[slot
] = (mgreg_t
)arg
[1];
2884 case MONO_TYPE_GENERICINST
:
2885 if (MONO_TYPE_IS_REFERENCE (t
)) {
2886 p
->regs
[slot
] = (mgreg_t
)*arg
;
2889 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type (t
))) {
2890 MonoClass
*klass
= mono_class_from_mono_type (t
);
2891 guint8
*nullable_buf
;
2894 size
= mono_class_value_size (klass
, NULL
);
2895 nullable_buf
= g_alloca (size
);
2896 g_assert (nullable_buf
);
2898 /* The argument pointed to by arg is either a boxed vtype or null */
2899 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
2901 arg
= (gpointer
*)nullable_buf
;
2907 case MONO_TYPE_VALUETYPE
:
2908 g_assert (ainfo
->storage
== RegTypeStructByVal
);
2910 if (ainfo
->size
== 0)
2911 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2915 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
2916 p
->regs
[slot
++] = ((mgreg_t
*)arg
) [j
];
2919 g_assert_not_reached ();
2925 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
2927 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2928 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2929 MonoType
*ptype
= ainfo
->rtype
;
2930 guint8
*ret
= p
->ret
;
2931 mgreg_t res
= p
->res
;
2932 mgreg_t res2
= p
->res2
;
2934 switch (ptype
->type
) {
2935 case MONO_TYPE_VOID
:
2936 *(gpointer
*)ret
= NULL
;
2938 case MONO_TYPE_STRING
:
2939 case MONO_TYPE_CLASS
:
2940 case MONO_TYPE_ARRAY
:
2941 case MONO_TYPE_SZARRAY
:
2942 case MONO_TYPE_OBJECT
:
2946 *(gpointer
*)ret
= (gpointer
)res
;
2952 *(guint8
*)ret
= res
;
2955 *(gint16
*)ret
= res
;
2958 *(guint16
*)ret
= res
;
2961 *(gint32
*)ret
= res
;
2964 *(guint32
*)ret
= res
;
2968 /* This handles endianness as well */
2969 ((gint32
*)ret
) [0] = res
;
2970 ((gint32
*)ret
) [1] = res2
;
2972 case MONO_TYPE_GENERICINST
:
2973 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
2974 *(gpointer
*)ret
= (gpointer
)res
;
2979 case MONO_TYPE_VALUETYPE
:
2980 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
2986 *(float*)ret
= *(float*)&p
->fpregs
[0];
2988 *(float*)ret
= *(float*)&res
;
2990 case MONO_TYPE_R8
: {
2994 if (IS_HARD_FLOAT
) {
2995 *(double*)ret
= p
->fpregs
[0];
3000 *(double*)ret
= *(double*)®s
;
3005 g_assert_not_reached ();
3012 * Allow tracing to work with this interface (with an optional argument)
3016 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
3020 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
3021 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
3022 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
3023 code
= emit_call_reg (code
, ARMREG_R2
);
3037 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
3040 int save_mode
= SAVE_NONE
;
3042 MonoMethod
*method
= cfg
->method
;
3043 MonoType
*ret_type
= mini_get_underlying_type (mono_method_signature (method
)->ret
);
3044 int rtype
= ret_type
->type
;
3045 int save_offset
= cfg
->param_area
;
3049 offset
= code
- cfg
->native_code
;
3050 /* we need about 16 instructions */
3051 if (offset
> (cfg
->code_size
- 16 * 4)) {
3052 cfg
->code_size
*= 2;
3053 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3054 code
= cfg
->native_code
+ offset
;
3057 case MONO_TYPE_VOID
:
3058 /* special case string .ctor icall */
3059 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
3060 save_mode
= SAVE_ONE
;
3062 save_mode
= SAVE_NONE
;
3066 save_mode
= SAVE_TWO
;
3070 save_mode
= SAVE_ONE_FP
;
3072 save_mode
= SAVE_ONE
;
3076 save_mode
= SAVE_TWO_FP
;
3078 save_mode
= SAVE_TWO
;
3080 case MONO_TYPE_GENERICINST
:
3081 if (!mono_type_generic_inst_is_valuetype (ret_type
)) {
3082 save_mode
= SAVE_ONE
;
3086 case MONO_TYPE_VALUETYPE
:
3087 save_mode
= SAVE_STRUCT
;
3090 save_mode
= SAVE_ONE
;
3094 switch (save_mode
) {
3096 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3097 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
3098 if (enable_arguments
) {
3099 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
3100 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3104 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3105 if (enable_arguments
) {
3106 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3110 ARM_FSTS (code
, ARM_VFP_F0
, cfg
->frame_reg
, save_offset
);
3111 if (enable_arguments
) {
3112 ARM_FMRS (code
, ARMREG_R1
, ARM_VFP_F0
);
3116 ARM_FSTD (code
, ARM_VFP_D0
, cfg
->frame_reg
, save_offset
);
3117 if (enable_arguments
) {
3118 ARM_FMDRR (code
, ARMREG_R1
, ARMREG_R2
, ARM_VFP_D0
);
3122 if (enable_arguments
) {
3123 /* FIXME: get the actual address */
3124 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
3132 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
3133 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
3134 code
= emit_call_reg (code
, ARMREG_IP
);
3136 switch (save_mode
) {
3138 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3139 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
3142 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
3145 ARM_FLDS (code
, ARM_VFP_F0
, cfg
->frame_reg
, save_offset
);
3148 ARM_FLDD (code
, ARM_VFP_D0
, cfg
->frame_reg
, save_offset
);
3159 * The immediate field for cond branches is big enough for all reasonable methods
3161 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3162 if (0 && ins->inst_true_bb->native_offset) { \
3163 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3165 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3166 ARM_B_COND (code, (condcode), 0); \
3169 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3171 /* emit an exception if condition is fail
3173 * We assign the extra code used to throw the implicit exceptions
3174 * to cfg->bb_exit as far as the big branch handling is concerned
3176 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3178 mono_add_patch_info (cfg, code - cfg->native_code, \
3179 MONO_PATCH_INFO_EXC, exc_name); \
3180 ARM_BL_COND (code, (condcode), 0); \
3183 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3186 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3191 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3195 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3196 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3198 switch (ins
->opcode
) {
3201 /* Already done by an arch-independent pass */
3203 case OP_LOAD_MEMBASE
:
3204 case OP_LOADI4_MEMBASE
:
3206 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3207 * OP_LOAD_MEMBASE offset(basereg), reg
3209 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3210 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3211 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3212 ins
->inst_offset
== last_ins
->inst_offset
) {
3213 if (ins
->dreg
== last_ins
->sreg1
) {
3214 MONO_DELETE_INS (bb
, ins
);
3217 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3218 ins
->opcode
= OP_MOVE
;
3219 ins
->sreg1
= last_ins
->sreg1
;
3223 * Note: reg1 must be different from the basereg in the second load
3224 * OP_LOAD_MEMBASE offset(basereg), reg1
3225 * OP_LOAD_MEMBASE offset(basereg), reg2
3227 * OP_LOAD_MEMBASE offset(basereg), reg1
3228 * OP_MOVE reg1, reg2
3230 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3231 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3232 ins
->inst_basereg
!= last_ins
->dreg
&&
3233 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3234 ins
->inst_offset
== last_ins
->inst_offset
) {
3236 if (ins
->dreg
== last_ins
->dreg
) {
3237 MONO_DELETE_INS (bb
, ins
);
3240 ins
->opcode
= OP_MOVE
;
3241 ins
->sreg1
= last_ins
->dreg
;
3244 //g_assert_not_reached ();
3248 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3249 * OP_LOAD_MEMBASE offset(basereg), reg
3251 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3252 * OP_ICONST reg, imm
3254 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3255 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3256 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3257 ins
->inst_offset
== last_ins
->inst_offset
) {
3258 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3259 ins
->opcode
= OP_ICONST
;
3260 ins
->inst_c0
= last_ins
->inst_imm
;
3261 g_assert_not_reached (); // check this rule
3265 case OP_LOADU1_MEMBASE
:
3266 case OP_LOADI1_MEMBASE
:
3267 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3268 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3269 ins
->inst_offset
== last_ins
->inst_offset
) {
3270 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3271 ins
->sreg1
= last_ins
->sreg1
;
3274 case OP_LOADU2_MEMBASE
:
3275 case OP_LOADI2_MEMBASE
:
3276 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3277 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3278 ins
->inst_offset
== last_ins
->inst_offset
) {
3279 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3280 ins
->sreg1
= last_ins
->sreg1
;
3284 ins
->opcode
= OP_MOVE
;
3288 if (ins
->dreg
== ins
->sreg1
) {
3289 MONO_DELETE_INS (bb
, ins
);
3293 * OP_MOVE sreg, dreg
3294 * OP_MOVE dreg, sreg
3296 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3297 ins
->sreg1
== last_ins
->dreg
&&
3298 ins
->dreg
== last_ins
->sreg1
) {
3299 MONO_DELETE_INS (bb
, ins
);
3308 * the branch_cc_table should maintain the order of these
3322 branch_cc_table
[] = {
3336 #define ADD_NEW_INS(cfg,dest,op) do { \
3337 MONO_INST_NEW ((cfg), (dest), (op)); \
3338 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3342 map_to_reg_reg_op (int op
)
3351 case OP_COMPARE_IMM
:
3353 case OP_ICOMPARE_IMM
:
3367 case OP_LOAD_MEMBASE
:
3368 return OP_LOAD_MEMINDEX
;
3369 case OP_LOADI4_MEMBASE
:
3370 return OP_LOADI4_MEMINDEX
;
3371 case OP_LOADU4_MEMBASE
:
3372 return OP_LOADU4_MEMINDEX
;
3373 case OP_LOADU1_MEMBASE
:
3374 return OP_LOADU1_MEMINDEX
;
3375 case OP_LOADI2_MEMBASE
:
3376 return OP_LOADI2_MEMINDEX
;
3377 case OP_LOADU2_MEMBASE
:
3378 return OP_LOADU2_MEMINDEX
;
3379 case OP_LOADI1_MEMBASE
:
3380 return OP_LOADI1_MEMINDEX
;
3381 case OP_STOREI1_MEMBASE_REG
:
3382 return OP_STOREI1_MEMINDEX
;
3383 case OP_STOREI2_MEMBASE_REG
:
3384 return OP_STOREI2_MEMINDEX
;
3385 case OP_STOREI4_MEMBASE_REG
:
3386 return OP_STOREI4_MEMINDEX
;
3387 case OP_STORE_MEMBASE_REG
:
3388 return OP_STORE_MEMINDEX
;
3389 case OP_STORER4_MEMBASE_REG
:
3390 return OP_STORER4_MEMINDEX
;
3391 case OP_STORER8_MEMBASE_REG
:
3392 return OP_STORER8_MEMINDEX
;
3393 case OP_STORE_MEMBASE_IMM
:
3394 return OP_STORE_MEMBASE_REG
;
3395 case OP_STOREI1_MEMBASE_IMM
:
3396 return OP_STOREI1_MEMBASE_REG
;
3397 case OP_STOREI2_MEMBASE_IMM
:
3398 return OP_STOREI2_MEMBASE_REG
;
3399 case OP_STOREI4_MEMBASE_IMM
:
3400 return OP_STOREI4_MEMBASE_REG
;
3402 g_assert_not_reached ();
3406 * Remove from the instruction list the instructions that can't be
3407 * represented with very simple instructions with no register
3411 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3413 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3414 int rot_amount
, imm8
, low_imm
;
3416 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3418 switch (ins
->opcode
) {
3422 case OP_COMPARE_IMM
:
3423 case OP_ICOMPARE_IMM
:
3437 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3438 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3439 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3440 temp
->inst_c0
= ins
->inst_imm
;
3441 temp
->dreg
= mono_alloc_ireg (cfg
);
3442 ins
->sreg2
= temp
->dreg
;
3444 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3445 ins
->opcode
= opcode2
;
3447 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3453 if (ins
->inst_imm
== 1) {
3454 ins
->opcode
= OP_MOVE
;
3457 if (ins
->inst_imm
== 0) {
3458 ins
->opcode
= OP_ICONST
;
3462 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3464 ins
->opcode
= OP_SHL_IMM
;
3465 ins
->inst_imm
= imm8
;
3468 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3469 temp
->inst_c0
= ins
->inst_imm
;
3470 temp
->dreg
= mono_alloc_ireg (cfg
);
3471 ins
->sreg2
= temp
->dreg
;
3472 ins
->opcode
= OP_IMUL
;
3478 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
3479 /* ARM sets the C flag to 1 if there was _no_ overflow */
3480 ins
->next
->opcode
= OP_COND_EXC_NC
;
3483 case OP_IDIV_UN_IMM
:
3485 case OP_IREM_UN_IMM
: {
3486 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3487 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3488 temp
->inst_c0
= ins
->inst_imm
;
3489 temp
->dreg
= mono_alloc_ireg (cfg
);
3490 ins
->sreg2
= temp
->dreg
;
3492 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3493 ins
->opcode
= opcode2
;
3496 case OP_LOCALLOC_IMM
:
3497 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3498 temp
->inst_c0
= ins
->inst_imm
;
3499 temp
->dreg
= mono_alloc_ireg (cfg
);
3500 ins
->sreg1
= temp
->dreg
;
3501 ins
->opcode
= OP_LOCALLOC
;
3503 case OP_LOAD_MEMBASE
:
3504 case OP_LOADI4_MEMBASE
:
3505 case OP_LOADU4_MEMBASE
:
3506 case OP_LOADU1_MEMBASE
:
3507 /* we can do two things: load the immed in a register
3508 * and use an indexed load, or see if the immed can be
3509 * represented as an ad_imm + a load with a smaller offset
3510 * that fits. We just do the first for now, optimize later.
3512 if (arm_is_imm12 (ins
->inst_offset
))
3514 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3515 temp
->inst_c0
= ins
->inst_offset
;
3516 temp
->dreg
= mono_alloc_ireg (cfg
);
3517 ins
->sreg2
= temp
->dreg
;
3518 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3520 case OP_LOADI2_MEMBASE
:
3521 case OP_LOADU2_MEMBASE
:
3522 case OP_LOADI1_MEMBASE
:
3523 if (arm_is_imm8 (ins
->inst_offset
))
3525 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3526 temp
->inst_c0
= ins
->inst_offset
;
3527 temp
->dreg
= mono_alloc_ireg (cfg
);
3528 ins
->sreg2
= temp
->dreg
;
3529 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3531 case OP_LOADR4_MEMBASE
:
3532 case OP_LOADR8_MEMBASE
:
3533 if (arm_is_fpimm8 (ins
->inst_offset
))
3535 low_imm
= ins
->inst_offset
& 0x1ff;
3536 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3537 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3538 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3539 temp
->sreg1
= ins
->inst_basereg
;
3540 temp
->dreg
= mono_alloc_ireg (cfg
);
3541 ins
->inst_basereg
= temp
->dreg
;
3542 ins
->inst_offset
= low_imm
;
3546 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3547 temp
->inst_c0
= ins
->inst_offset
;
3548 temp
->dreg
= mono_alloc_ireg (cfg
);
3550 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3551 add_ins
->sreg1
= ins
->inst_basereg
;
3552 add_ins
->sreg2
= temp
->dreg
;
3553 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3555 ins
->inst_basereg
= add_ins
->dreg
;
3556 ins
->inst_offset
= 0;
3559 case OP_STORE_MEMBASE_REG
:
3560 case OP_STOREI4_MEMBASE_REG
:
3561 case OP_STOREI1_MEMBASE_REG
:
3562 if (arm_is_imm12 (ins
->inst_offset
))
3564 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3565 temp
->inst_c0
= ins
->inst_offset
;
3566 temp
->dreg
= mono_alloc_ireg (cfg
);
3567 ins
->sreg2
= temp
->dreg
;
3568 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3570 case OP_STOREI2_MEMBASE_REG
:
3571 if (arm_is_imm8 (ins
->inst_offset
))
3573 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3574 temp
->inst_c0
= ins
->inst_offset
;
3575 temp
->dreg
= mono_alloc_ireg (cfg
);
3576 ins
->sreg2
= temp
->dreg
;
3577 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3579 case OP_STORER4_MEMBASE_REG
:
3580 case OP_STORER8_MEMBASE_REG
:
3581 if (arm_is_fpimm8 (ins
->inst_offset
))
3583 low_imm
= ins
->inst_offset
& 0x1ff;
3584 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3585 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3586 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3587 temp
->sreg1
= ins
->inst_destbasereg
;
3588 temp
->dreg
= mono_alloc_ireg (cfg
);
3589 ins
->inst_destbasereg
= temp
->dreg
;
3590 ins
->inst_offset
= low_imm
;
3594 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3595 temp
->inst_c0
= ins
->inst_offset
;
3596 temp
->dreg
= mono_alloc_ireg (cfg
);
3598 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3599 add_ins
->sreg1
= ins
->inst_destbasereg
;
3600 add_ins
->sreg2
= temp
->dreg
;
3601 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3603 ins
->inst_destbasereg
= add_ins
->dreg
;
3604 ins
->inst_offset
= 0;
3607 case OP_STORE_MEMBASE_IMM
:
3608 case OP_STOREI1_MEMBASE_IMM
:
3609 case OP_STOREI2_MEMBASE_IMM
:
3610 case OP_STOREI4_MEMBASE_IMM
:
3611 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3612 temp
->inst_c0
= ins
->inst_imm
;
3613 temp
->dreg
= mono_alloc_ireg (cfg
);
3614 ins
->sreg1
= temp
->dreg
;
3615 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3617 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3620 gboolean swap
= FALSE
;
3624 /* Optimized away */
3629 /* Some fp compares require swapped operands */
3630 switch (ins
->next
->opcode
) {
3632 ins
->next
->opcode
= OP_FBLT
;
3636 ins
->next
->opcode
= OP_FBLT_UN
;
3640 ins
->next
->opcode
= OP_FBGE
;
3644 ins
->next
->opcode
= OP_FBGE_UN
;
3652 ins
->sreg1
= ins
->sreg2
;
3661 bb
->last_ins
= last_ins
;
3662 bb
->max_vreg
= cfg
->next_vreg
;
3666 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3670 if (long_ins
->opcode
== OP_LNEG
) {
3672 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3673 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
3679 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3681 /* sreg is a float, dreg is an integer reg */
3683 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3685 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
3687 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3688 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3689 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3693 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3694 else if (size
== 2) {
3695 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3696 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3700 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3701 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3702 } else if (size
== 2) {
3703 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3704 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3711 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3713 /* sreg is a float, dreg is an integer reg */
3715 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3717 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
3719 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3720 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3721 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3725 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3726 else if (size
== 2) {
3727 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3728 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3732 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3733 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3734 } else if (size
== 2) {
3735 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3736 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3742 #endif /* #ifndef DISABLE_JIT */
3744 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3747 emit_thunk (guint8
*code
, gconstpointer target
)
3751 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3752 if (thumb_supported
)
3753 ARM_BX (code
, ARMREG_IP
);
3755 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3756 *(guint32
*)code
= (guint32
)target
;
3758 mono_arch_flush_icache (p
, code
- p
);
3762 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3764 MonoJitInfo
*ji
= NULL
;
3765 MonoThunkJitInfo
*info
;
3768 guint8
*orig_target
;
3769 guint8
*target_thunk
;
3772 domain
= mono_domain_get ();
3776 * This can be called multiple times during JITting,
3777 * save the current position in cfg->arch to avoid
3778 * doing a O(n^2) search.
3780 if (!cfg
->arch
.thunks
) {
3781 cfg
->arch
.thunks
= cfg
->thunks
;
3782 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3784 thunks
= cfg
->arch
.thunks
;
3785 thunks_size
= cfg
->arch
.thunks_size
;
3787 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3788 g_assert_not_reached ();
3791 g_assert (*(guint32
*)thunks
== 0);
3792 emit_thunk (thunks
, target
);
3793 arm_patch (code
, thunks
);
3795 cfg
->arch
.thunks
+= THUNK_SIZE
;
3796 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
3798 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3800 info
= mono_jit_info_get_thunk_info (ji
);
3803 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3804 thunks_size
= info
->thunks_size
;
3806 orig_target
= mono_arch_get_call_target (code
+ 4);
3808 mono_mini_arch_lock ();
3810 target_thunk
= NULL
;
3811 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3812 /* The call already points to a thunk, because of trampolines etc. */
3813 target_thunk
= orig_target
;
3815 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3816 if (((guint32
*)p
) [0] == 0) {
3820 } else if (((guint32
*)p
) [2] == (guint32
)target
) {
3821 /* Thunk already points to target */
3828 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3830 if (!target_thunk
) {
3831 mono_mini_arch_unlock ();
3832 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3833 g_assert_not_reached ();
3836 emit_thunk (target_thunk
, target
);
3837 arm_patch (code
, target_thunk
);
3838 mono_arch_flush_icache (code
, 4);
3840 mono_mini_arch_unlock ();
3845 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3847 guint32
*code32
= (void*)code
;
3848 guint32 ins
= *code32
;
3849 guint32 prim
= (ins
>> 25) & 7;
3850 guint32 tval
= GPOINTER_TO_UINT (target
);
3852 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3853 if (prim
== 5) { /* 101b */
3854 /* the diff starts 8 bytes from the branch opcode */
3855 gint diff
= target
- code
- 8;
3857 gint tmask
= 0xffffffff;
3858 if (tval
& 1) { /* entering thumb mode */
3859 diff
= target
- 1 - code
- 8;
3860 g_assert (thumb_supported
);
3861 tbits
= 0xf << 28; /* bl->blx bit pattern */
3862 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3863 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3867 tmask
= ~(1 << 24); /* clear the link bit */
3868 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3873 if (diff
<= 33554431) {
3875 ins
= (ins
& 0xff000000) | diff
;
3877 *code32
= ins
| tbits
;
3881 /* diff between 0 and -33554432 */
3882 if (diff
>= -33554432) {
3884 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3886 *code32
= ins
| tbits
;
3891 handle_thunk (cfg
, domain
, code
, target
);
3896 * The alternative call sequences looks like this:
3898 * ldr ip, [pc] // loads the address constant
3899 * b 1f // jumps around the constant
3900 * address constant embedded in the code
3905 * There are two cases for patching:
3906 * a) at the end of method emission: in this case code points to the start
3907 * of the call sequence
3908 * b) during runtime patching of the call site: in this case code points
3909 * to the mov pc, ip instruction
3911 * We have to handle also the thunk jump code sequence:
3915 * address constant // execution never reaches here
3917 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3918 /* Branch and exchange: the address is constructed in a reg
3919 * We can patch BX when the code sequence is the following:
3920 * ldr ip, [pc, #0] ; 0x8
3927 guint8
*emit
= (guint8
*)ccode
;
3928 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3930 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3931 ARM_BX (emit
, ARMREG_IP
);
3933 /*patching from magic trampoline*/
3934 if (ins
== ccode
[3]) {
3935 g_assert (code32
[-4] == ccode
[0]);
3936 g_assert (code32
[-3] == ccode
[1]);
3937 g_assert (code32
[-1] == ccode
[2]);
3938 code32
[-2] = (guint32
)target
;
3941 /*patching from JIT*/
3942 if (ins
== ccode
[0]) {
3943 g_assert (code32
[1] == ccode
[1]);
3944 g_assert (code32
[3] == ccode
[2]);
3945 g_assert (code32
[4] == ccode
[3]);
3946 code32
[2] = (guint32
)target
;
3949 g_assert_not_reached ();
3950 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
3958 guint8
*emit
= (guint8
*)ccode
;
3959 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3961 ARM_BLX_REG (emit
, ARMREG_IP
);
3963 g_assert (code32
[-3] == ccode
[0]);
3964 g_assert (code32
[-2] == ccode
[1]);
3965 g_assert (code32
[0] == ccode
[2]);
3967 code32
[-1] = (guint32
)target
;
3970 guint32
*tmp
= ccode
;
3971 guint8
*emit
= (guint8
*)tmp
;
3972 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3973 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3974 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3975 ARM_BX (emit
, ARMREG_IP
);
3976 if (ins
== ccode
[2]) {
3977 g_assert_not_reached (); // should be -2 ...
3978 code32
[-1] = (guint32
)target
;
3981 if (ins
== ccode
[0]) {
3982 /* handles both thunk jump code and the far call sequence */
3983 code32
[2] = (guint32
)target
;
3986 g_assert_not_reached ();
3988 // g_print ("patched with 0x%08x\n", ins);
3992 arm_patch (guchar
*code
, const guchar
*target
)
3994 arm_patch_general (NULL
, NULL
, code
, target
);
3998 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3999 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4000 * to be used with the emit macros.
4001 * Return -1 otherwise.
4004 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
4007 for (i
= 0; i
< 31; i
+= 2) {
4008 res
= (val
<< (32 - i
)) | (val
>> i
);
4011 *rot_amount
= i
? 32 - i
: 0;
4018 * Emits in code a sequence of instructions that load the value 'val'
4019 * into the dreg register. Uses at most 4 instructions.
4022 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
4024 int imm8
, rot_amount
;
4026 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4027 /* skip the constant pool */
4033 if (mini_get_debug_options()->single_imm_size
&& v7_supported
) {
4034 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4035 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4039 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
4040 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4041 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
4042 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4045 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4047 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4051 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
4053 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4055 if (val
& 0xFF0000) {
4056 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4058 if (val
& 0xFF000000) {
4059 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4061 } else if (val
& 0xFF00) {
4062 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4063 if (val
& 0xFF0000) {
4064 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4066 if (val
& 0xFF000000) {
4067 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4069 } else if (val
& 0xFF0000) {
4070 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4071 if (val
& 0xFF000000) {
4072 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4075 //g_assert_not_reached ();
4081 mono_arm_thumb_supported (void)
4083 return thumb_supported
;
4089 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4094 call
= (MonoCallInst
*)ins
;
4095 cinfo
= call
->call_info
;
4097 switch (cinfo
->ret
.storage
) {
4098 case RegTypeStructByVal
:
4100 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4103 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4104 /* The JIT treats this as a normal call */
4108 /* Load the destination address */
4109 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
4111 if (arm_is_imm12 (loc
->inst_offset
)) {
4112 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
4114 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4115 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4118 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4119 int rsize
= cinfo
->ret
.struct_size
;
4121 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4122 g_assert (rsize
>= 0);
4127 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4130 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4133 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
4139 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4140 if (cinfo
->ret
.esize
== 4)
4141 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4143 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
4152 switch (ins
->opcode
) {
4155 case OP_FCALL_MEMBASE
:
4157 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4158 if (sig_ret
->type
== MONO_TYPE_R4
) {
4159 if (IS_HARD_FLOAT
) {
4160 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4162 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4163 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4166 if (IS_HARD_FLOAT
) {
4167 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
4169 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4176 case OP_RCALL_MEMBASE
: {
4181 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4182 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4183 if (IS_HARD_FLOAT
) {
4184 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4186 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4187 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4199 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4204 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4205 MonoInst
*last_ins
= NULL
;
4206 guint last_offset
= 0;
4208 int imm8
, rot_amount
;
4210 /* we don't align basic blocks of loops on arm */
4212 if (cfg
->verbose_level
> 2)
4213 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4215 cpos
= bb
->max_offset
;
4217 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
4218 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4219 //g_assert (!mono_compile_aot);
4222 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4223 /* this is not thread save, but good enough */
4224 /* fixme: howto handle overflows? */
4225 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4228 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4229 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4230 (gpointer
)"mono_break");
4231 code
= emit_call_seq (cfg
, code
);
4234 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4235 offset
= code
- cfg
->native_code
;
4237 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
4239 if (offset
> (cfg
->code_size
- max_len
- 16)) {
4240 cfg
->code_size
*= 2;
4241 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4242 code
= cfg
->native_code
+ offset
;
4244 // if (ins->cil_code)
4245 // g_print ("cil code\n");
4246 mono_debug_record_line_number (cfg
, ins
, offset
);
4248 switch (ins
->opcode
) {
4249 case OP_MEMORY_BARRIER
:
4251 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4252 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4256 code
= mono_arm_emit_tls_get (cfg
, code
, ins
->dreg
, ins
->inst_offset
);
4258 case OP_TLS_GET_REG
:
4259 code
= mono_arm_emit_tls_get_reg (cfg
, code
, ins
->dreg
, ins
->sreg1
);
4262 code
= mono_arm_emit_tls_set (cfg
, code
, ins
->sreg1
, ins
->inst_offset
);
4264 case OP_TLS_SET_REG
:
4265 code
= mono_arm_emit_tls_set_reg (cfg
, code
, ins
->sreg1
, ins
->sreg2
);
4267 case OP_ATOMIC_EXCHANGE_I4
:
4268 case OP_ATOMIC_CAS_I4
:
4269 case OP_ATOMIC_ADD_I4
: {
4273 g_assert (v7_supported
);
4276 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4278 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4280 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4284 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4285 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4287 switch (ins
->opcode
) {
4288 case OP_ATOMIC_EXCHANGE_I4
:
4290 ARM_DMB (code
, ARM_DMB_SY
);
4291 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4292 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4293 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4295 ARM_B_COND (code
, ARMCOND_NE
, 0);
4296 arm_patch (buf
[1], buf
[0]);
4298 case OP_ATOMIC_CAS_I4
:
4299 ARM_DMB (code
, ARM_DMB_SY
);
4301 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4302 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4304 ARM_B_COND (code
, ARMCOND_NE
, 0);
4305 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4306 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4308 ARM_B_COND (code
, ARMCOND_NE
, 0);
4309 arm_patch (buf
[2], buf
[0]);
4310 arm_patch (buf
[1], code
);
4312 case OP_ATOMIC_ADD_I4
:
4314 ARM_DMB (code
, ARM_DMB_SY
);
4315 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4316 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4317 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4318 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4320 ARM_B_COND (code
, ARMCOND_NE
, 0);
4321 arm_patch (buf
[1], buf
[0]);
4324 g_assert_not_reached ();
4327 ARM_DMB (code
, ARM_DMB_SY
);
4328 if (tmpreg
!= ins
->dreg
)
4329 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4330 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4333 case OP_ATOMIC_LOAD_I1
:
4334 case OP_ATOMIC_LOAD_U1
:
4335 case OP_ATOMIC_LOAD_I2
:
4336 case OP_ATOMIC_LOAD_U2
:
4337 case OP_ATOMIC_LOAD_I4
:
4338 case OP_ATOMIC_LOAD_U4
:
4339 case OP_ATOMIC_LOAD_R4
:
4340 case OP_ATOMIC_LOAD_R8
: {
4341 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4342 ARM_DMB (code
, ARM_DMB_SY
);
4344 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4346 switch (ins
->opcode
) {
4347 case OP_ATOMIC_LOAD_I1
:
4348 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4350 case OP_ATOMIC_LOAD_U1
:
4351 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4353 case OP_ATOMIC_LOAD_I2
:
4354 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4356 case OP_ATOMIC_LOAD_U2
:
4357 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4359 case OP_ATOMIC_LOAD_I4
:
4360 case OP_ATOMIC_LOAD_U4
:
4361 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4363 case OP_ATOMIC_LOAD_R4
:
4365 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4366 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4368 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4369 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4370 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4371 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4372 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4375 case OP_ATOMIC_LOAD_R8
:
4376 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4377 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4381 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4382 ARM_DMB (code
, ARM_DMB_SY
);
4385 case OP_ATOMIC_STORE_I1
:
4386 case OP_ATOMIC_STORE_U1
:
4387 case OP_ATOMIC_STORE_I2
:
4388 case OP_ATOMIC_STORE_U2
:
4389 case OP_ATOMIC_STORE_I4
:
4390 case OP_ATOMIC_STORE_U4
:
4391 case OP_ATOMIC_STORE_R4
:
4392 case OP_ATOMIC_STORE_R8
: {
4393 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4394 ARM_DMB (code
, ARM_DMB_SY
);
4396 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4398 switch (ins
->opcode
) {
4399 case OP_ATOMIC_STORE_I1
:
4400 case OP_ATOMIC_STORE_U1
:
4401 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4403 case OP_ATOMIC_STORE_I2
:
4404 case OP_ATOMIC_STORE_U2
:
4405 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4407 case OP_ATOMIC_STORE_I4
:
4408 case OP_ATOMIC_STORE_U4
:
4409 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4411 case OP_ATOMIC_STORE_R4
:
4413 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4414 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4416 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4417 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4418 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4419 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4420 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4423 case OP_ATOMIC_STORE_R8
:
4424 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4425 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4429 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4430 ARM_DMB (code
, ARM_DMB_SY
);
4434 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4435 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4438 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4439 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4441 case OP_STOREI1_MEMBASE_IMM
:
4442 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4443 g_assert (arm_is_imm12 (ins
->inst_offset
));
4444 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4446 case OP_STOREI2_MEMBASE_IMM
:
4447 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4448 g_assert (arm_is_imm8 (ins
->inst_offset
));
4449 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4451 case OP_STORE_MEMBASE_IMM
:
4452 case OP_STOREI4_MEMBASE_IMM
:
4453 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4454 g_assert (arm_is_imm12 (ins
->inst_offset
));
4455 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4457 case OP_STOREI1_MEMBASE_REG
:
4458 g_assert (arm_is_imm12 (ins
->inst_offset
));
4459 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4461 case OP_STOREI2_MEMBASE_REG
:
4462 g_assert (arm_is_imm8 (ins
->inst_offset
));
4463 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4465 case OP_STORE_MEMBASE_REG
:
4466 case OP_STOREI4_MEMBASE_REG
:
4467 /* this case is special, since it happens for spill code after lowering has been called */
4468 if (arm_is_imm12 (ins
->inst_offset
)) {
4469 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4471 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4472 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4475 case OP_STOREI1_MEMINDEX
:
4476 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4478 case OP_STOREI2_MEMINDEX
:
4479 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4481 case OP_STORE_MEMINDEX
:
4482 case OP_STOREI4_MEMINDEX
:
4483 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4486 g_assert_not_reached ();
4488 case OP_LOAD_MEMINDEX
:
4489 case OP_LOADI4_MEMINDEX
:
4490 case OP_LOADU4_MEMINDEX
:
4491 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4493 case OP_LOADI1_MEMINDEX
:
4494 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4496 case OP_LOADU1_MEMINDEX
:
4497 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4499 case OP_LOADI2_MEMINDEX
:
4500 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4502 case OP_LOADU2_MEMINDEX
:
4503 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4505 case OP_LOAD_MEMBASE
:
4506 case OP_LOADI4_MEMBASE
:
4507 case OP_LOADU4_MEMBASE
:
4508 /* this case is special, since it happens for spill code after lowering has been called */
4509 if (arm_is_imm12 (ins
->inst_offset
)) {
4510 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4512 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4513 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4516 case OP_LOADI1_MEMBASE
:
4517 g_assert (arm_is_imm8 (ins
->inst_offset
));
4518 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4520 case OP_LOADU1_MEMBASE
:
4521 g_assert (arm_is_imm12 (ins
->inst_offset
));
4522 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4524 case OP_LOADU2_MEMBASE
:
4525 g_assert (arm_is_imm8 (ins
->inst_offset
));
4526 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4528 case OP_LOADI2_MEMBASE
:
4529 g_assert (arm_is_imm8 (ins
->inst_offset
));
4530 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4532 case OP_ICONV_TO_I1
:
4533 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4534 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4536 case OP_ICONV_TO_I2
:
4537 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4538 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4540 case OP_ICONV_TO_U1
:
4541 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4543 case OP_ICONV_TO_U2
:
4544 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4545 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4549 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4551 case OP_COMPARE_IMM
:
4552 case OP_ICOMPARE_IMM
:
4553 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4554 g_assert (imm8
>= 0);
4555 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4559 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4560 * So instead of emitting a trap, we emit a call a C function and place a
4563 //*(int*)code = 0xef9f0001;
4566 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4567 (gpointer
)"mono_break");
4568 code
= emit_call_seq (cfg
, code
);
4570 case OP_RELAXED_NOP
:
4575 case OP_DUMMY_STORE
:
4576 case OP_DUMMY_ICONST
:
4577 case OP_DUMMY_R8CONST
:
4578 case OP_NOT_REACHED
:
4581 case OP_IL_SEQ_POINT
:
4582 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4584 case OP_SEQ_POINT
: {
4586 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4587 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4588 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4589 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4591 int dreg
= ARMREG_LR
;
4593 if (cfg
->soft_breakpoints
) {
4594 g_assert (!cfg
->compile_aot
);
4598 * For AOT, we use one got slot per method, which will point to a
4599 * SeqPointInfo structure, containing all the information required
4600 * by the code below.
4602 if (cfg
->compile_aot
) {
4603 g_assert (info_var
);
4604 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4605 g_assert (arm_is_imm12 (info_var
->inst_offset
));
4608 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4610 * Read from the single stepping trigger page. This will cause a
4611 * SIGSEGV when single stepping is enabled.
4612 * We do this _before_ the breakpoint, so single stepping after
4613 * a breakpoint is hit will step to the next IL offset.
4615 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4618 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4619 if (cfg
->soft_breakpoints
) {
4620 /* Load the address of the sequence point method variable. */
4621 var
= ss_method_var
;
4623 g_assert (var
->opcode
== OP_REGOFFSET
);
4624 g_assert (arm_is_imm12 (var
->inst_offset
));
4625 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4627 /* Read the value and check whether it is non-zero. */
4628 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4629 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4630 /* Call it conditionally. */
4631 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4633 if (cfg
->compile_aot
) {
4634 /* Load the trigger page addr from the variable initialized in the prolog */
4635 var
= ss_trigger_page_var
;
4637 g_assert (var
->opcode
== OP_REGOFFSET
);
4638 g_assert (arm_is_imm12 (var
->inst_offset
));
4639 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4641 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4643 *(int*)code
= (int)ss_trigger_page
;
4646 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4650 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4652 if (cfg
->soft_breakpoints
) {
4653 /* Load the address of the breakpoint method into ip. */
4654 var
= bp_method_var
;
4656 g_assert (var
->opcode
== OP_REGOFFSET
);
4657 g_assert (arm_is_imm12 (var
->inst_offset
));
4658 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4661 * A placeholder for a possible breakpoint inserted by
4662 * mono_arch_set_breakpoint ().
4665 } else if (cfg
->compile_aot
) {
4666 guint32 offset
= code
- cfg
->native_code
;
4669 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
4670 /* Add the offset */
4671 val
= ((offset
/ 4) * sizeof (guint8
*)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4672 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4673 if (arm_is_imm12 ((int)val
)) {
4674 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4676 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4678 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4680 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4681 g_assert (!(val
& 0xFF000000));
4683 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4685 /* What is faster, a branch or a load ? */
4686 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4687 /* The breakpoint instruction */
4688 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4691 * A placeholder for a possible breakpoint inserted by
4692 * mono_arch_set_breakpoint ().
4694 for (i
= 0; i
< 4; ++i
)
4699 * Add an additional nop so skipping the bp doesn't cause the ip to point
4700 * to another IL offset.
4708 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4711 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4715 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4718 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4719 g_assert (imm8
>= 0);
4720 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4724 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4725 g_assert (imm8
>= 0);
4726 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4730 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4731 g_assert (imm8
>= 0);
4732 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4735 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4736 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4738 case OP_IADD_OVF_UN
:
4739 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4740 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4743 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4744 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4746 case OP_ISUB_OVF_UN
:
4747 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4748 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4750 case OP_ADD_OVF_CARRY
:
4751 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4752 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4754 case OP_ADD_OVF_UN_CARRY
:
4755 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4756 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4758 case OP_SUB_OVF_CARRY
:
4759 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4760 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4762 case OP_SUB_OVF_UN_CARRY
:
4763 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4764 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4768 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4771 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4772 g_assert (imm8
>= 0);
4773 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4776 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4780 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4784 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4785 g_assert (imm8
>= 0);
4786 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4790 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4791 g_assert (imm8
>= 0);
4792 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4794 case OP_ARM_RSBS_IMM
:
4795 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4796 g_assert (imm8
>= 0);
4797 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4799 case OP_ARM_RSC_IMM
:
4800 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4801 g_assert (imm8
>= 0);
4802 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4805 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4809 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4810 g_assert (imm8
>= 0);
4811 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4814 g_assert (v7s_supported
|| v7k_supported
);
4815 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4818 g_assert (v7s_supported
|| v7k_supported
);
4819 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4822 g_assert (v7s_supported
|| v7k_supported
);
4823 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4824 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4827 g_assert (v7s_supported
|| v7k_supported
);
4828 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4829 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4833 g_assert_not_reached ();
4835 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4839 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4840 g_assert (imm8
>= 0);
4841 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4844 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4848 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4849 g_assert (imm8
>= 0);
4850 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4853 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4858 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4859 else if (ins
->dreg
!= ins
->sreg1
)
4860 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4863 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4868 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4869 else if (ins
->dreg
!= ins
->sreg1
)
4870 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4873 case OP_ISHR_UN_IMM
:
4875 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4876 else if (ins
->dreg
!= ins
->sreg1
)
4877 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4880 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4883 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4886 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4889 if (ins
->dreg
== ins
->sreg2
)
4890 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4892 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4895 g_assert_not_reached ();
4898 /* FIXME: handle ovf/ sreg2 != dreg */
4899 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4900 /* FIXME: MUL doesn't set the C/O flags on ARM */
4902 case OP_IMUL_OVF_UN
:
4903 /* FIXME: handle ovf/ sreg2 != dreg */
4904 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4905 /* FIXME: MUL doesn't set the C/O flags on ARM */
4908 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4911 /* Load the GOT offset */
4912 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
4913 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4915 *(gpointer
*)code
= NULL
;
4917 /* Load the value from the GOT */
4918 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4920 case OP_OBJC_GET_SELECTOR
:
4921 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4922 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4924 *(gpointer
*)code
= NULL
;
4926 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4928 case OP_ICONV_TO_I4
:
4929 case OP_ICONV_TO_U4
:
4931 if (ins
->dreg
!= ins
->sreg1
)
4932 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4935 int saved
= ins
->sreg2
;
4936 if (ins
->sreg2
== ARM_LSW_REG
) {
4937 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4940 if (ins
->sreg1
!= ARM_LSW_REG
)
4941 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4942 if (saved
!= ARM_MSW_REG
)
4943 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4947 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4948 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4951 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4952 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4954 case OP_MOVE_F_TO_I4
:
4956 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4958 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4959 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4960 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4961 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4964 case OP_MOVE_I4_TO_F
:
4966 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4968 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4969 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4972 case OP_FCONV_TO_R4
:
4975 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4977 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4978 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4983 MonoCallInst
*call
= (MonoCallInst
*)ins
;
4986 * The stack looks like the following:
4987 * <caller argument area>
4990 * <callee argument area>
4991 * Need to copy the arguments from the callee argument area to
4992 * the caller argument area, and pop the frame.
4994 if (call
->stack_usage
) {
4995 int i
, prev_sp_offset
= 0;
4997 /* Compute size of saved registers restored below */
4999 prev_sp_offset
= 2 * 4;
5001 prev_sp_offset
= 1 * 4;
5002 for (i
= 0; i
< 16; ++i
) {
5003 if (cfg
->used_int_regs
& (1 << i
))
5004 prev_sp_offset
+= 4;
5007 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
5009 /* Copy arguments on the stack to our argument area */
5010 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (mgreg_t
)) {
5011 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
);
5012 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
);
5017 * Keep in sync with mono_arch_emit_epilog
5019 g_assert (!cfg
->method
->save_lmf
);
5021 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
5023 if (cfg
->used_int_regs
)
5024 ARM_POP (code
, cfg
->used_int_regs
);
5025 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
5027 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
5030 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
5031 if (cfg
->compile_aot
) {
5032 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5034 *(gpointer
*)code
= NULL
;
5036 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
5038 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5039 cfg
->thunk_area
+= THUNK_SIZE
;
5044 /* ensure ins->sreg1 is not NULL */
5045 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
5048 g_assert (cfg
->sig_cookie
< 128);
5049 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
5050 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5060 call
= (MonoCallInst
*)ins
;
5063 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5065 if (ins
->flags
& MONO_INST_HAS_METHOD
)
5066 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
5068 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
5069 code
= emit_call_seq (cfg
, code
);
5070 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5071 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5072 code
= emit_move_return_value (cfg
, ins
, code
);
5079 case OP_VOIDCALL_REG
:
5082 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5084 code
= emit_call_reg (code
, ins
->sreg1
);
5085 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5086 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5087 code
= emit_move_return_value (cfg
, ins
, code
);
5089 case OP_FCALL_MEMBASE
:
5090 case OP_RCALL_MEMBASE
:
5091 case OP_LCALL_MEMBASE
:
5092 case OP_VCALL_MEMBASE
:
5093 case OP_VCALL2_MEMBASE
:
5094 case OP_VOIDCALL_MEMBASE
:
5095 case OP_CALL_MEMBASE
: {
5096 g_assert (ins
->sreg1
!= ARMREG_LR
);
5097 call
= (MonoCallInst
*)ins
;
5100 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5101 if (!arm_is_imm12 (ins
->inst_offset
)) {
5102 /* sreg1 might be IP */
5103 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5104 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5105 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5106 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5107 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5109 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5110 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5112 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5113 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5114 code
= emit_move_return_value (cfg
, ins
, code
);
5117 case OP_GENERIC_CLASS_INIT
: {
5118 static int byte_offset
= -1;
5119 static guint8 bitmask
;
5123 if (byte_offset
< 0)
5124 mono_marshal_find_bitfield_offset (MonoVTable
, initialized
, &byte_offset
, &bitmask
);
5126 g_assert (arm_is_imm8 (byte_offset
));
5127 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5128 imm8
= mono_arm_is_rotated_imm8 (bitmask
, &rot_amount
);
5129 g_assert (imm8
>= 0);
5130 ARM_AND_REG_IMM (code
, ARMREG_IP
, ARMREG_IP
, imm8
, rot_amount
);
5131 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5133 ARM_B_COND (code
, ARMCOND_NE
, 0);
5135 /* Uninitialized case */
5136 g_assert (ins
->sreg1
== ARMREG_R0
);
5138 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5139 (gpointer
)"mono_generic_class_init");
5140 code
= emit_call_seq (cfg
, code
);
5142 /* Initialized case */
5143 arm_patch (jump
, code
);
5147 /* round the size to 8 bytes */
5148 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5149 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5150 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5151 /* memzero the area: dreg holds the size, sp is the pointer */
5152 if (ins
->flags
& MONO_INST_INIT
) {
5153 guint8
*start_loop
, *branch_to_cond
;
5154 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5155 branch_to_cond
= code
;
5158 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5159 arm_patch (branch_to_cond
, code
);
5160 /* decrement by 4 and set flags */
5161 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (mgreg_t
));
5162 ARM_B_COND (code
, ARMCOND_GE
, 0);
5163 arm_patch (code
- 4, start_loop
);
5165 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5166 if (cfg
->param_area
)
5167 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5172 MonoInst
*var
= cfg
->dyn_call_var
;
5175 g_assert (var
->opcode
== OP_REGOFFSET
);
5176 g_assert (arm_is_imm12 (var
->inst_offset
));
5178 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5179 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5181 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5183 /* Save args buffer */
5184 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5186 /* Set stack slots using R0 as scratch reg */
5187 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5188 for (i
= 0; i
< DYN_CALL_STACK_ARGS
; ++i
) {
5189 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, (PARAM_REGS
+ i
) * sizeof (mgreg_t
));
5190 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_SP
, i
* sizeof (mgreg_t
));
5193 /* Set fp argument registers */
5194 if (IS_HARD_FLOAT
) {
5195 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5196 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5198 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5199 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5200 int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5201 g_assert (arm_is_fpimm8 (offset
));
5202 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5204 arm_patch (buf
[0], code
);
5207 /* Set argument registers */
5208 for (i
= 0; i
< PARAM_REGS
; ++i
)
5209 ARM_LDR_IMM (code
, i
, ARMREG_LR
, i
* sizeof (mgreg_t
));
5212 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5213 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5216 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5217 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5218 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5220 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5224 if (ins
->sreg1
!= ARMREG_R0
)
5225 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5226 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5227 (gpointer
)"mono_arch_throw_exception");
5228 code
= emit_call_seq (cfg
, code
);
5232 if (ins
->sreg1
!= ARMREG_R0
)
5233 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5234 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
5235 (gpointer
)"mono_arch_rethrow_exception");
5236 code
= emit_call_seq (cfg
, code
);
5239 case OP_START_HANDLER
: {
5240 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5241 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5244 /* Reserve a param area, see filter-stack.exe */
5246 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5247 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5249 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5250 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5254 if (arm_is_imm12 (spvar
->inst_offset
)) {
5255 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5257 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5258 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5262 case OP_ENDFILTER
: {
5263 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5264 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5267 /* Free the param area */
5269 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5270 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5272 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5273 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5277 if (ins
->sreg1
!= ARMREG_R0
)
5278 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5279 if (arm_is_imm12 (spvar
->inst_offset
)) {
5280 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5282 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5283 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5284 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5286 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5289 case OP_ENDFINALLY
: {
5290 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5291 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5294 /* Free the param area */
5296 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5297 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5299 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5300 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5304 if (arm_is_imm12 (spvar
->inst_offset
)) {
5305 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5307 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5308 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5309 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5311 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5314 case OP_CALL_HANDLER
:
5315 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5316 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5317 cfg
->thunk_area
+= THUNK_SIZE
;
5318 mono_cfg_add_try_hole (cfg
, ins
->inst_eh_block
, code
, bb
);
5321 if (ins
->dreg
!= ARMREG_R0
)
5322 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5326 ins
->inst_c0
= code
- cfg
->native_code
;
5329 /*if (ins->inst_target_bb->native_offset) {
5331 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5333 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5334 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5338 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5342 * In the normal case we have:
5343 * ldr pc, [pc, ins->sreg1 << 2]
5346 * ldr lr, [pc, ins->sreg1 << 2]
5348 * After follows the data.
5349 * FIXME: add aot support.
5351 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5352 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5353 if (offset
+ max_len
> (cfg
->code_size
- 16)) {
5354 cfg
->code_size
+= max_len
;
5355 cfg
->code_size
*= 2;
5356 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
5357 code
= cfg
->native_code
+ offset
;
5359 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5361 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5365 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5366 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5370 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5371 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5375 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5376 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5380 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5381 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5385 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5386 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5389 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5390 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5393 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5394 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5397 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5398 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5401 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5402 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5405 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5406 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5408 case OP_COND_EXC_EQ
:
5409 case OP_COND_EXC_NE_UN
:
5410 case OP_COND_EXC_LT
:
5411 case OP_COND_EXC_LT_UN
:
5412 case OP_COND_EXC_GT
:
5413 case OP_COND_EXC_GT_UN
:
5414 case OP_COND_EXC_GE
:
5415 case OP_COND_EXC_GE_UN
:
5416 case OP_COND_EXC_LE
:
5417 case OP_COND_EXC_LE_UN
:
5418 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5420 case OP_COND_EXC_IEQ
:
5421 case OP_COND_EXC_INE_UN
:
5422 case OP_COND_EXC_ILT
:
5423 case OP_COND_EXC_ILT_UN
:
5424 case OP_COND_EXC_IGT
:
5425 case OP_COND_EXC_IGT_UN
:
5426 case OP_COND_EXC_IGE
:
5427 case OP_COND_EXC_IGE_UN
:
5428 case OP_COND_EXC_ILE
:
5429 case OP_COND_EXC_ILE_UN
:
5430 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5433 case OP_COND_EXC_IC
:
5434 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5436 case OP_COND_EXC_OV
:
5437 case OP_COND_EXC_IOV
:
5438 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5440 case OP_COND_EXC_NC
:
5441 case OP_COND_EXC_INC
:
5442 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5444 case OP_COND_EXC_NO
:
5445 case OP_COND_EXC_INO
:
5446 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5458 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5461 /* floating point opcodes */
5463 if (cfg
->compile_aot
) {
5464 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5466 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5468 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5471 /* FIXME: we can optimize the imm load by dealing with part of
5472 * the displacement in LDFD (aligning to 512).
5474 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
5475 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5479 if (cfg
->compile_aot
) {
5480 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5482 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5485 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5487 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
5488 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5490 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5493 case OP_STORER8_MEMBASE_REG
:
5494 /* This is generated by the local regalloc pass which runs after the lowering pass */
5495 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5496 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5497 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5498 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5500 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5503 case OP_LOADR8_MEMBASE
:
5504 /* This is generated by the local regalloc pass which runs after the lowering pass */
5505 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5506 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5507 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5508 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5510 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5513 case OP_STORER4_MEMBASE_REG
:
5514 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5516 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5518 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5519 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5520 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5521 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5524 case OP_LOADR4_MEMBASE
:
5526 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5528 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5529 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5530 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5531 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5532 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5535 case OP_ICONV_TO_R_UN
: {
5536 g_assert_not_reached ();
5539 case OP_ICONV_TO_R4
:
5541 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5542 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5544 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5545 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5546 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5547 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5548 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5551 case OP_ICONV_TO_R8
:
5552 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5553 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5554 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5555 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5559 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature (cfg
->method
)->ret
);
5560 if (sig_ret
->type
== MONO_TYPE_R4
) {
5562 if (IS_HARD_FLOAT
) {
5563 if (ins
->sreg1
!= ARM_VFP_D0
)
5564 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5566 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5569 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5572 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5576 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5578 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5582 case OP_FCONV_TO_I1
:
5583 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5585 case OP_FCONV_TO_U1
:
5586 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5588 case OP_FCONV_TO_I2
:
5589 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5591 case OP_FCONV_TO_U2
:
5592 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5594 case OP_FCONV_TO_I4
:
5596 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5598 case OP_FCONV_TO_U4
:
5600 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5602 case OP_FCONV_TO_I8
:
5603 case OP_FCONV_TO_U8
:
5604 g_assert_not_reached ();
5605 /* Implemented as helper calls */
5607 case OP_LCONV_TO_R_UN
:
5608 g_assert_not_reached ();
5609 /* Implemented as helper calls */
5611 case OP_LCONV_TO_OVF_I4_2
: {
5612 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5614 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5617 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5618 high_bit_not_set
= code
;
5619 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5621 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5622 valid_negative
= code
;
5623 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5624 invalid_negative
= code
;
5625 ARM_B_COND (code
, ARMCOND_AL
, 0);
5627 arm_patch (high_bit_not_set
, code
);
5629 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5630 valid_positive
= code
;
5631 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5633 arm_patch (invalid_negative
, code
);
5634 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5636 arm_patch (valid_negative
, code
);
5637 arm_patch (valid_positive
, code
);
5639 if (ins
->dreg
!= ins
->sreg1
)
5640 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5644 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5647 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5650 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5653 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5656 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5660 g_assert_not_reached ();
5664 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5670 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5675 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5678 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5679 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5683 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5686 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5687 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5691 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5694 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5695 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5696 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5700 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5703 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5704 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5708 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5711 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5712 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5713 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5717 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5720 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5721 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5725 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5728 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5729 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5733 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5736 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5737 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5740 /* ARM FPA flags table:
5741 * N Less than ARMCOND_MI
5742 * Z Equal ARMCOND_EQ
5743 * C Greater Than or Equal ARMCOND_CS
5744 * V Unordered ARMCOND_VS
5747 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5750 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5753 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5756 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5757 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5763 g_assert_not_reached ();
5767 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5769 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5770 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5771 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5775 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5776 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5781 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5782 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5784 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5785 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5787 *(guint32
*)code
= 0xffffffff;
5789 *(guint32
*)code
= 0x7fefffff;
5791 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5793 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5794 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5796 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5797 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5799 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5800 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5805 case OP_RCONV_TO_I1
:
5806 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5808 case OP_RCONV_TO_U1
:
5809 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5811 case OP_RCONV_TO_I2
:
5812 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5814 case OP_RCONV_TO_U2
:
5815 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5817 case OP_RCONV_TO_I4
:
5818 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5820 case OP_RCONV_TO_U4
:
5821 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5823 case OP_RCONV_TO_R4
:
5825 if (ins
->dreg
!= ins
->sreg1
)
5826 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5828 case OP_RCONV_TO_R8
:
5830 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5833 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5836 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5839 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5842 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5845 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5849 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5852 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5853 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5857 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5860 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5861 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5865 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5868 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5869 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5870 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5874 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5877 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5878 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5882 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5885 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5886 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5887 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5891 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5894 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5895 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5899 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5902 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5903 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5907 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5910 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5911 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5914 case OP_GC_LIVENESS_DEF
:
5915 case OP_GC_LIVENESS_USE
:
5916 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
5917 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5919 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
5920 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5921 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
5923 case OP_GC_SAFE_POINT
: {
5926 g_assert (mono_threads_is_coop_enabled ());
5928 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5929 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5931 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5932 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
, "mono_threads_state_poll");
5933 code
= emit_call_seq (cfg
, code
);
5934 arm_patch (buf
[0], code
);
5939 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
5940 g_assert_not_reached ();
5943 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
5944 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5945 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
5946 g_assert_not_reached ();
5952 last_offset
= offset
;
5955 cfg
->code_len
= code
- cfg
->native_code
;
5958 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 * Register the ARM backend's low-level helpers as JIT icalls so generated
 * code can call them by name. As the comment below notes, the signature
 * string passed to mono_create_icall_signature () is a placeholder.
 */
5961 mono_arch_register_lowlevel_calls (void)
5963 /* The signature doesn't matter */
5964 mono_register_jit_icall (mono_arm_throw_exception
, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE
);
5965 mono_register_jit_icall (mono_arm_throw_exception_by_token
, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE
);
/* mono_arm_unaligned_stack is a pure diagnostic trap (see its definition below). */
5966 mono_register_jit_icall (mono_arm_unaligned_stack
, "mono_arm_unaligned_stack", mono_create_icall_signature ("void"), TRUE
);
/* TLS thunks are only probed/registered on non-cross builds where the
 * runtime can detect a usable TLS implementation at startup. */
5968 #ifndef MONO_CROSS_COMPILE
5969 if (mono_arm_have_tls_get ()) {
5970 MonoTlsImplementation tls_imp
= mono_arm_get_tls_implementation ();
5972 mono_register_jit_icall (tls_imp
.get_tls_thunk
, "mono_get_tls_key", mono_create_icall_signature ("ptr ptr"), TRUE
);
5973 mono_register_jit_icall (tls_imp
.set_tls_thunk
, "mono_set_tls_key", mono_create_icall_signature ("void ptr ptr"), TRUE
);
/* If the implementation exposes thunk end markers, publish trampoline
 * info (start, size = end - start, CIE unwind program) for the getter…
 * NOTE(review): some argument lines of mono_tramp_info_create () are
 * missing from this extraction — confirm against the full source. */
5975 if (tls_imp
.get_tls_thunk_end
) {
5976 mono_tramp_info_register (
5977 mono_tramp_info_create (
5979 (guint8
*)tls_imp
.get_tls_thunk
,
5980 (guint8
*)tls_imp
.get_tls_thunk_end
- (guint8
*)tls_imp
.get_tls_thunk
,
5982 mono_arch_get_cie_program ()
/* …and the same for the setter thunk. */
5986 mono_tramp_info_register (
5987 mono_tramp_info_create (
5989 (guint8
*)tls_imp
.set_tls_thunk
,
5990 (guint8
*)tls_imp
.set_tls_thunk_end
- (guint8
*)tls_imp
.set_tls_thunk
,
5992 mono_arch_get_cie_program ()
/*
 * patch_lis_ori:
 * Rewrite the two 16-bit immediates of a lis/ori instruction pair so the
 * pair loads 'val' (high half into __lis_ori [1], low half into [3]).
 * NOTE(review): lis/ori are PowerPC opcodes, and every use of this macro
 * below sits behind g_assert_not_reached () — this looks like dead code
 * carried over from another backend; confirm before relying on it.
 */
6001 #define patch_lis_ori(ip,val) do {\
6002 guint16 *__lis_ori = (guint16*)(ip); \
6003 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
6004 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code_new:
 *
 * Apply a single resolved patch: write 'target' into the generated code
 * at ip = code + ji->ip.i according to the patch type ji->type.
 * Only MONO_PATCH_INFO_SWITCH needs special handling here; everything
 * else is delegated to arm_patch_general () at the bottom. Several cases
 * are guarded by g_assert_not_reached () — presumably dead code inherited
 * from another backend (they use the PPC-style patch_lis_ori macro).
 * NOTE(review): the switch header and break statements are elided in this
 * extraction.
 */
6008 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
6010 unsigned char *ip
= ji
->ip
.i
+ code
;
6012 if (ji
->type
== MONO_PATCH_INFO_SWITCH
) {
/* Fill the jump table that was inlined right after the switch
 * instruction with the absolute address of each target block. */
6016 case MONO_PATCH_INFO_SWITCH
: {
6017 gpointer
*jt
= (gpointer
*)(ip
+ 8);
6019 /* jt is the inlined jump table, 2 instructions after ip
6020 * In the normal case we store the absolute addresses,
6021 * otherwise the displacements.
6023 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++)
6024 jt
[i
] = code
+ (int)ji
->data
.table
->table
[i
];
/* The following cases are never reached on this backend (asserted). */
6027 case MONO_PATCH_INFO_IP
:
6028 g_assert_not_reached ();
6029 patch_lis_ori (ip
, ip
);
6031 case MONO_PATCH_INFO_METHOD_REL
:
6032 g_assert_not_reached ();
6033 *((gpointer
*)(ip
)) = target
;
/* Constant-pool style patches — also dead on ARM (asserted below). */
6035 case MONO_PATCH_INFO_METHODCONST
:
6036 case MONO_PATCH_INFO_CLASS
:
6037 case MONO_PATCH_INFO_IMAGE
:
6038 case MONO_PATCH_INFO_FIELD
:
6039 case MONO_PATCH_INFO_VTABLE
:
6040 case MONO_PATCH_INFO_IID
:
6041 case MONO_PATCH_INFO_SFLDA
:
6042 case MONO_PATCH_INFO_LDSTR
:
6043 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
6044 case MONO_PATCH_INFO_LDTOKEN
:
6045 g_assert_not_reached ();
6046 /* from OP_AOTCONST : lis + ori */
6047 patch_lis_ori (ip
, target
);
6049 case MONO_PATCH_INFO_R4
:
6050 case MONO_PATCH_INFO_R8
:
6051 g_assert_not_reached ();
6052 *((gconstpointer
*)(ip
+ 2)) = target
;
6054 case MONO_PATCH_INFO_EXC_NAME
:
6055 g_assert_not_reached ();
6056 *((gconstpointer
*)(ip
+ 1)) = target
;
/* These patch kinds require no work at this point. */
6058 case MONO_PATCH_INFO_NONE
:
6059 case MONO_PATCH_INFO_BB_OVF
:
6060 case MONO_PATCH_INFO_EXC_OVF
:
6061 /* everything is dealt with at epilog output time */
/* Default path: let the generic ARM patcher handle the instruction. */
6064 arm_patch_general (cfg
, domain
, ip
, target
);
/*
 * mono_arm_unaligned_stack:
 *
 * Registered above (mono_arch_register_lowlevel_calls) as the
 * "mono_arm_unaligned_stack" JIT icall. The visible body only asserts,
 * so reaching this helper at runtime is always a bug.
 */
6070 mono_arm_unaligned_stack (MonoMethod
*method
)
6072 g_assert_not_reached ();
6078 * Stack frame layout:
6080 * ------------------- fp
6081 * MonoLMF structure or saved registers
6082 * -------------------
6084 * -------------------
6086 * -------------------
6087 * optional 8 bytes for tracing
6088 * -------------------
6089 * param area size is cfg->param_area
6090 * ------------------- sp
6093 mono_arch_emit_prolog (MonoCompile
*cfg
)
6095 MonoMethod
*method
= cfg
->method
;
6097 MonoMethodSignature
*sig
;
6099 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6104 int prev_sp_offset
, reg_offset
;
6106 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
6109 sig
= mono_method_signature (method
);
6110 cfg
->code_size
= 256 + sig
->param_count
* 64;
6111 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6113 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6115 alloc_size
= cfg
->stack_offset
;
6121 * The iphone uses R7 as the frame pointer, and it points at the saved
6126 * We can't use r7 as a frame pointer since it points into the middle of
6127 * the frame, so we keep using our own frame pointer.
6128 * FIXME: Optimize this.
6130 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6131 prev_sp_offset
+= 8; /* r7 and lr */
6132 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6133 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6134 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6137 if (!method
->save_lmf
) {
6139 /* No need to push LR again */
6140 if (cfg
->used_int_regs
)
6141 ARM_PUSH (code
, cfg
->used_int_regs
);
6143 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6144 prev_sp_offset
+= 4;
6146 for (i
= 0; i
< 16; ++i
) {
6147 if (cfg
->used_int_regs
& (1 << i
))
6148 prev_sp_offset
+= 4;
6150 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6152 for (i
= 0; i
< 16; ++i
) {
6153 if ((cfg
->used_int_regs
& (1 << i
))) {
6154 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6155 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6159 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6160 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6162 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6163 ARM_PUSH (code
, 0x5ff0);
6164 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6165 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6167 for (i
= 0; i
< 16; ++i
) {
6168 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6169 /* The original r7 is saved at the start */
6170 if (!(iphone_abi
&& i
== ARMREG_R7
))
6171 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6175 g_assert (reg_offset
== 4 * 10);
6176 pos
+= sizeof (MonoLMF
) - (4 * 10);
6180 orig_alloc_size
= alloc_size
;
6181 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6182 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6183 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6184 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6187 /* the stack used in the pushed regs */
6188 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6189 cfg
->stack_usage
= alloc_size
;
6191 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6192 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6194 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6195 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6197 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6199 if (cfg
->frame_reg
!= ARMREG_SP
) {
6200 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6201 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6203 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6204 prev_sp_offset
+= alloc_size
;
6206 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6207 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6209 /* compute max_offset in order to use short forward jumps
6210 * we could skip do it on arm because the immediate displacement
6211 * for jumps is large enough, it may be useful later for constant pools
6214 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6215 MonoInst
*ins
= bb
->code
;
6216 bb
->max_offset
= max_offset
;
6218 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
6221 MONO_BB_FOR_EACH_INS (bb
, ins
)
6222 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
6225 /* stack alignment check */
6229 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6230 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6231 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6232 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6234 ARM_B_COND (code, ARMCOND_EQ, 0);
6235 if (cfg->compile_aot)
6236 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6238 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6239 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_arm_unaligned_stack");
6240 code = emit_call_seq (cfg, code);
6241 arm_patch (buf [0], code);
6245 /* store runtime generic context */
6246 if (cfg
->rgctx_var
) {
6247 MonoInst
*ins
= cfg
->rgctx_var
;
6249 g_assert (ins
->opcode
== OP_REGOFFSET
);
6251 if (arm_is_imm12 (ins
->inst_offset
)) {
6252 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6254 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6255 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6259 /* load arguments allocated to register from the stack */
6262 cinfo
= get_call_info (NULL
, sig
);
6264 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6265 ArgInfo
*ainfo
= &cinfo
->ret
;
6266 inst
= cfg
->vret_addr
;
6267 g_assert (arm_is_imm12 (inst
->inst_offset
));
6268 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6271 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6272 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6274 /* Save the sig cookie address */
6275 g_assert (cookie
->storage
== RegTypeBase
);
6277 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6278 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6279 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6280 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6283 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6284 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6285 inst
= cfg
->args
[pos
];
6287 if (cfg
->verbose_level
> 2)
6288 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6290 if (inst
->opcode
== OP_REGVAR
) {
6291 if (ainfo
->storage
== RegTypeGeneral
)
6292 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6293 else if (ainfo
->storage
== RegTypeFP
) {
6294 g_assert_not_reached ();
6295 } else if (ainfo
->storage
== RegTypeBase
) {
6296 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6297 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6299 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6300 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6303 g_assert_not_reached ();
6305 if (cfg
->verbose_level
> 2)
6306 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
6308 switch (ainfo
->storage
) {
6310 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6311 if (ainfo
->esize
== 4)
6312 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6314 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6317 case RegTypeGeneral
:
6318 case RegTypeIRegPair
:
6319 case RegTypeGSharedVtInReg
:
6320 case RegTypeStructByAddr
:
6321 switch (ainfo
->size
) {
6323 if (arm_is_imm12 (inst
->inst_offset
))
6324 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6326 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6327 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6331 if (arm_is_imm8 (inst
->inst_offset
)) {
6332 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6334 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6335 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6339 if (arm_is_imm12 (inst
->inst_offset
)) {
6340 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6342 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6343 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6345 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6346 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6348 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6349 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6353 if (arm_is_imm12 (inst
->inst_offset
)) {
6354 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6356 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6357 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6362 case RegTypeBaseGen
:
6363 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6364 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6366 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6367 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6369 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6370 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6371 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6373 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6374 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6375 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6376 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6380 case RegTypeGSharedVtOnStack
:
6381 case RegTypeStructByAddrOnStack
:
6382 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6383 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6385 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6386 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6389 switch (ainfo
->size
) {
6391 if (arm_is_imm8 (inst
->inst_offset
)) {
6392 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6394 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6395 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6399 if (arm_is_imm8 (inst
->inst_offset
)) {
6400 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6402 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6403 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6407 if (arm_is_imm12 (inst
->inst_offset
)) {
6408 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6410 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6411 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6413 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6414 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6416 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6417 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6419 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6420 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6422 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6423 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6427 if (arm_is_imm12 (inst
->inst_offset
)) {
6428 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6430 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6431 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6437 int imm8
, rot_amount
;
6439 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6440 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6441 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6443 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6445 if (ainfo
->size
== 8)
6446 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6448 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6451 case RegTypeStructByVal
: {
6452 int doffset
= inst
->inst_offset
;
6456 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6457 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6458 if (arm_is_imm12 (doffset
)) {
6459 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6461 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6462 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6464 soffset
+= sizeof (gpointer
);
6465 doffset
+= sizeof (gpointer
);
6467 if (ainfo
->vtsize
) {
6468 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6469 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6470 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6475 g_assert_not_reached ();
6482 if (method
->save_lmf
)
6483 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6486 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
6488 if (cfg
->arch
.seq_point_info_var
) {
6489 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6491 /* Initialize the variable from a GOT slot */
6492 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6493 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6495 *(gpointer
*)code
= NULL
;
6497 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6499 g_assert (ins
->opcode
== OP_REGOFFSET
);
6501 if (arm_is_imm12 (ins
->inst_offset
)) {
6502 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6504 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6505 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6509 /* Initialize ss_trigger_page_var */
6510 if (!cfg
->soft_breakpoints
) {
6511 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6512 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6513 int dreg
= ARMREG_LR
;
6516 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6517 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6519 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6520 /* Load the trigger page addr */
6521 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6522 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6526 if (cfg
->arch
.seq_point_ss_method_var
) {
6527 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6528 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6529 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6530 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6531 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6532 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6534 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6536 *(gpointer
*)code
= &single_step_tramp
;
6538 *(gpointer
*)code
= breakpoint_tramp
;
6541 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6542 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6543 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6544 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6547 cfg
->code_len
= code
- cfg
->native_code
;
6548 g_assert (cfg
->code_len
< cfg
->code_size
);
6555 mono_arch_emit_epilog (MonoCompile
*cfg
)
6557 MonoMethod
*method
= cfg
->method
;
6558 int pos
, i
, rot_amount
;
6559 int max_epilog_size
= 16 + 20*4;
6563 if (cfg
->method
->save_lmf
)
6564 max_epilog_size
+= 128;
6566 if (mono_jit_trace_calls
!= NULL
)
6567 max_epilog_size
+= 50;
6569 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
6570 max_epilog_size
+= 50;
6572 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
6573 cfg
->code_size
*= 2;
6574 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
6575 cfg
->stat_code_reallocs
++;
6579 * Keep in sync with OP_JMP
6581 code
= cfg
->native_code
+ cfg
->code_len
;
6583 /* Save the uwind state which is needed by the out-of-line code */
6584 mono_emit_unwind_op_remember_state (cfg
, code
);
6586 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
)) {
6587 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
6591 /* Load returned vtypes into registers if needed */
6592 cinfo
= cfg
->arch
.cinfo
;
6593 switch (cinfo
->ret
.storage
) {
6594 case RegTypeStructByVal
: {
6595 MonoInst
*ins
= cfg
->ret
;
6597 if (cinfo
->ret
.nregs
== 1) {
6598 if (arm_is_imm12 (ins
->inst_offset
)) {
6599 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6601 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6602 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6605 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6606 int offset
= ins
->inst_offset
+ (i
* 4);
6607 if (arm_is_imm12 (offset
)) {
6608 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6610 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6611 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6618 MonoInst
*ins
= cfg
->ret
;
6620 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6621 if (cinfo
->ret
.esize
== 4)
6622 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6624 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6632 if (method
->save_lmf
) {
6633 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6634 /* all but r0-r3, sp and pc */
6635 pos
+= sizeof (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (mgreg_t
));
6638 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6640 /* This points to r4 inside MonoLMF->iregs */
6641 sp_adj
= (sizeof (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (mgreg_t
));
6643 regmask
= 0x9ff0; /* restore lr to pc */
6644 /* Skip caller saved registers not used by the method */
6645 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6646 regmask
&= ~(1 << reg
);
6651 /* Restored later */
6652 regmask
&= ~(1 << ARMREG_PC
);
6653 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6654 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6655 for (i
= 0; i
< 16; i
++) {
6656 if (regmask
& (1 << i
))
6659 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6661 ARM_POP (code
, regmask
);
6663 for (i
= 0; i
< 16; i
++) {
6664 if (regmask
& (1 << i
))
6665 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6667 /* Restore saved r7, restore LR to PC */
6668 /* Skip lr from the lmf */
6669 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6670 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (gpointer
), 0);
6671 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6672 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6675 int i
, nused_int_regs
= 0;
6677 for (i
= 0; i
< 16; i
++) {
6678 if (cfg
->used_int_regs
& (1 << i
))
6682 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6683 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6685 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6686 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6689 if (cfg
->frame_reg
!= ARMREG_SP
) {
6690 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6694 /* Restore saved gregs */
6695 if (cfg
->used_int_regs
) {
6696 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6697 ARM_POP (code
, cfg
->used_int_regs
);
6698 for (i
= 0; i
< 16; i
++) {
6699 if (cfg
->used_int_regs
& (1 << i
))
6700 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6703 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6704 /* Restore saved r7, restore LR to PC */
6705 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6707 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6708 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6712 /* Restore the unwind state to be the same as before the epilog */
6713 mono_emit_unwind_op_restore_state (cfg
, code
);
6715 cfg
->code_len
= code
- cfg
->native_code
;
6717 g_assert (cfg
->code_len
< cfg
->code_size
);
6722 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6724 MonoJumpInfo
*patch_info
;
6727 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6728 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6729 int max_epilog_size
= 50;
6731 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6732 exc_throw_pos
[i
] = NULL
;
6733 exc_throw_found
[i
] = 0;
6736 /* count the number of exception infos */
6739 * make sure we have enough space for exceptions
6741 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6742 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6743 i
= mini_exception_id_by_name (patch_info
->data
.target
);
6744 if (!exc_throw_found
[i
]) {
6745 max_epilog_size
+= 32;
6746 exc_throw_found
[i
] = TRUE
;
6751 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
6752 cfg
->code_size
*= 2;
6753 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
6754 cfg
->stat_code_reallocs
++;
6757 code
= cfg
->native_code
+ cfg
->code_len
;
6759 /* add code to raise exceptions */
6760 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6761 switch (patch_info
->type
) {
6762 case MONO_PATCH_INFO_EXC
: {
6763 MonoClass
*exc_class
;
6764 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6766 i
= mini_exception_id_by_name (patch_info
->data
.target
);
6767 if (exc_throw_pos
[i
]) {
6768 arm_patch (ip
, exc_throw_pos
[i
]);
6769 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6772 exc_throw_pos
[i
] = code
;
6774 arm_patch (ip
, code
);
6776 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6778 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6779 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6780 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
6781 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
6782 patch_info
->ip
.i
= code
- cfg
->native_code
;
6784 cfg
->thunk_area
+= THUNK_SIZE
;
6785 *(guint32
*)(gpointer
)code
= exc_class
->type_token
- MONO_TOKEN_TYPE_DEF
;
6795 cfg
->code_len
= code
- cfg
->native_code
;
6797 g_assert (cfg
->code_len
< cfg
->code_size
);
6801 #endif /* #ifndef DISABLE_JIT */
/* NOTE(review): the extraction dropped the return-type lines and bodies of
 * the small arch hooks below; comments describe only what is visible here. */
/* One-time, arch-specific JIT initialization hook. */
6804 mono_arch_finish_init (void)
/* Release per-thread JIT TLS data; presumably a no-op on ARM — TODO confirm. */
6809 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
/* Intrinsic hook: may return a MonoInst implementing CMETHOD inline;
 * body not visible in this extraction. */
6814 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
/* Debug pretty-printer for an IR tree; body not visible here. */
6821 mono_arch_print_tree (MonoInst
*tree
, int arity
)
/* Offset of the patchable word inside a code sequence; body not visible here. */
6831 mono_arch_get_patch_offset (guint8
*code
)
void
mono_arch_flush_register_windows (void)
{
	/* Register windows are a SPARC concept; presumably nothing to do on
	 * ARM (body was empty/not visible in this extraction). */
}
6843 mono_arch_find_imt_method (mgreg_t
*regs
, guint8
*code
)
6845 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6849 mono_arch_find_static_call_vtable (mgreg_t
*regs
, guint8
*code
)
6851 return (MonoVTable
*) regs
[MONO_ARCH_RGCTX_REG
];
6855 mono_arch_get_cie_program (void)
6859 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
/* Uncomment to enable the per-entry IMT sanity check emitted below. */
6864 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Sizes, in bytes, of the fragments emitted by the IMT thunk builder;
 * each is a count of 4-byte ARM instructions. */
6865 #define BASE_SIZE (6 * 4)
6866 #define BSEARCH_ENTRY_SIZE (4 * 4)
6867 #define CMP_SIZE (3 * 4)
6868 #define BRANCH_SIZE (1 * 4)
6869 #define CALL_SIZE (2 * 4)
6870 #define WMC_SIZE (8 * 4)
/* Byte distance B - A between two code addresses, truncated to 32 bits
 * (fine on 32-bit ARM where code pointers fit in a gint32). */
6871 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * Emit VALUE as a literal word at CODE and back-patch the LDR instruction at
 * TARGET so its 12-bit immediate displacement addresses that word.
 * NOTE(review): the lines that actually store VALUE and return the advanced
 * code pointer were dropped by this extraction; only the displacement
 * computation and the patch itself are visible below.
 */
6874 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6876 guint32 delta
= DISTANCE (target
, code
);
/* delta is unsigned, so the ">= 0" half of this assert is vacuous; a
 * negative DISTANCE wraps to a huge value and still fails "<= 0xFFF"
 * (the LDR literal immediate is limited to 12 bits). */
6878 g_assert (delta
>= 0 && delta
<= 0xFFF);
/* OR the displacement into the LDR's 12-bit immediate field. */
6879 *target
= *target
| delta
;
6884 #ifdef ENABLE_WRONG_METHOD_CHECK
6886 mini_dump_bad_imt (int input_imt
, int compared_imt
, int pc
)
6888 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt
, compared_imt
, pc
);
6894 mono_arch_build_imt_thunk (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6895 gpointer fail_tramp
)
6898 arminstr_t
*code
, *start
;
6899 gboolean large_offsets
= FALSE
;
6900 guint32
**constant_pool_starts
;
6901 arminstr_t
*vtable_target
= NULL
;
6902 int extra_space
= 0;
6903 #ifdef ENABLE_WRONG_METHOD_CHECK
6909 constant_pool_starts
= g_new0 (guint32
*, count
);
6911 for (i
= 0; i
< count
; ++i
) {
6912 MonoIMTCheckItem
*item
= imt_entries
[i
];
6913 if (item
->is_equals
) {
6914 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6916 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6917 item
->chunk_size
+= 32;
6918 large_offsets
= TRUE
;
6921 if (item
->check_target_idx
|| fail_case
) {
6922 if (!item
->compare_done
|| fail_case
)
6923 item
->chunk_size
+= CMP_SIZE
;
6924 item
->chunk_size
+= BRANCH_SIZE
;
6926 #ifdef ENABLE_WRONG_METHOD_CHECK
6927 item
->chunk_size
+= WMC_SIZE
;
6931 item
->chunk_size
+= 16;
6932 large_offsets
= TRUE
;
6934 item
->chunk_size
+= CALL_SIZE
;
6936 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6937 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6939 size
+= item
->chunk_size
;
6943 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6946 code
= mono_method_alloc_generic_virtual_thunk (domain
, size
);
6948 code
= mono_domain_code_reserve (domain
, size
);
6951 unwind_ops
= mono_arch_get_cie_program ();
6954 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable
->klass
->name_space
, vtable
->klass
->name
, count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6955 for (i
= 0; i
< count
; ++i
) {
6956 MonoIMTCheckItem
*item
= imt_entries
[i
];
6957 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
6961 if (large_offsets
) {
6962 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6963 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (mgreg_t
));
6965 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
6966 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (mgreg_t
));
6968 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
6969 vtable_target
= code
;
6970 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
6971 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
6973 for (i
= 0; i
< count
; ++i
) {
6974 MonoIMTCheckItem
*item
= imt_entries
[i
];
6975 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
6976 gint32 vtable_offset
;
6978 item
->code_target
= (guint8
*)code
;
6980 if (item
->is_equals
) {
6981 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6983 if (item
->check_target_idx
|| fail_case
) {
6984 if (!item
->compare_done
|| fail_case
) {
6986 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6987 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
6989 item
->jmp_code
= (guint8
*)code
;
6990 ARM_B_COND (code
, ARMCOND_NE
, 0);
6992 /*Enable the commented code to assert on wrong method*/
6993 #ifdef ENABLE_WRONG_METHOD_CHECK
6995 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
6996 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
6998 ARM_B_COND (code
, ARMCOND_EQ
, 0);
7000 /* Define this if your system is so bad that gdb is failing. */
7001 #ifdef BROKEN_DEV_ENV
7002 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
7004 arm_patch (code
- 1, mini_dump_bad_imt
);
7008 arm_patch (cond
, code
);
7012 if (item
->has_target_code
) {
7013 /* Load target address */
7014 target_code_ins
= code
;
7015 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7016 /* Save it to the fourth slot */
7017 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
7018 /* Restore registers and branch */
7019 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7021 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
7023 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
7024 if (!arm_is_imm12 (vtable_offset
)) {
7026 * We need to branch to a computed address but we don't have
7027 * a free register to store it, since IP must contain the
7028 * vtable address. So we push the two values to the stack, and
7029 * load them both using LDM.
7031 /* Compute target address */
7032 vtable_offset_ins
= code
;
7033 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7034 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
7035 /* Save it to the fourth slot */
7036 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
7037 /* Restore registers and branch */
7038 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7040 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
7042 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
7043 if (large_offsets
) {
7044 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (mgreg_t
));
7045 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (gpointer
));
7047 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
7048 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
7053 arm_patch (item
->jmp_code
, (guchar
*)code
);
7055 target_code_ins
= code
;
7056 /* Load target address */
7057 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7058 /* Save it to the fourth slot */
7059 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
7060 /* Restore registers and branch */
7061 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7063 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
7064 item
->jmp_code
= NULL
;
7068 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)item
->key
);
7070 /*must emit after unconditional branch*/
7071 if (vtable_target
) {
7072 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)vtable
);
7073 item
->chunk_size
+= 4;
7074 vtable_target
= NULL
;
7077 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7078 constant_pool_starts
[i
] = code
;
7080 code
+= extra_space
;
7084 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7085 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7087 item
->jmp_code
= (guint8
*)code
;
7088 ARM_B_COND (code
, ARMCOND_HS
, 0);
7093 for (i
= 0; i
< count
; ++i
) {
7094 MonoIMTCheckItem
*item
= imt_entries
[i
];
7095 if (item
->jmp_code
) {
7096 if (item
->check_target_idx
)
7097 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7099 if (i
> 0 && item
->is_equals
) {
7101 arminstr_t
*space_start
= constant_pool_starts
[i
];
7102 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7103 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)imt_entries
[j
]->key
);
7110 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
7111 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7116 g_free (constant_pool_starts
);
7118 mono_arch_flush_icache ((guint8
*)start
, size
);
7119 mono_profiler_code_buffer_new (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
);
7120 mono_stats
.imt_thunks_size
+= code
- start
;
7122 g_assert (DISTANCE (start
, code
) <= size
);
7124 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
7130 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
7132 return ctx
->regs
[reg
];
7136 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, mgreg_t val
)
7138 ctx
->regs
[reg
] = val
;
7142 * mono_arch_get_trampolines:
7144 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7148 mono_arch_get_trampolines (gboolean aot
)
7150 return mono_arm_get_exception_trampolines (aot
);
7154 mono_arch_install_handler_block_guard (MonoJitInfo
*ji
, MonoJitExceptionInfo
*clause
, MonoContext
*ctx
, gpointer new_value
)
7161 bp
= MONO_CONTEXT_GET_BP (ctx
);
7162 lr_loc
= (gpointer
*)(bp
+ clause
->exvar_offset
);
7164 old_value
= *lr_loc
;
7165 if ((char*)old_value
< (char*)ji
->code_start
|| (char*)old_value
> ((char*)ji
->code_start
+ ji
->code_size
))
7168 *lr_loc
= new_value
;
7173 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 * Three strategies: soft breakpoints (patch in an explicit call), AOT
 * (flip a slot in the method's SeqPointInfo table), and JIT (rewrite the
 * site to load from an inaccessible trigger page so execution faults).
 */
7181 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
/* Byte offset of IP inside the method body; indexes per-method AOT tables. */
7184 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7185 MonoDebugOptions
*opt
= mini_get_debug_options ();
/* Soft breakpoints are only valid for JIT-compiled code. */
7187 if (opt
->soft_breakpoints
) {
7188 g_assert (!ji
->from_aot
);
/* NOTE(review): `code` is presumably a local emit cursor initialized from
 * `ip` on lines elided from this view — confirm against the full source. */
7190 ARM_BLX_REG (code
, ARMREG_LR
);
/* Only one 4-byte instruction was patched; flush just that word. */
7191 mono_arch_flush_icache (code
- 4, 4);
7192 } else if (ji
->from_aot
) {
7193 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
/* Sequence points are instruction-aligned; bp_addrs has one slot per word. */
7195 g_assert (native_offset
% 4 == 0);
/* The slot must be clear: setting the same breakpoint twice is a bug. */
7196 g_assert (info
->bp_addrs
[native_offset
/ 4] == 0);
7197 info
->bp_addrs
[native_offset
/ 4] = bp_trigger_page
;
/* JIT case: emit a load from the breakpoint trigger page, which faults
 * (SIGSEGV/SIGBUS) when the breakpoint is armed. */
7199 int dreg
= ARMREG_LR
;
7201 /* Read from another trigger page */
7202 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
/* Inline literal pool entry: the trigger page address in the code stream. */
7204 *(int*)code
= (int)bp_trigger_page
;
7206 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
/* Flush the full patched sequence (4 words). */
7208 mono_arch_flush_icache (code
- 16, 16);
7211 /* This is currently implemented by emitting an SWI instruction, which
7212 * qemu/linux seems to convert to a SIGILL.
7214 *(int*)code
= (0xef << 24) | 8;
7216 mono_arch_flush_icache (code
- 4, 4);
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP. Mirrors mono_arch_set_breakpoint: soft
 * breakpoints restore the original call site, AOT clears the SeqPointInfo
 * slot, and the JIT case rewrites the patched words back to no-ops.
 */
7227 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
7229 MonoDebugOptions
*opt
= mini_get_debug_options ();
7233 if (opt
->soft_breakpoints
) {
/* Soft breakpoints never apply to AOT code. */
7234 g_assert (!ji
->from_aot
);
/* NOTE(review): the instruction restored here is emitted on lines elided
 * from this view; only the icache flush of that word is visible. */
7237 mono_arch_flush_icache (code
- 4, 4);
7238 } else if (ji
->from_aot
) {
7239 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7240 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
7242 g_assert (native_offset
% 4 == 0);
/* The slot must hold the trigger page: clearing an unset breakpoint is a bug. */
7243 g_assert (info
->bp_addrs
[native_offset
/ 4] == bp_trigger_page
);
7244 info
->bp_addrs
[native_offset
/ 4] = 0;
/* JIT case: overwrite the 4 patched instruction words (loop body elided
 * from this view — presumably emits NOPs; confirm against full source). */
7246 for (i
= 0; i
< 4; ++i
)
7249 mono_arch_flush_icache (ip
, code
- ip
);
7254 * mono_arch_start_single_stepping:
7256 * Start single stepping.
7259 mono_arch_start_single_stepping (void)
7261 if (ss_trigger_page
)
7262 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
7264 single_step_tramp
= mini_get_single_step_trampoline ();
7268 * mono_arch_stop_single_stepping:
7270 * Stop single stepping.
7273 mono_arch_stop_single_stepping (void)
7275 if (ss_trigger_page
)
7276 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
7278 single_step_tramp
= NULL
;
7282 #define DBG_SIGNAL SIGBUS
7284 #define DBG_SIGNAL SIGSEGV
7288 * mono_arch_is_single_step_event:
7290 * Return whenever the machine state in SIGCTX corresponds to a single
7294 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7296 siginfo_t
*sinfo
= info
;
7298 if (!ss_trigger_page
)
7301 /* Sometimes the address is off by 4 */
7302 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7309 * mono_arch_is_breakpoint_event:
7311 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7314 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7316 siginfo_t
*sinfo
= info
;
7318 if (!ss_trigger_page
)
7321 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7322 /* Sometimes the address is off by 4 */
7323 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
7333 * mono_arch_skip_breakpoint:
7335 * See mini-amd64.c for docs.
7338 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
7340 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7344 * mono_arch_skip_single_step:
7346 * See mini-amd64.c for docs.
7349 mono_arch_skip_single_step (MonoContext
*ctx
)
7351 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7354 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs. Returns the per-method SeqPointInfo for CODE
 * in DOMAIN, lazily allocating it on first use and caching it in the
 * domain's arch_seq_points hash table. The lookup/insert key argument and
 * the not-found branch are elided from this view.
 */
7362 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
7367 // FIXME: Add a free function
/* Fast path: the table is shared per-domain, so guard it with the domain lock. */
7369 mono_domain_lock (domain
);
7370 info
= g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
7372 mono_domain_unlock (domain
);
/* Slow path (presumably when the lookup missed — wrapper elided from this
 * view): locate the JIT info for CODE and build a fresh entry. */
7375 ji
= mono_jit_info_table_find (domain
, (char*)code
);
/* bp_addrs[] is a trailing array with one slot per code word, hence the
 * code_size-dependent allocation; g_malloc0 leaves all slots cleared. */
7378 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
/* Record the trigger pages so AOT code can locate them at run time. */
7380 info
->ss_trigger_page
= ss_trigger_page
;
7381 info
->bp_trigger_page
= bp_trigger_page
;
/* Re-take the lock to publish the new entry. */
7383 mono_domain_lock (domain
);
7384 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
7386 mono_domain_unlock (domain
);
7393 mono_arch_init_lmf_ext (MonoLMFExt
*ext
, gpointer prev_lmf
)
7395 ext
->lmf
.previous_lmf
= prev_lmf
;
7396 /* Mark that this is a MonoLMFExt */
7397 ext
->lmf
.previous_lmf
= (gpointer
)(((gssize
)ext
->lmf
.previous_lmf
) | 2);
7398 ext
->lmf
.sp
= (gssize
)ext
;
7402 * mono_arch_set_target:
7404 * Set the target architecture the JIT backend should generate code for, in the form
7405 * of a GNU target triplet. Only used in AOT mode.
7408 mono_arch_set_target (char *mtriple
)
7410 /* The GNU target triple format is not very well documented */
7411 if (strstr (mtriple
, "armv7")) {
7412 v5_supported
= TRUE
;
7413 v6_supported
= TRUE
;
7414 v7_supported
= TRUE
;
7416 if (strstr (mtriple
, "armv6")) {
7417 v5_supported
= TRUE
;
7418 v6_supported
= TRUE
;
7420 if (strstr (mtriple
, "armv7s")) {
7421 v7s_supported
= TRUE
;
7423 if (strstr (mtriple
, "armv7k")) {
7424 v7k_supported
= TRUE
;
7426 if (strstr (mtriple
, "thumbv7s")) {
7427 v5_supported
= TRUE
;
7428 v6_supported
= TRUE
;
7429 v7_supported
= TRUE
;
7430 v7s_supported
= TRUE
;
7431 thumb_supported
= TRUE
;
7432 thumb2_supported
= TRUE
;
7434 if (strstr (mtriple
, "darwin") || strstr (mtriple
, "ios")) {
7435 v5_supported
= TRUE
;
7436 v6_supported
= TRUE
;
7437 thumb_supported
= TRUE
;
7440 if (strstr (mtriple
, "gnueabi"))
7441 eabi_supported
= TRUE
;
7445 mono_arch_opcode_supported (int opcode
)
7448 case OP_ATOMIC_ADD_I4
:
7449 case OP_ATOMIC_EXCHANGE_I4
:
7450 case OP_ATOMIC_CAS_I4
:
7451 case OP_ATOMIC_LOAD_I1
:
7452 case OP_ATOMIC_LOAD_I2
:
7453 case OP_ATOMIC_LOAD_I4
:
7454 case OP_ATOMIC_LOAD_U1
:
7455 case OP_ATOMIC_LOAD_U2
:
7456 case OP_ATOMIC_LOAD_U4
:
7457 case OP_ATOMIC_STORE_I1
:
7458 case OP_ATOMIC_STORE_I2
:
7459 case OP_ATOMIC_STORE_I4
:
7460 case OP_ATOMIC_STORE_U1
:
7461 case OP_ATOMIC_STORE_U2
:
7462 case OP_ATOMIC_STORE_U4
:
7463 return v7_supported
;
7464 case OP_ATOMIC_LOAD_R4
:
7465 case OP_ATOMIC_LOAD_R8
:
7466 case OP_ATOMIC_STORE_R4
:
7467 case OP_ATOMIC_STORE_R8
:
7468 return v7_supported
&& IS_VFP
;
7475 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
7477 return get_call_info (mp
, sig
);