/*
 * ARM backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
 */
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
32 #include "debugger-agent.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
37 #include "mono/utils/mono-tls-inline.h"
39 /* Sanity check: This makes no sense */
40 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
41 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
45 * IS_SOFT_FLOAT: Is full software floating point used?
46 * IS_HARD_FLOAT: Is full hardware floating point used?
47 * IS_VFP: Is hardware floating point with software ABI used?
49 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
50 * IS_VFP may delegate to mono_arch_is_soft_float ().
53 #if defined(ARM_FPU_VFP_HARD)
54 #define IS_SOFT_FLOAT (FALSE)
55 #define IS_HARD_FLOAT (TRUE)
57 #elif defined(ARM_FPU_NONE)
58 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
59 #define IS_HARD_FLOAT (FALSE)
60 #define IS_VFP (!mono_arch_is_soft_float ())
62 #define IS_SOFT_FLOAT (FALSE)
63 #define IS_HARD_FLOAT (FALSE)
67 #define THUNK_SIZE (3 * 4)
71 void sys_icache_invalidate (void *start
, size_t len
);
75 /* This mutex protects architecture specific caches */
76 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
77 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
78 static mono_mutex_t mini_arch_mutex
;
80 static gboolean v5_supported
= FALSE
;
81 static gboolean v6_supported
= FALSE
;
82 static gboolean v7_supported
= FALSE
;
83 static gboolean v7s_supported
= FALSE
;
84 static gboolean v7k_supported
= FALSE
;
85 static gboolean thumb_supported
= FALSE
;
86 static gboolean thumb2_supported
= FALSE
;
88 * Whenever to use the ARM EABI
90 static gboolean eabi_supported
= FALSE
;
93 * Whenever to use the iphone ABI extensions:
94 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
95 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
96 * This is required for debugging/profiling tools to work, but it has some overhead so it should
97 * only be turned on in debug builds.
99 static gboolean iphone_abi
= FALSE
;
102 * The FPU we are generating code for. This is NOT runtime configurable right now,
103 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
105 static MonoArmFPU arm_fpu
;
107 #if defined(ARM_FPU_VFP_HARD)
109 * On armhf, d0-d7 are used for argument passing and d8-d15
110 * must be preserved across calls, which leaves us no room
111 * for scratch registers. So we use d14-d15 but back up their
112 * previous contents to a stack slot before using them - see
113 * mono_arm_emit_vfp_scratch_save/_restore ().
115 static int vfp_scratch1
= ARM_VFP_D14
;
116 static int vfp_scratch2
= ARM_VFP_D15
;
119 * On armel, d0-d7 do not need to be preserved, so we can
120 * freely make use of them as scratch registers.
122 static int vfp_scratch1
= ARM_VFP_D0
;
123 static int vfp_scratch2
= ARM_VFP_D1
;
128 static gpointer single_step_tramp
, breakpoint_tramp
;
131 * The code generated for sequence points reads from this location, which is
132 * made read-only when single stepping is enabled.
134 static gpointer ss_trigger_page
;
136 /* Enabled breakpoints read from this trigger page */
137 static gpointer bp_trigger_page
;
141 * floating point support: on ARM it is a mess, there are at least 3
142 * different setups, each of which binary incompat with the other.
143 * 1) FPA: old and ugly, but unfortunately what current distros use
144 * the double binary format has the two words swapped. 8 double registers.
145 * Implemented usually by kernel emulation.
146 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
147 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
148 * 3) VFP: the new and actually sensible and useful FP support. Implemented
149 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
151 * We do not care about FPA. We will support soft float and VFP.
153 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
154 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
155 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
157 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
158 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
159 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
161 //#define DEBUG_IMT 0
164 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
168 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
);
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for integer register number REG
 * (0..15), used in JIT debug/disassembly output. Returns "unknown"
 * for out-of-range values.
 *
 * NOTE(review): the source paste was garbled (stray line numbers,
 * missing return type/statements and the final "arm_pc" table entry);
 * reconstructed — verify against upstream mini-arm.c.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *rnames [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for VFP register number REG (0..31),
 * used in JIT debug/disassembly output. Returns "unknown" for
 * out-of-range values.
 *
 * NOTE(review): the source paste was garbled (missing return type,
 * the "arm_f30"/"arm_f31" table entries and return statements);
 * reconstructed — verify against upstream mini-arm.c.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *rnames [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
204 emit_big_add_temp (guint8
*code
, int dreg
, int sreg
, int imm
, int temp
)
206 int imm8
, rot_amount
;
208 g_assert (temp
== ARMREG_IP
|| temp
== ARMREG_LR
);
212 ARM_MOV_REG_REG (code
, dreg
, sreg
);
213 } else if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
214 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
218 code
= mono_arm_emit_load_imm (code
, temp
, imm
);
219 ARM_ADD_REG_REG (code
, dreg
, sreg
, temp
);
221 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
222 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
228 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
230 return emit_big_add_temp (code
, dreg
, sreg
, imm
, ARMREG_IP
);
234 emit_ldr_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
236 if (!arm_is_imm12 (imm
)) {
237 g_assert (dreg
!= sreg
);
238 code
= emit_big_add (code
, dreg
, sreg
, imm
);
239 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
241 ARM_LDR_IMM (code
, dreg
, sreg
, imm
);
246 /* If dreg == sreg, this clobbers IP */
248 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
250 int imm8
, rot_amount
;
251 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
252 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
256 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
257 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
259 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
260 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
266 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
268 /* we can use r0-r3, since this is called only for incoming args on the stack */
269 if (size
> sizeof (target_mgreg_t
) * 4) {
271 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
272 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
273 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
274 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
275 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
276 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
277 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
278 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
279 ARM_B_COND (code
, ARMCOND_NE
, 0);
280 arm_patch (code
- 4, start_loop
);
283 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
284 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
286 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
287 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
293 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
294 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
295 doffset
= soffset
= 0;
297 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
298 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
304 g_assert (size
== 0);
309 emit_jmp_reg (guint8
*code
, int reg
)
314 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
319 emit_call_reg (guint8
*code
, int reg
)
322 ARM_BLX_REG (code
, reg
);
324 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
325 return emit_jmp_reg (code
, reg
);
331 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
333 if (cfg
->method
->dynamic
) {
334 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
336 *(gpointer
*)code
= NULL
;
338 code
= emit_call_reg (code
, ARMREG_IP
);
342 cfg
->thunk_area
+= THUNK_SIZE
;
347 mono_arm_patchable_b (guint8
*code
, int cond
)
349 ARM_B_COND (code
, cond
, 0);
354 mono_arm_patchable_bl (guint8
*code
, int cond
)
356 ARM_BL_COND (code
, cond
, 0);
360 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
361 #define HAVE_AEABI_READ_TP 1
364 #ifdef HAVE_AEABI_READ_TP
366 gpointer
__aeabi_read_tp (void);
371 mono_arch_have_fast_tls (void)
373 #ifdef HAVE_AEABI_READ_TP
374 static gboolean have_fast_tls
= FALSE
;
375 static gboolean inited
= FALSE
;
377 if (mini_debug_options
.use_fallback_tls
)
381 return have_fast_tls
;
386 tp1
= __aeabi_read_tp ();
387 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2
));
389 have_fast_tls
= tp1
&& tp1
== tp2
;
392 return have_fast_tls
;
399 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
401 g_assert (v7_supported
);
402 ARM_MRC (code
, 15, 0, dreg
, 13, 0, 3);
403 ARM_LDR_IMM (code
, dreg
, dreg
, tls_offset
);
408 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
410 int tp_reg
= (sreg
!= ARMREG_R0
) ? ARMREG_R0
: ARMREG_R1
;
411 g_assert (v7_supported
);
412 ARM_MRC (code
, 15, 0, tp_reg
, 13, 0, 3);
413 ARM_STR_IMM (code
, sreg
, tp_reg
, tls_offset
);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
424 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
428 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
) != -1) {
429 code
= emit_tls_get (code
, ARMREG_R0
, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
));
431 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
432 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern
));
433 code
= emit_call_seq (cfg
, code
);
435 /* we build the MonoLMF structure on the stack - see mini-arm.h */
436 /* lmf_offset is the offset from the previous stack pointer,
437 * alloc_size is the total stack space allocated, so the offset
438 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
439 * The pointer to the struct is put in r1 (new_lmf).
440 * ip is used as scratch
441 * The callee-saved registers are already in the MonoLMF structure
443 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
444 /* r0 is the result from mono_get_lmf_addr () */
445 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
446 /* new_lmf->previous_lmf = *lmf_addr */
447 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
448 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
449 /* *(lmf_addr) = r1 */
450 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
451 /* Skip method (only needed for trampoline LMF frames) */
452 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
453 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
454 /* save the current IP */
455 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
456 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
458 for (i
= 0; i
< MONO_ABI_SIZEOF (MonoLMF
); i
+= sizeof (target_mgreg_t
))
459 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
470 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
474 set_code_cursor (cfg
, code
);
476 for (list
= inst
->float_args
; list
; list
= list
->next
) {
477 FloatArgData
*fad
= (FloatArgData
*)list
->data
;
478 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
479 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
481 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
487 code
= realloc_code (cfg
, *max_len
);
490 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
491 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
493 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
495 set_code_cursor (cfg
, code
);
496 *offset
= code
- cfg
->native_code
;
503 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
507 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
509 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
512 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
513 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
514 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
516 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
523 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
527 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
529 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
532 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
533 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
534 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
536 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
545 * Emit code to pop an LMF structure from the LMF stack.
548 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
552 if (lmf_offset
< 32) {
553 basereg
= cfg
->frame_reg
;
558 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
561 /* ip = previous_lmf */
562 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
564 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
565 /* *(lmf_addr) = previous_lmf */
566 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
571 #endif /* #ifndef DISABLE_JIT */
574 * mono_arch_get_argument_info:
575 * @csig: a method signature
576 * @param_count: the number of parameters to consider
577 * @arg_info: an array to store the result infos
579 * Gathers information on parameters such as size, alignment and
580 * padding. arg_info should be large enought to hold param_count + 1 entries.
582 * Returns the size of the activation frame.
585 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
587 int k
, frame_size
= 0;
588 guint32 size
, align
, pad
;
592 t
= mini_get_underlying_type (csig
->ret
);
593 if (MONO_TYPE_ISSTRUCT (t
)) {
594 frame_size
+= sizeof (target_mgreg_t
);
598 arg_info
[0].offset
= offset
;
601 frame_size
+= sizeof (target_mgreg_t
);
605 arg_info
[0].size
= frame_size
;
607 for (k
= 0; k
< param_count
; k
++) {
608 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
610 /* ignore alignment for now */
613 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
614 arg_info
[k
].pad
= pad
;
616 arg_info
[k
+ 1].pad
= 0;
617 arg_info
[k
+ 1].size
= size
;
619 arg_info
[k
+ 1].offset
= offset
;
623 align
= MONO_ARCH_FRAME_ALIGNMENT
;
624 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
625 arg_info
[k
].pad
= pad
;
630 #define MAX_ARCH_DELEGATE_PARAMS 3
633 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
635 guint8
*code
, *start
;
636 GSList
*unwind_ops
= mono_arch_get_cie_program ();
639 start
= code
= mono_global_codeman_reserve (12);
641 /* Replace the this argument with the target */
642 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
643 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
644 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
646 g_assert ((code
- start
) <= 12);
648 mono_arch_flush_icache (start
, 12);
649 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
653 size
= 8 + param_count
* 4;
654 start
= code
= mono_global_codeman_reserve (size
);
656 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
657 /* slide down the arguments */
658 for (i
= 0; i
< param_count
; ++i
) {
659 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
661 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
663 g_assert ((code
- start
) <= size
);
665 mono_arch_flush_icache (start
, size
);
666 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
670 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
672 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
673 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
677 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
683 * mono_arch_get_delegate_invoke_impls:
685 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
689 mono_arch_get_delegate_invoke_impls (void)
695 get_delegate_invoke_impl (&info
, TRUE
, 0);
696 res
= g_slist_prepend (res
, info
);
698 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
699 get_delegate_invoke_impl (&info
, FALSE
, i
);
700 res
= g_slist_prepend (res
, info
);
707 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
709 guint8
*code
, *start
;
712 /* FIXME: Support more cases */
713 sig_ret
= mini_get_underlying_type (sig
->ret
);
714 if (MONO_TYPE_ISSTRUCT (sig_ret
))
718 static guint8
* cached
= NULL
;
719 mono_mini_arch_lock ();
721 mono_mini_arch_unlock ();
725 if (mono_ee_features
.use_aot_trampolines
) {
726 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
729 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
730 mono_tramp_info_register (info
, NULL
);
733 mono_mini_arch_unlock ();
736 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
739 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
741 for (i
= 0; i
< sig
->param_count
; ++i
)
742 if (!mono_is_regsize_var (sig
->params
[i
]))
745 mono_mini_arch_lock ();
746 code
= cache
[sig
->param_count
];
748 mono_mini_arch_unlock ();
752 if (mono_ee_features
.use_aot_trampolines
) {
753 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
754 start
= (guint8
*)mono_aot_get_trampoline (name
);
758 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
759 mono_tramp_info_register (info
, NULL
);
761 cache
[sig
->param_count
] = start
;
762 mono_mini_arch_unlock ();
770 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
776 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
778 return (gpointer
)regs
[ARMREG_R0
];
782 * Initialize the cpu to execute managed code.
785 mono_arch_cpu_init (void)
787 i8_align
= MONO_ABI_ALIGNOF (gint64
);
788 #ifdef MONO_CROSS_COMPILE
789 /* Need to set the alignment of i8 since it can different on the target */
790 #ifdef TARGET_ANDROID
792 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
798 * Initialize architecture specific code.
801 mono_arch_init (void)
805 #ifdef TARGET_WATCHOS
806 mini_debug_options
.soft_breakpoints
= TRUE
;
809 mono_os_mutex_init_recursive (&mini_arch_mutex
);
810 if (mini_debug_options
.soft_breakpoints
) {
812 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
814 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
815 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
816 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
819 #if defined(__ARM_EABI__)
820 eabi_supported
= TRUE
;
823 #if defined(ARM_FPU_VFP_HARD)
824 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
826 arm_fpu
= MONO_ARM_FPU_VFP
;
828 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
830 * If we're compiling with a soft float fallback and it
831 * turns out that no VFP unit is available, we need to
832 * switch to soft float. We don't do this for iOS, since
833 * iOS devices always have a VFP unit.
835 if (!mono_hwcap_arm_has_vfp
)
836 arm_fpu
= MONO_ARM_FPU_NONE
;
839 * This environment variable can be useful in testing
840 * environments to make sure the soft float fallback
841 * works. Most ARM devices have VFP units these days, so
842 * normally soft float code would not be exercised much.
844 char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
846 if (soft
&& !strncmp (soft
, "1", 1))
847 arm_fpu
= MONO_ARM_FPU_NONE
;
852 v5_supported
= mono_hwcap_arm_is_v5
;
853 v6_supported
= mono_hwcap_arm_is_v6
;
854 v7_supported
= mono_hwcap_arm_is_v7
;
857 * On weird devices, the hwcap code may fail to detect
858 * the ARM version. In that case, we can at least safely
859 * assume the version the runtime was compiled for.
871 #if defined(TARGET_IOS)
872 /* iOS is special-cased here because we don't yet
873 have a way to properly detect CPU features on it. */
874 thumb_supported
= TRUE
;
877 thumb_supported
= mono_hwcap_arm_has_thumb
;
878 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
881 /* Format: armv(5|6|7[s])[-thumb[2]] */
882 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
884 /* Do this here so it overrides any detection. */
886 if (strncmp (cpu_arch
, "armv", 4) == 0) {
887 v5_supported
= cpu_arch
[4] >= '5';
888 v6_supported
= cpu_arch
[4] >= '6';
889 v7_supported
= cpu_arch
[4] >= '7';
890 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
891 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
894 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
895 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
901 * Cleanup architecture specific code.
904 mono_arch_cleanup (void)
909 * This function returns the optimizations supported on this cpu.
912 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
914 /* no arm-specific optimizations yet */
920 mono_arm_is_hard_float (void)
922 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
928 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
930 if (v7s_supported
|| v7k_supported
) {
944 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
946 mono_arch_is_soft_float (void)
948 return arm_fpu
== MONO_ARM_FPU_NONE
;
953 is_regsize_var (MonoType
*t
)
957 t
= mini_get_underlying_type (t
);
964 case MONO_TYPE_FNPTR
:
966 case MONO_TYPE_OBJECT
:
968 case MONO_TYPE_GENERICINST
:
969 if (!mono_type_generic_inst_is_valuetype (t
))
972 case MONO_TYPE_VALUETYPE
:
979 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
984 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
985 MonoInst
*ins
= cfg
->varinfo
[i
];
986 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
989 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
992 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
995 /* we can only allocate 32 bit values */
996 if (is_regsize_var (ins
->inst_vtype
)) {
997 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
998 g_assert (i
== vmv
->idx
);
999 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
1007 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
1011 mono_arch_compute_omit_fp (cfg
);
1014 * FIXME: Interface calls might go through a static rgctx trampoline which
1015 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1018 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1019 cfg
->uses_rgctx_reg
= TRUE
;
1021 if (cfg
->arch
.omit_fp
)
1022 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1023 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1024 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1025 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1027 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1028 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1030 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1031 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1032 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1033 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1034 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1035 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1041 * mono_arch_regalloc_cost:
1043 * Return the cost, in number of memory references, of the action of
1044 * allocating the variable VMV into a register during global register
1048 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1054 #endif /* #ifndef DISABLE_JIT */
1057 mono_arch_flush_icache (guint8
*code
, gint size
)
1059 #if defined(MONO_CROSS_COMPILE)
1061 sys_icache_invalidate (code
, size
);
1063 __builtin___clear_cache ((char*)code
, (char*)code
+ size
);
1070 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1073 if (*gr
> ARMREG_R3
) {
1075 ainfo
->offset
= *stack_size
;
1076 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1077 ainfo
->storage
= RegTypeBase
;
1080 ainfo
->storage
= RegTypeGeneral
;
1087 split
= i8_align
== 4;
1092 if (*gr
== ARMREG_R3
&& split
) {
1093 /* first word in r3 and the second on the stack */
1094 ainfo
->offset
= *stack_size
;
1095 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1096 ainfo
->storage
= RegTypeBaseGen
;
1098 } else if (*gr
>= ARMREG_R3
) {
1099 if (eabi_supported
) {
1100 /* darwin aligns longs to 4 byte only */
1101 if (i8_align
== 8) {
1106 ainfo
->offset
= *stack_size
;
1107 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1108 ainfo
->storage
= RegTypeBase
;
1111 if (eabi_supported
) {
1112 if (i8_align
== 8 && ((*gr
) & 1))
1115 ainfo
->storage
= RegTypeIRegPair
;
1124 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1127 * If we're calling a function like this:
1129 * void foo(float a, double b, float c)
1131 * We pass a in s0 and b in d1. That leaves us
1132 * with s1 being unused. The armhf ABI recognizes
1133 * this and requires register assignment to then
1134 * use that for the next single-precision arg,
1135 * i.e. c in this example. So float_spare either
1136 * tells us which reg to use for the next single-
1137 * precision arg, or it's -1, meaning use *fpr.
1139 * Note that even though most of the JIT speaks
1140 * double-precision, fpr represents single-
1141 * precision registers.
1143 * See parts 5.5 and 6.1.2 of the AAPCS for how
1147 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1148 ainfo
->storage
= RegTypeFP
;
1152 * If we're passing a double-precision value
1153 * and *fpr is odd (e.g. it's s1, s3, ...)
1154 * we need to use the next even register. So
1155 * we mark the current *fpr as a spare that
1156 * can be used for the next single-precision
1160 *float_spare
= *fpr
;
1165 * At this point, we have an even register
1166 * so we assign that and move along.
1170 } else if (*float_spare
>= 0) {
1172 * We're passing a single-precision value
1173 * and it looks like a spare single-
1174 * precision register is available. Let's
1178 ainfo
->reg
= *float_spare
;
1182 * If we hit this branch, we're passing a
1183 * single-precision value and we can simply
1184 * use the next available register.
1192 * We've exhausted available floating point
1193 * regs, so pass the rest on the stack.
1201 ainfo
->offset
= *stack_size
;
1202 ainfo
->reg
= ARMREG_SP
;
1203 ainfo
->storage
= RegTypeBase
;
1205 *stack_size
+= is_double
? 8 : 4;
1210 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1214 MonoClassField
*field
;
1215 MonoType
*ftype
, *prev_ftype
= NULL
;
1218 klass
= mono_class_from_mono_type_internal (t
);
1220 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1221 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1223 ftype
= mono_field_get_type_internal (field
);
1224 ftype
= mini_get_underlying_type (ftype
);
1226 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1227 int nested_nfields
, nested_esize
;
1229 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1231 if (nested_esize
== 4)
1232 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1234 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1235 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1238 nfields
+= nested_nfields
;
1240 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1242 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1248 if (nfields
== 0 || nfields
> 4)
1250 *out_nfields
= nfields
;
1251 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1256 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1258 guint i
, gr
, fpr
, pstart
;
1260 int n
= sig
->hasthis
+ sig
->param_count
;
1264 guint32 stack_size
= 0;
1266 gboolean is_pinvoke
= sig
->pinvoke
;
1267 gboolean vtype_retaddr
= FALSE
;
1270 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1272 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1279 t
= mini_get_underlying_type (sig
->ret
);
1290 case MONO_TYPE_FNPTR
:
1291 case MONO_TYPE_OBJECT
:
1292 cinfo
->ret
.storage
= RegTypeGeneral
;
1293 cinfo
->ret
.reg
= ARMREG_R0
;
1297 cinfo
->ret
.storage
= RegTypeIRegPair
;
1298 cinfo
->ret
.reg
= ARMREG_R0
;
1302 cinfo
->ret
.storage
= RegTypeFP
;
1304 if (t
->type
== MONO_TYPE_R4
)
1305 cinfo
->ret
.size
= 4;
1307 cinfo
->ret
.size
= 8;
1309 if (IS_HARD_FLOAT
) {
1310 cinfo
->ret
.reg
= ARM_VFP_F0
;
1312 cinfo
->ret
.reg
= ARMREG_R0
;
1315 case MONO_TYPE_GENERICINST
:
1316 if (!mono_type_generic_inst_is_valuetype (t
)) {
1317 cinfo
->ret
.storage
= RegTypeGeneral
;
1318 cinfo
->ret
.reg
= ARMREG_R0
;
1321 if (mini_is_gsharedvt_variable_type (t
)) {
1322 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1326 case MONO_TYPE_VALUETYPE
:
1327 case MONO_TYPE_TYPEDBYREF
:
1328 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1329 cinfo
->ret
.storage
= RegTypeHFA
;
1331 cinfo
->ret
.nregs
= nfields
;
1332 cinfo
->ret
.esize
= esize
;
1335 int native_size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1338 #ifdef TARGET_WATCHOS
1343 if (native_size
<= max_size
) {
1344 cinfo
->ret
.storage
= RegTypeStructByVal
;
1345 cinfo
->ret
.struct_size
= native_size
;
1346 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1348 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1351 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1356 case MONO_TYPE_MVAR
:
1357 g_assert (mini_is_gsharedvt_type (t
));
1358 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1360 case MONO_TYPE_VOID
:
1363 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1366 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1371 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1372 * the first argument, allowing 'this' to be always passed in the first arg reg.
1373 * Also do this if the first argument is a reference type, since virtual calls
1374 * are sometimes made using calli without sig->hasthis set, like in the delegate
1377 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1379 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1381 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1385 cinfo
->ret
.reg
= gr
;
1387 cinfo
->vret_arg_index
= 1;
1391 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1394 if (vtype_retaddr
) {
1395 cinfo
->ret
.reg
= gr
;
1400 DEBUG(g_print("params: %d\n", sig
->param_count
));
1401 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1402 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1404 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1405 /* Prevent implicit arguments and sig_cookie from
1406 being passed in registers */
1409 /* Emit the signature cookie just before the implicit arguments */
1410 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1412 DEBUG(g_print("param %d: ", i
));
1413 if (sig
->params
[i
]->byref
) {
1414 DEBUG(g_print("byref\n"));
1415 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1419 t
= mini_get_underlying_type (sig
->params
[i
]);
1422 cinfo
->args
[n
].is_signed
= 1;
1424 cinfo
->args
[n
].size
= 1;
1425 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1428 cinfo
->args
[n
].is_signed
= 1;
1430 cinfo
->args
[n
].size
= 2;
1431 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1435 cinfo
->args
[n
].size
= 4;
1436 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1441 case MONO_TYPE_FNPTR
:
1442 case MONO_TYPE_OBJECT
:
1443 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1444 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1446 case MONO_TYPE_GENERICINST
:
1447 if (!mono_type_generic_inst_is_valuetype (t
)) {
1448 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1449 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1452 if (mini_is_gsharedvt_variable_type (t
)) {
1453 /* gsharedvt arguments are passed by ref */
1454 g_assert (mini_is_gsharedvt_type (t
));
1455 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1456 switch (ainfo
->storage
) {
1457 case RegTypeGeneral
:
1458 ainfo
->storage
= RegTypeGSharedVtInReg
;
1461 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1464 g_assert_not_reached ();
1469 case MONO_TYPE_TYPEDBYREF
:
1470 case MONO_TYPE_VALUETYPE
: {
1473 int nwords
, nfields
, esize
;
1476 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1477 if (fpr
+ nfields
< ARM_VFP_F16
) {
1478 ainfo
->storage
= RegTypeHFA
;
1480 ainfo
->nregs
= nfields
;
1481 ainfo
->esize
= esize
;
1492 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1493 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1494 align
= sizeof (target_mgreg_t
);
1496 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1498 size
= mono_class_native_size (klass
, &align
);
1500 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1502 DEBUG(g_print ("load %d bytes struct\n", size
));
1504 #ifdef TARGET_WATCHOS
1505 /* Watchos pass large structures by ref */
1506 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1507 if (sig
->pinvoke
&& size
> 16) {
1508 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1509 switch (ainfo
->storage
) {
1510 case RegTypeGeneral
:
1511 ainfo
->storage
= RegTypeStructByAddr
;
1514 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1517 g_assert_not_reached ();
1526 align_size
+= (sizeof (target_mgreg_t
) - 1);
1527 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1528 nwords
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1529 ainfo
->storage
= RegTypeStructByVal
;
1530 ainfo
->struct_size
= size
;
1531 ainfo
->align
= align
;
1533 if (eabi_supported
) {
1534 if (align
>= 8 && (gr
& 1))
1537 if (gr
> ARMREG_R3
) {
1539 ainfo
->vtsize
= nwords
;
1541 int rest
= ARMREG_R3
- gr
+ 1;
1542 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1544 ainfo
->size
= n_in_regs
;
1545 ainfo
->vtsize
= nwords
- n_in_regs
;
1548 nwords
-= n_in_regs
;
1550 stack_size
= ALIGN_TO (stack_size
, align
);
1552 ainfo
->offset
= stack_size
;
1553 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1554 stack_size
+= nwords
* sizeof (target_mgreg_t
);
1560 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1566 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1568 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1574 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1576 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1579 case MONO_TYPE_MVAR
:
1580 /* gsharedvt arguments are passed by ref */
1581 g_assert (mini_is_gsharedvt_type (t
));
1582 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1583 switch (ainfo
->storage
) {
1584 case RegTypeGeneral
:
1585 ainfo
->storage
= RegTypeGSharedVtInReg
;
1588 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1591 g_assert_not_reached ();
1595 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1600 /* Handle the case where there are no implicit arguments */
1601 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1602 /* Prevent implicit arguments and sig_cookie from
1603 being passed in registers */
1606 /* Emit the signature cookie just before the implicit arguments */
1607 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1610 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1611 stack_size
= ALIGN_TO (stack_size
, MONO_ARCH_FRAME_ALIGNMENT
);
1613 cinfo
->stack_usage
= stack_size
;
1618 * We need to create a temporary value if the argument is not stored in
1619 * a linear memory range in the ccontext (this normally happens for
1620 * value types if they are passed both by stack and regs).
1623 arg_need_temp (ArgInfo
*ainfo
)
1625 if (ainfo
->storage
== RegTypeStructByVal
&& ainfo
->vtsize
)
1626 return ainfo
->struct_size
;
1631 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1633 switch (ainfo
->storage
) {
1634 case RegTypeIRegPair
:
1635 case RegTypeGeneral
:
1636 case RegTypeStructByVal
:
1637 return &ccontext
->gregs
[ainfo
->reg
];
1641 return &ccontext
->fregs
[ainfo
->reg
];
1643 return &ccontext
->gregs
[ainfo
->reg
];
1645 return ccontext
->stack
+ ainfo
->offset
;
1647 g_error ("Arg storage type not yet supported");
1652 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1654 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1655 g_assert (arg_need_temp (ainfo
));
1656 memcpy (dest
, &ccontext
->gregs
[ainfo
->reg
], reg_size
);
1657 memcpy ((host_mgreg_t
*)dest
+ ainfo
->size
, ccontext
->stack
+ ainfo
->offset
, ainfo
->struct_size
- reg_size
);
1661 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1663 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1664 g_assert (arg_need_temp (ainfo
));
1665 memcpy (&ccontext
->gregs
[ainfo
->reg
], src
, reg_size
);
1666 memcpy (ccontext
->stack
+ ainfo
->offset
, (host_mgreg_t
*)src
+ ainfo
->size
, ainfo
->struct_size
- reg_size
);
1669 /* Set arguments in the ccontext (for i2n entry) */
1671 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1673 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1674 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1678 memset (ccontext
, 0, sizeof (CallContext
));
1680 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1681 if (ccontext
->stack_size
)
1682 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
1684 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1685 ainfo
= &cinfo
->ret
;
1686 if (ainfo
->storage
== RegTypeStructByAddr
) {
1687 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1688 ccontext
->gregs
[cinfo
->ret
.reg
] = (host_mgreg_t
)(gsize
)storage
;
1692 g_assert (!sig
->hasthis
);
1694 for (int i
= 0; i
< sig
->param_count
; i
++) {
1695 ainfo
= &cinfo
->args
[i
];
1696 int temp_size
= arg_need_temp (ainfo
);
1699 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1701 storage
= arg_get_storage (ccontext
, ainfo
);
1703 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1705 arg_set_val (ccontext
, ainfo
, storage
);
1711 /* Set return value in the ccontext (for n2i return) */
1713 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1715 const MonoEECallbacks
*interp_cb
;
1720 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1723 interp_cb
= mini_get_interp_callbacks ();
1724 cinfo
= get_call_info (NULL
, sig
);
1725 ainfo
= &cinfo
->ret
;
1727 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1728 g_assert (!arg_need_temp (ainfo
));
1729 storage
= arg_get_storage (ccontext
, ainfo
);
1730 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1731 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1737 /* Gets the arguments from ccontext (for n2i entry) */
1739 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1741 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1742 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1746 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1747 ainfo
= &cinfo
->ret
;
1748 if (ainfo
->storage
== RegTypeStructByAddr
) {
1749 storage
= (gpointer
)(gsize
)ccontext
->gregs
[cinfo
->ret
.reg
];
1750 interp_cb
->frame_arg_set_storage ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1754 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1755 ainfo
= &cinfo
->args
[i
];
1756 int temp_size
= arg_need_temp (ainfo
);
1759 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1760 arg_get_val (ccontext
, ainfo
, storage
);
1762 storage
= arg_get_storage (ccontext
, ainfo
);
1764 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1770 /* Gets the return value from ccontext (for i2n exit) */
1772 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1774 const MonoEECallbacks
*interp_cb
;
1779 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1782 interp_cb
= mini_get_interp_callbacks ();
1783 cinfo
= get_call_info (NULL
, sig
);
1784 ainfo
= &cinfo
->ret
;
1786 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1787 g_assert (!arg_need_temp (ainfo
));
1788 storage
= arg_get_storage (ccontext
, ainfo
);
1789 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1798 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1800 g_assert (caller_sig
);
1801 g_assert (callee_sig
);
1803 CallInfo
*caller_info
= get_call_info (NULL
, caller_sig
);
1804 CallInfo
*callee_info
= get_call_info (NULL
, callee_sig
);
1807 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1808 * the extra stack space would be left on the stack after the tailcall.
1810 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1811 && IS_SUPPORTED_TAILCALL (caller_info
->ret
.storage
== callee_info
->ret
.storage
);
1813 // FIXME The limit here is that moving the parameters requires addressing the parameters
1814 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1815 res
&= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
< (4096 - 4));
1816 res
&= IS_SUPPORTED_TAILCALL (caller_info
->stack_usage
< (4096 - 4));
1818 g_free (caller_info
);
1819 g_free (callee_info
);
/*
 * debug_omit_fp: debugging helper gating frame-pointer omission.
 * NOTE(review): the surrounding lines of this function were lost in
 * extraction; upstream wraps the mono_debug_count () call in
 * "#if 0 ... #else return TRUE; #endif" — confirm against the original
 * file before relying on this fragment.
 */
1825 debug_omit_fp (void)
1828 return mono_debug_count ();
1835 * mono_arch_compute_omit_fp:
1836 * Determine whether the frame pointer can be eliminated.
1839 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1841 MonoMethodSignature
*sig
;
1842 MonoMethodHeader
*header
;
1846 if (cfg
->arch
.omit_fp_computed
)
1849 header
= cfg
->header
;
1851 sig
= mono_method_signature_internal (cfg
->method
);
1853 if (!cfg
->arch
.cinfo
)
1854 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1855 cinfo
= cfg
->arch
.cinfo
;
1858 * FIXME: Remove some of the restrictions.
1860 cfg
->arch
.omit_fp
= TRUE
;
1861 cfg
->arch
.omit_fp_computed
= TRUE
;
1863 if (cfg
->disable_omit_fp
)
1864 cfg
->arch
.omit_fp
= FALSE
;
1865 if (!debug_omit_fp ())
1866 cfg
->arch
.omit_fp
= FALSE
;
1868 if (cfg->method->save_lmf)
1869 cfg->arch.omit_fp = FALSE;
1871 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1872 cfg
->arch
.omit_fp
= FALSE
;
1873 if (header
->num_clauses
)
1874 cfg
->arch
.omit_fp
= FALSE
;
1875 if (cfg
->param_area
)
1876 cfg
->arch
.omit_fp
= FALSE
;
1877 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1878 cfg
->arch
.omit_fp
= FALSE
;
1879 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)))
1880 cfg
->arch
.omit_fp
= FALSE
;
1881 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1882 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1884 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1886 * The stack offset can only be determined when the frame
1889 cfg
->arch
.omit_fp
= FALSE
;
1894 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1895 MonoInst
*ins
= cfg
->varinfo
[i
];
1898 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
/*
 * mono_arch_allocate_vars (ARM): assign registers / stack offsets to the
 * return value, locals and incoming arguments.
 * NOTE(review): this block is extraction-mangled (many structural lines were
 * dropped); the fragments below are preserved verbatim — restore from the
 * original file before editing.
 */
1903 * Set var information according to the calling convention. arm version.
1904 * The locals var stuff should most likely be split in another method.
1907 mono_arch_allocate_vars (MonoCompile
*cfg
)
1909 MonoMethodSignature
*sig
;
1910 MonoMethodHeader
*header
;
1913 int i
, offset
, size
, align
, curinst
;
1918 sig
= mono_method_signature_internal (cfg
->method
);
1920 if (!cfg
->arch
.cinfo
)
1921 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1922 cinfo
= cfg
->arch
.cinfo
;
1923 sig_ret
= mini_get_underlying_type (sig
->ret
);
1925 mono_arch_compute_omit_fp (cfg
);
1927 if (cfg
->arch
.omit_fp
)
1928 cfg
->frame_reg
= ARMREG_SP
;
1930 cfg
->frame_reg
= ARMREG_FP
;
1932 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1934 /* allow room for the vararg method args: void* and long/double */
1935 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1936 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (target_mgreg_t
)*8);
1938 header
= cfg
->header
;
1940 /* See mono_arch_get_global_int_regs () */
1941 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1942 cfg
->uses_rgctx_reg
= TRUE
;
1944 if (cfg
->frame_reg
!= ARMREG_SP
)
1945 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1947 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1948 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1949 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1953 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1954 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1955 cfg
->ret
->opcode
= OP_REGVAR
;
1956 cfg
->ret
->inst_c0
= ARMREG_R0
;
1959 /* local vars are at a positive offset from the stack pointer */
1961 * also note that if the function uses alloca, we use FP
1962 * to point at the local variables.
1964 offset
= 0; /* linkage area */
1965 /* align the offset to 16 bytes: not sure this is needed here */
1967 //offset &= ~(8 - 1);
1969 /* add parameter area size for called functions */
1970 offset
+= cfg
->param_area
;
1973 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1976 /* allow room to save the return value */
1977 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
/* Reserve frame space for the return value, by storage kind. */
1980 switch (cinfo
->ret
.storage
) {
1981 case RegTypeStructByVal
:
1983 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1984 offset
= ALIGN_TO (offset
, 8);
1985 cfg
->ret
->opcode
= OP_REGOFFSET
;
1986 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1987 cfg
->ret
->inst_offset
= offset
;
1988 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1989 offset
+= cinfo
->ret
.nregs
* sizeof (target_mgreg_t
);
1993 case RegTypeStructByAddr
:
1994 ins
= cfg
->vret_addr
;
1995 offset
+= sizeof (target_mgreg_t
) - 1;
1996 offset
&= ~(sizeof (target_mgreg_t
) - 1);
1997 ins
->inst_offset
= offset
;
1998 ins
->opcode
= OP_REGOFFSET
;
1999 ins
->inst_basereg
= cfg
->frame_reg
;
2000 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2001 g_print ("vret_addr =");
2002 mono_print_ins (cfg
->vret_addr
);
2004 offset
+= sizeof (target_mgreg_t
);
2010 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2011 if (cfg
->arch
.seq_point_info_var
) {
2014 ins
= cfg
->arch
.seq_point_info_var
;
2018 offset
+= align
- 1;
2019 offset
&= ~(align
- 1);
2020 ins
->opcode
= OP_REGOFFSET
;
2021 ins
->inst_basereg
= cfg
->frame_reg
;
2022 ins
->inst_offset
= offset
;
2025 if (cfg
->arch
.ss_trigger_page_var
) {
2028 ins
= cfg
->arch
.ss_trigger_page_var
;
2031 offset
+= align
- 1;
2032 offset
&= ~(align
- 1);
2033 ins
->opcode
= OP_REGOFFSET
;
2034 ins
->inst_basereg
= cfg
->frame_reg
;
2035 ins
->inst_offset
= offset
;
2039 if (cfg
->arch
.seq_point_ss_method_var
) {
2042 ins
= cfg
->arch
.seq_point_ss_method_var
;
2045 offset
+= align
- 1;
2046 offset
&= ~(align
- 1);
2047 ins
->opcode
= OP_REGOFFSET
;
2048 ins
->inst_basereg
= cfg
->frame_reg
;
2049 ins
->inst_offset
= offset
;
2052 if (cfg
->arch
.seq_point_bp_method_var
) {
2055 ins
= cfg
->arch
.seq_point_bp_method_var
;
2058 offset
+= align
- 1;
2059 offset
&= ~(align
- 1);
2060 ins
->opcode
= OP_REGOFFSET
;
2061 ins
->inst_basereg
= cfg
->frame_reg
;
2062 ins
->inst_offset
= offset
;
2066 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
2067 /* Allocate a temporary used by the atomic ops */
2071 /* Allocate a local slot to hold the sig cookie address */
2072 offset
+= align
- 1;
2073 offset
&= ~(align
- 1);
2074 cfg
->arch
.atomic_tmp_offset
= offset
;
2077 cfg
->arch
.atomic_tmp_offset
= -1;
2080 cfg
->locals_min_stack_offset
= offset
;
/* Lay out local variables (varinfo entries) on the frame. */
2082 curinst
= cfg
->locals_start
;
2083 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
2086 ins
= cfg
->varinfo
[i
];
2087 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
2090 t
= ins
->inst_vtype
;
2091 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
2094 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2095 * pinvoke wrappers when they call functions returning structure */
2096 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
2097 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &ualign
);
2101 size
= mono_type_size (t
, &align
);
2103 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2104 * since it loads/stores misaligned words, which don't do the right thing.
2106 if (align
< 4 && size
>= 4)
2108 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2109 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2110 offset
+= align
- 1;
2111 offset
&= ~(align
- 1);
2112 ins
->opcode
= OP_REGOFFSET
;
2113 ins
->inst_offset
= offset
;
2114 ins
->inst_basereg
= cfg
->frame_reg
;
2116 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2119 cfg
->locals_max_stack_offset
= offset
;
2123 ins
= cfg
->args
[curinst
];
2124 if (ins
->opcode
!= OP_REGVAR
) {
2125 ins
->opcode
= OP_REGOFFSET
;
2126 ins
->inst_basereg
= cfg
->frame_reg
;
2127 offset
+= sizeof (target_mgreg_t
) - 1;
2128 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2129 ins
->inst_offset
= offset
;
2130 offset
+= sizeof (target_mgreg_t
);
2135 if (sig
->call_convention
== MONO_CALL_VARARG
) {
2139 /* Allocate a local slot to hold the sig cookie address */
2140 offset
+= align
- 1;
2141 offset
&= ~(align
- 1);
2142 cfg
->sig_cookie
= offset
;
/* Lay out the incoming parameters on the frame. */
2146 for (i
= 0; i
< sig
->param_count
; ++i
) {
2147 ainfo
= cinfo
->args
+ i
;
2149 ins
= cfg
->args
[curinst
];
2151 switch (ainfo
->storage
) {
2153 offset
= ALIGN_TO (offset
, 8);
2154 ins
->opcode
= OP_REGOFFSET
;
2155 ins
->inst_basereg
= cfg
->frame_reg
;
2156 /* These arguments are saved to the stack in the prolog */
2157 ins
->inst_offset
= offset
;
2158 if (cfg
->verbose_level
>= 2)
2159 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2167 if (ins
->opcode
!= OP_REGVAR
) {
2168 ins
->opcode
= OP_REGOFFSET
;
2169 ins
->inst_basereg
= cfg
->frame_reg
;
2170 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
2172 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2173 * since it loads/stores misaligned words, which don't do the right thing.
2175 if (align
< 4 && size
>= 4)
2177 /* The code in the prolog () stores words when storing vtypes received in a register */
2178 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
2180 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2181 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2182 offset
+= align
- 1;
2183 offset
&= ~(align
- 1);
2184 ins
->inst_offset
= offset
;
2190 /* align the offset to 8 bytes */
2191 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2192 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2197 cfg
->stack_offset
= offset
;
2201 mono_arch_create_vars (MonoCompile
*cfg
)
2203 MonoMethodSignature
*sig
;
2207 sig
= mono_method_signature_internal (cfg
->method
);
2209 if (!cfg
->arch
.cinfo
)
2210 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2211 cinfo
= cfg
->arch
.cinfo
;
2213 if (IS_HARD_FLOAT
) {
2214 for (i
= 0; i
< 2; i
++) {
2215 MonoInst
*inst
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_LOCAL
);
2216 inst
->flags
|= MONO_INST_VOLATILE
;
2218 cfg
->arch
.vfp_scratch_slots
[i
] = inst
;
2222 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2223 cfg
->ret_var_is_local
= TRUE
;
2225 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2226 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
2227 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2228 g_print ("vret_addr = ");
2229 mono_print_ins (cfg
->vret_addr
);
2233 if (cfg
->gen_sdb_seq_points
) {
2234 if (cfg
->compile_aot
) {
2235 MonoInst
*ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2236 ins
->flags
|= MONO_INST_VOLATILE
;
2237 cfg
->arch
.seq_point_info_var
= ins
;
2239 if (!cfg
->soft_breakpoints
) {
2240 /* Allocate a separate variable for this to save 1 load per seq point */
2241 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2242 ins
->flags
|= MONO_INST_VOLATILE
;
2243 cfg
->arch
.ss_trigger_page_var
= ins
;
2246 if (cfg
->soft_breakpoints
) {
2249 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2250 ins
->flags
|= MONO_INST_VOLATILE
;
2251 cfg
->arch
.seq_point_ss_method_var
= ins
;
2253 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2254 ins
->flags
|= MONO_INST_VOLATILE
;
2255 cfg
->arch
.seq_point_bp_method_var
= ins
;
2261 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2263 MonoMethodSignature
*tmp_sig
;
2266 if (MONO_IS_TAILCALL_OPCODE (call
))
2269 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2272 * mono_ArgIterator_Setup assumes the signature cookie is
2273 * passed first and all the arguments which were before it are
2274 * passed on the stack after the signature. So compensate by
2275 * passing a different signature.
2277 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2278 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2279 tmp_sig
->sentinelpos
= 0;
2280 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2282 sig_reg
= mono_alloc_ireg (cfg
);
2283 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2285 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2290 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2295 LLVMCallInfo
*linfo
;
2297 n
= sig
->param_count
+ sig
->hasthis
;
2299 cinfo
= get_call_info (cfg
->mempool
, sig
);
2301 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2304 * LLVM always uses the native ABI while we use our own ABI, the
2305 * only difference is the handling of vtypes:
2306 * - we only pass/receive them in registers in some cases, and only
2307 * in 1 or 2 integer registers.
2309 switch (cinfo
->ret
.storage
) {
2310 case RegTypeGeneral
:
2313 case RegTypeIRegPair
:
2315 case RegTypeStructByAddr
:
2317 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2319 /* Vtype returned using a hidden argument */
2320 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2321 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2325 case RegTypeStructByVal
:
2326 /* LLVM models this by returning an int array */
2327 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2328 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2332 linfo
->ret
.storage
= LLVMArgFpStruct
;
2333 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2334 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2337 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2338 cfg
->disable_llvm
= TRUE
;
2342 for (i
= 0; i
< n
; ++i
) {
2343 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2344 ainfo
= cinfo
->args
+ i
;
2346 lainfo
->storage
= LLVMArgNone
;
2348 switch (ainfo
->storage
) {
2349 case RegTypeGeneral
:
2350 case RegTypeIRegPair
:
2352 case RegTypeBaseGen
:
2354 lainfo
->storage
= LLVMArgNormal
;
2356 case RegTypeStructByVal
: {
2357 lainfo
->storage
= LLVMArgAsIArgs
;
2358 int slotsize
= eabi_supported
&& ainfo
->align
== 8 ? 8 : 4;
2359 lainfo
->nslots
= ALIGN_TO (ainfo
->struct_size
, slotsize
) / slotsize
;
2360 lainfo
->esize
= slotsize
;
2363 case RegTypeStructByAddr
:
2364 case RegTypeStructByAddrOnStack
:
2365 lainfo
->storage
= LLVMArgVtypeByRef
;
2370 lainfo
->storage
= LLVMArgAsFpArgs
;
2371 lainfo
->nslots
= ainfo
->nregs
;
2372 lainfo
->esize
= ainfo
->esize
;
2373 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2374 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2378 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2379 cfg
->disable_llvm
= TRUE
;
2389 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2392 MonoMethodSignature
*sig
;
2396 sig
= call
->signature
;
2397 n
= sig
->param_count
+ sig
->hasthis
;
2399 cinfo
= get_call_info (cfg
->mempool
, sig
);
2401 switch (cinfo
->ret
.storage
) {
2402 case RegTypeStructByVal
:
2404 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2405 /* The JIT will transform this into a normal call */
2406 call
->vret_in_reg
= TRUE
;
2409 if (MONO_IS_TAILCALL_OPCODE (call
))
2412 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2413 * the location pointed to by it after call in emit_move_return_value ().
2415 if (!cfg
->arch
.vret_addr_loc
) {
2416 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2417 /* Prevent it from being register allocated or optimized away */
2418 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2421 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2423 case RegTypeStructByAddr
: {
2425 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2426 vtarg
->sreg1
= call
->vret_var
->dreg
;
2427 vtarg
->dreg
= mono_alloc_preg (cfg
);
2428 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2430 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2437 for (i
= 0; i
< n
; ++i
) {
2438 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2441 if (i
>= sig
->hasthis
)
2442 t
= sig
->params
[i
- sig
->hasthis
];
2444 t
= mono_get_int_type ();
2445 t
= mini_get_underlying_type (t
);
2447 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2448 /* Emit the signature cookie just before the implicit arguments */
2449 emit_sig_cookie (cfg
, call
, cinfo
);
2452 in
= call
->args
[i
];
2454 switch (ainfo
->storage
) {
2455 case RegTypeGeneral
:
2456 case RegTypeIRegPair
:
2457 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2458 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2459 ins
->dreg
= mono_alloc_ireg (cfg
);
2460 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2461 MONO_ADD_INS (cfg
->cbb
, ins
);
2462 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2464 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2465 ins
->dreg
= mono_alloc_ireg (cfg
);
2466 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2467 MONO_ADD_INS (cfg
->cbb
, ins
);
2468 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2469 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2470 if (ainfo
->size
== 4) {
2471 if (IS_SOFT_FLOAT
) {
2472 /* mono_emit_call_args () have already done the r8->r4 conversion */
2473 /* The converted value is in an int vreg */
2474 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2475 ins
->dreg
= mono_alloc_ireg (cfg
);
2476 ins
->sreg1
= in
->dreg
;
2477 MONO_ADD_INS (cfg
->cbb
, ins
);
2478 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2482 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2483 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2484 creg
= mono_alloc_ireg (cfg
);
2485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2486 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2489 if (IS_SOFT_FLOAT
) {
2490 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2491 ins
->dreg
= mono_alloc_ireg (cfg
);
2492 ins
->sreg1
= in
->dreg
;
2493 MONO_ADD_INS (cfg
->cbb
, ins
);
2494 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2496 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2497 ins
->dreg
= mono_alloc_ireg (cfg
);
2498 ins
->sreg1
= in
->dreg
;
2499 MONO_ADD_INS (cfg
->cbb
, ins
);
2500 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2504 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2505 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2506 creg
= mono_alloc_ireg (cfg
);
2507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2508 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2509 creg
= mono_alloc_ireg (cfg
);
2510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2511 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2514 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2516 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2517 ins
->dreg
= mono_alloc_ireg (cfg
);
2518 ins
->sreg1
= in
->dreg
;
2519 MONO_ADD_INS (cfg
->cbb
, ins
);
2521 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2524 case RegTypeStructByVal
:
2525 case RegTypeGSharedVtInReg
:
2526 case RegTypeGSharedVtOnStack
:
2528 case RegTypeStructByAddr
:
2529 case RegTypeStructByAddrOnStack
:
2530 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2531 ins
->opcode
= OP_OUTARG_VT
;
2532 ins
->sreg1
= in
->dreg
;
2533 ins
->klass
= in
->klass
;
2534 ins
->inst_p0
= call
;
2535 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2536 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2537 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2538 MONO_ADD_INS (cfg
->cbb
, ins
);
2541 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2542 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2543 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2544 if (t
->type
== MONO_TYPE_R8
) {
2545 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2548 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2550 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2553 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2556 case RegTypeBaseGen
:
2557 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2558 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2559 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2560 ins
->dreg
= mono_alloc_ireg (cfg
);
2561 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2562 MONO_ADD_INS (cfg
->cbb
, ins
);
2563 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2564 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2567 /* This should work for soft-float as well */
2569 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2570 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2571 creg
= mono_alloc_ireg (cfg
);
2572 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2573 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2574 creg
= mono_alloc_ireg (cfg
);
2575 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2576 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2577 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2579 g_assert_not_reached ();
2583 int fdreg
= mono_alloc_freg (cfg
);
2585 if (ainfo
->size
== 8) {
2586 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2587 ins
->sreg1
= in
->dreg
;
2589 MONO_ADD_INS (cfg
->cbb
, ins
);
2591 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2596 * Mono's register allocator doesn't speak single-precision registers that
2597 * overlap double-precision registers (i.e. armhf). So we have to work around
2598 * the register allocator and load the value from memory manually.
2600 * So we create a variable for the float argument and an instruction to store
2601 * the argument into the variable. We then store the list of these arguments
2602 * in call->float_args. This list is then used by emit_float_args later to
2603 * pass the arguments in the various call opcodes.
2605 * This is not very nice, and we should really try to fix the allocator.
2608 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2610 /* Make sure the instruction isn't seen as pointless and removed.
2612 float_arg
->flags
|= MONO_INST_VOLATILE
;
2614 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2616 /* We use the dreg to look up the instruction later. The hreg is used to
2617 * emit the instruction that loads the value into the FP reg.
2619 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2620 fad
->vreg
= float_arg
->dreg
;
2621 fad
->hreg
= ainfo
->reg
;
2623 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2626 call
->used_iregs
|= 1 << ainfo
->reg
;
2627 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2631 g_assert_not_reached ();
2635 /* Handle the case where there are no implicit arguments */
2636 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2637 emit_sig_cookie (cfg
, call
, cinfo
);
2639 call
->call_info
= cinfo
;
2640 call
->stack_usage
= cinfo
->stack_usage
;
2644 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2650 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2651 ins
->dreg
= mono_alloc_freg (cfg
);
2652 ins
->sreg1
= arg
->dreg
;
2653 MONO_ADD_INS (cfg
->cbb
, ins
);
2654 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2657 g_assert_not_reached ();
2663 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2665 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2667 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
2668 int ovf_size
= ainfo
->vtsize
;
2669 int doffset
= ainfo
->offset
;
2670 int struct_size
= ainfo
->struct_size
;
2671 int i
, soffset
, dreg
, tmpreg
;
2673 switch (ainfo
->storage
) {
2674 case RegTypeGSharedVtInReg
:
2675 case RegTypeStructByAddr
:
2677 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2679 case RegTypeGSharedVtOnStack
:
2680 case RegTypeStructByAddrOnStack
:
2681 /* Pass by addr on stack */
2682 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2685 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2686 if (ainfo
->esize
== 4)
2687 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2689 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2690 load
->dreg
= mono_alloc_freg (cfg
);
2691 load
->inst_basereg
= src
->dreg
;
2692 load
->inst_offset
= i
* ainfo
->esize
;
2693 MONO_ADD_INS (cfg
->cbb
, load
);
2695 if (ainfo
->esize
== 4) {
2698 /* See RegTypeFP in mono_arch_emit_call () */
2699 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2700 float_arg
->flags
|= MONO_INST_VOLATILE
;
2701 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2703 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2704 fad
->vreg
= float_arg
->dreg
;
2705 fad
->hreg
= ainfo
->reg
+ i
;
2707 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2709 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2715 for (i
= 0; i
< ainfo
->size
; ++i
) {
2716 dreg
= mono_alloc_ireg (cfg
);
2717 switch (struct_size
) {
2719 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2722 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2725 tmpreg
= mono_alloc_ireg (cfg
);
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2727 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2728 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2729 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2730 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2731 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2732 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2735 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2738 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2739 soffset
+= sizeof (target_mgreg_t
);
2740 struct_size
-= sizeof (target_mgreg_t
);
2742 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2744 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (target_mgreg_t
), struct_size
), struct_size
< 4 ? 1 : 4);
2750 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2752 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
2755 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2758 if (COMPILE_LLVM (cfg
)) {
2759 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2761 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2762 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2763 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2764 MONO_ADD_INS (cfg
->cbb
, ins
);
2769 case MONO_ARM_FPU_NONE
:
2770 if (ret
->type
== MONO_TYPE_R8
) {
2773 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2774 ins
->dreg
= cfg
->ret
->dreg
;
2775 ins
->sreg1
= val
->dreg
;
2776 MONO_ADD_INS (cfg
->cbb
, ins
);
2779 if (ret
->type
== MONO_TYPE_R4
) {
2780 /* Already converted to an int in method_to_ir () */
2781 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2785 case MONO_ARM_FPU_VFP
:
2786 case MONO_ARM_FPU_VFP_HARD
:
2787 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2790 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2791 ins
->dreg
= cfg
->ret
->dreg
;
2792 ins
->sreg1
= val
->dreg
;
2793 MONO_ADD_INS (cfg
->cbb
, ins
);
2798 g_assert_not_reached ();
2802 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2805 #endif /* #ifndef DISABLE_JIT */
2808 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2814 MonoMethodSignature
*sig
;
2817 MonoType
**param_types
;
2821 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2825 switch (cinfo
->ret
.storage
) {
2827 case RegTypeGeneral
:
2828 case RegTypeIRegPair
:
2829 case RegTypeStructByAddr
:
2840 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2841 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2844 switch (ainfo
->storage
) {
2845 case RegTypeGeneral
:
2846 case RegTypeIRegPair
:
2847 case RegTypeBaseGen
:
2852 case RegTypeStructByVal
:
2853 if (ainfo
->size
== 0)
2854 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2856 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2863 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2864 for (i
= 0; i
< sig
->param_count
; ++i
) {
2865 MonoType
*t
= sig
->params
[i
];
2870 t
= mini_get_underlying_type (t
);
2893 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2895 ArchDynCallInfo
*info
;
2899 cinfo
= get_call_info (NULL
, sig
);
2901 if (!dyn_call_supported (cinfo
, sig
)) {
2906 info
= g_new0 (ArchDynCallInfo
, 1);
2907 // FIXME: Preprocess the info to speed up start_dyn_call ()
2909 info
->cinfo
= cinfo
;
2910 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2911 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2912 for (i
= 0; i
< sig
->param_count
; ++i
)
2913 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2915 return (MonoDynCallInfo
*)info
;
2919 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2921 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2923 g_free (ainfo
->cinfo
);
2928 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
2930 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2932 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
2933 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
;
2937 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
2939 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2940 CallInfo
*cinfo
= dinfo
->cinfo
;
2941 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2942 int arg_index
, greg
, i
, j
, pindex
;
2943 MonoMethodSignature
*sig
= dinfo
->sig
;
2948 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
2954 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2955 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)*(args
[arg_index
++]);
2960 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2961 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)ret
;
2963 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2964 MonoType
*t
= dinfo
->param_types
[i
];
2965 gpointer
*arg
= args
[arg_index
++];
2966 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2969 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2971 } else if (ainfo
->storage
== RegTypeFP
) {
2972 } else if (ainfo
->storage
== RegTypeBase
) {
2973 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2974 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2975 /* slot + 1 is the first stack slot, so the code below will work */
2978 g_assert_not_reached ();
2982 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
2987 case MONO_TYPE_OBJECT
:
2991 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
2994 p
->regs
[slot
] = *(guint8
*)arg
;
2997 p
->regs
[slot
] = *(gint8
*)arg
;
3000 p
->regs
[slot
] = *(gint16
*)arg
;
3003 p
->regs
[slot
] = *(guint16
*)arg
;
3006 p
->regs
[slot
] = *(gint32
*)arg
;
3009 p
->regs
[slot
] = *(guint32
*)arg
;
3013 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3014 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3017 if (ainfo
->storage
== RegTypeFP
) {
3018 float f
= *(float*)arg
;
3019 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
3022 p
->regs
[slot
] = *(host_mgreg_t
*)arg
;
3026 if (ainfo
->storage
== RegTypeFP
) {
3027 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
3030 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3031 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3034 case MONO_TYPE_GENERICINST
:
3035 if (MONO_TYPE_IS_REFERENCE (t
)) {
3036 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3039 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
3040 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
3041 guint8
*nullable_buf
;
3044 size
= mono_class_value_size (klass
, NULL
);
3045 nullable_buf
= g_alloca (size
);
3046 g_assert (nullable_buf
);
3048 /* The argument pointed to by arg is either a boxed vtype or null */
3049 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
3051 arg
= (gpointer
*)nullable_buf
;
3057 case MONO_TYPE_VALUETYPE
:
3058 g_assert (ainfo
->storage
== RegTypeStructByVal
);
3060 if (ainfo
->size
== 0)
3061 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
3065 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
3066 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [j
];
3069 g_assert_not_reached ();
3075 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
3077 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
3078 DynCallArgs
*p
= (DynCallArgs
*)buf
;
3079 MonoType
*ptype
= ainfo
->rtype
;
3080 guint8
*ret
= p
->ret
;
3081 host_mgreg_t res
= p
->res
;
3082 host_mgreg_t res2
= p
->res2
;
3084 switch (ptype
->type
) {
3085 case MONO_TYPE_VOID
:
3086 *(gpointer
*)ret
= NULL
;
3088 case MONO_TYPE_OBJECT
:
3092 *(gpointer
*)ret
= (gpointer
)(gsize
)res
;
3098 *(guint8
*)ret
= res
;
3101 *(gint16
*)ret
= res
;
3104 *(guint16
*)ret
= res
;
3107 *(gint32
*)ret
= res
;
3110 *(guint32
*)ret
= res
;
3114 /* This handles endianness as well */
3115 ((gint32
*)ret
) [0] = res
;
3116 ((gint32
*)ret
) [1] = res2
;
3118 case MONO_TYPE_GENERICINST
:
3119 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
3120 *(gpointer
*)ret
= (gpointer
)res
;
3125 case MONO_TYPE_VALUETYPE
:
3126 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
3132 *(float*)ret
= *(float*)&p
->fpregs
[0];
3134 *(float*)ret
= *(float*)&res
;
3136 case MONO_TYPE_R8
: {
3137 host_mgreg_t regs
[2];
3140 if (IS_HARD_FLOAT
) {
3141 *(double*)ret
= p
->fpregs
[0];
3146 *(double*)ret
= *(double*)®s
;
3151 g_assert_not_reached ();
3158 * The immediate field for cond branches is big enough for all reasonable methods
3160 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3161 if (0 && ins->inst_true_bb->native_offset) { \
3162 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3164 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3165 ARM_B_COND (code, (condcode), 0); \
3168 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3170 /* emit an exception if condition is fail
3172 * We assign the extra code used to throw the implicit exceptions
3173 * to cfg->bb_exit as far as the big branch handling is concerned
3175 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3177 mono_add_patch_info (cfg, code - cfg->native_code, \
3178 MONO_PATCH_INFO_EXC, exc_name); \
3179 ARM_BL_COND (code, (condcode), 0); \
3182 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3185 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3190 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3194 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3195 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3197 switch (ins
->opcode
) {
3200 /* Already done by an arch-independent pass */
3202 case OP_LOAD_MEMBASE
:
3203 case OP_LOADI4_MEMBASE
:
3205 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3206 * OP_LOAD_MEMBASE offset(basereg), reg
3208 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3209 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3210 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3211 ins
->inst_offset
== last_ins
->inst_offset
) {
3212 if (ins
->dreg
== last_ins
->sreg1
) {
3213 MONO_DELETE_INS (bb
, ins
);
3216 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3217 ins
->opcode
= OP_MOVE
;
3218 ins
->sreg1
= last_ins
->sreg1
;
3222 * Note: reg1 must be different from the basereg in the second load
3223 * OP_LOAD_MEMBASE offset(basereg), reg1
3224 * OP_LOAD_MEMBASE offset(basereg), reg2
3226 * OP_LOAD_MEMBASE offset(basereg), reg1
3227 * OP_MOVE reg1, reg2
3229 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3230 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3231 ins
->inst_basereg
!= last_ins
->dreg
&&
3232 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3233 ins
->inst_offset
== last_ins
->inst_offset
) {
3235 if (ins
->dreg
== last_ins
->dreg
) {
3236 MONO_DELETE_INS (bb
, ins
);
3239 ins
->opcode
= OP_MOVE
;
3240 ins
->sreg1
= last_ins
->dreg
;
3243 //g_assert_not_reached ();
3247 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3248 * OP_LOAD_MEMBASE offset(basereg), reg
3250 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3251 * OP_ICONST reg, imm
3253 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3254 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3255 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3256 ins
->inst_offset
== last_ins
->inst_offset
) {
3257 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3258 ins
->opcode
= OP_ICONST
;
3259 ins
->inst_c0
= last_ins
->inst_imm
;
3260 g_assert_not_reached (); // check this rule
3264 case OP_LOADU1_MEMBASE
:
3265 case OP_LOADI1_MEMBASE
:
3266 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3267 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3268 ins
->inst_offset
== last_ins
->inst_offset
) {
3269 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3270 ins
->sreg1
= last_ins
->sreg1
;
3273 case OP_LOADU2_MEMBASE
:
3274 case OP_LOADI2_MEMBASE
:
3275 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3276 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3277 ins
->inst_offset
== last_ins
->inst_offset
) {
3278 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3279 ins
->sreg1
= last_ins
->sreg1
;
3283 ins
->opcode
= OP_MOVE
;
3287 if (ins
->dreg
== ins
->sreg1
) {
3288 MONO_DELETE_INS (bb
, ins
);
3292 * OP_MOVE sreg, dreg
3293 * OP_MOVE dreg, sreg
3295 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3296 ins
->sreg1
== last_ins
->dreg
&&
3297 ins
->dreg
== last_ins
->sreg1
) {
3298 MONO_DELETE_INS (bb
, ins
);
3307 * the branch_cc_table should maintain the order of these
3321 branch_cc_table
[] = {
3335 #define ADD_NEW_INS(cfg,dest,op) do { \
3336 MONO_INST_NEW ((cfg), (dest), (op)); \
3337 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3341 map_to_reg_reg_op (int op
)
3350 case OP_COMPARE_IMM
:
3352 case OP_ICOMPARE_IMM
:
3366 case OP_LOAD_MEMBASE
:
3367 return OP_LOAD_MEMINDEX
;
3368 case OP_LOADI4_MEMBASE
:
3369 return OP_LOADI4_MEMINDEX
;
3370 case OP_LOADU4_MEMBASE
:
3371 return OP_LOADU4_MEMINDEX
;
3372 case OP_LOADU1_MEMBASE
:
3373 return OP_LOADU1_MEMINDEX
;
3374 case OP_LOADI2_MEMBASE
:
3375 return OP_LOADI2_MEMINDEX
;
3376 case OP_LOADU2_MEMBASE
:
3377 return OP_LOADU2_MEMINDEX
;
3378 case OP_LOADI1_MEMBASE
:
3379 return OP_LOADI1_MEMINDEX
;
3380 case OP_STOREI1_MEMBASE_REG
:
3381 return OP_STOREI1_MEMINDEX
;
3382 case OP_STOREI2_MEMBASE_REG
:
3383 return OP_STOREI2_MEMINDEX
;
3384 case OP_STOREI4_MEMBASE_REG
:
3385 return OP_STOREI4_MEMINDEX
;
3386 case OP_STORE_MEMBASE_REG
:
3387 return OP_STORE_MEMINDEX
;
3388 case OP_STORER4_MEMBASE_REG
:
3389 return OP_STORER4_MEMINDEX
;
3390 case OP_STORER8_MEMBASE_REG
:
3391 return OP_STORER8_MEMINDEX
;
3392 case OP_STORE_MEMBASE_IMM
:
3393 return OP_STORE_MEMBASE_REG
;
3394 case OP_STOREI1_MEMBASE_IMM
:
3395 return OP_STOREI1_MEMBASE_REG
;
3396 case OP_STOREI2_MEMBASE_IMM
:
3397 return OP_STOREI2_MEMBASE_REG
;
3398 case OP_STOREI4_MEMBASE_IMM
:
3399 return OP_STOREI4_MEMBASE_REG
;
3401 g_assert_not_reached ();
3405 * Remove from the instruction list the instructions that can't be
3406 * represented with very simple instructions with no register
3410 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3412 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3413 int rot_amount
, imm8
, low_imm
;
3415 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3417 switch (ins
->opcode
) {
3421 case OP_COMPARE_IMM
:
3422 case OP_ICOMPARE_IMM
:
3436 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3437 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3438 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3439 temp
->inst_c0
= ins
->inst_imm
;
3440 temp
->dreg
= mono_alloc_ireg (cfg
);
3441 ins
->sreg2
= temp
->dreg
;
3443 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3444 ins
->opcode
= opcode2
;
3446 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3452 if (ins
->inst_imm
== 1) {
3453 ins
->opcode
= OP_MOVE
;
3456 if (ins
->inst_imm
== 0) {
3457 ins
->opcode
= OP_ICONST
;
3461 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3463 ins
->opcode
= OP_SHL_IMM
;
3464 ins
->inst_imm
= imm8
;
3467 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3468 temp
->inst_c0
= ins
->inst_imm
;
3469 temp
->dreg
= mono_alloc_ireg (cfg
);
3470 ins
->sreg2
= temp
->dreg
;
3471 ins
->opcode
= OP_IMUL
;
3478 MonoInst
*current
= ins
;
3480 /* may require a look-ahead of a couple instructions due to spilling */
3481 while (try_count
-- && current
->next
) {
3482 if (current
->next
->opcode
== OP_COND_EXC_C
|| current
->next
->opcode
== OP_COND_EXC_IC
) {
3483 /* ARM sets the C flag to 1 if there was _no_ overflow */
3484 current
->next
->opcode
= OP_COND_EXC_NC
;
3487 current
= current
->next
;
3492 case OP_IDIV_UN_IMM
:
3494 case OP_IREM_UN_IMM
: {
3495 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3496 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3497 temp
->inst_c0
= ins
->inst_imm
;
3498 temp
->dreg
= mono_alloc_ireg (cfg
);
3499 ins
->sreg2
= temp
->dreg
;
3501 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3502 ins
->opcode
= opcode2
;
3505 case OP_LOCALLOC_IMM
:
3506 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3507 temp
->inst_c0
= ins
->inst_imm
;
3508 temp
->dreg
= mono_alloc_ireg (cfg
);
3509 ins
->sreg1
= temp
->dreg
;
3510 ins
->opcode
= OP_LOCALLOC
;
3512 case OP_LOAD_MEMBASE
:
3513 case OP_LOADI4_MEMBASE
:
3514 case OP_LOADU4_MEMBASE
:
3515 case OP_LOADU1_MEMBASE
:
3516 /* we can do two things: load the immed in a register
3517 * and use an indexed load, or see if the immed can be
3518 * represented as an ad_imm + a load with a smaller offset
3519 * that fits. We just do the first for now, optimize later.
3521 if (arm_is_imm12 (ins
->inst_offset
))
3523 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3524 temp
->inst_c0
= ins
->inst_offset
;
3525 temp
->dreg
= mono_alloc_ireg (cfg
);
3526 ins
->sreg2
= temp
->dreg
;
3527 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3529 case OP_LOADI2_MEMBASE
:
3530 case OP_LOADU2_MEMBASE
:
3531 case OP_LOADI1_MEMBASE
:
3532 if (arm_is_imm8 (ins
->inst_offset
))
3534 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3535 temp
->inst_c0
= ins
->inst_offset
;
3536 temp
->dreg
= mono_alloc_ireg (cfg
);
3537 ins
->sreg2
= temp
->dreg
;
3538 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3540 case OP_LOADR4_MEMBASE
:
3541 case OP_LOADR8_MEMBASE
:
3542 if (arm_is_fpimm8 (ins
->inst_offset
))
3544 low_imm
= ins
->inst_offset
& 0x1ff;
3545 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3546 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3547 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3548 temp
->sreg1
= ins
->inst_basereg
;
3549 temp
->dreg
= mono_alloc_ireg (cfg
);
3550 ins
->inst_basereg
= temp
->dreg
;
3551 ins
->inst_offset
= low_imm
;
3555 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3556 temp
->inst_c0
= ins
->inst_offset
;
3557 temp
->dreg
= mono_alloc_ireg (cfg
);
3559 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3560 add_ins
->sreg1
= ins
->inst_basereg
;
3561 add_ins
->sreg2
= temp
->dreg
;
3562 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3564 ins
->inst_basereg
= add_ins
->dreg
;
3565 ins
->inst_offset
= 0;
3568 case OP_STORE_MEMBASE_REG
:
3569 case OP_STOREI4_MEMBASE_REG
:
3570 case OP_STOREI1_MEMBASE_REG
:
3571 if (arm_is_imm12 (ins
->inst_offset
))
3573 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3574 temp
->inst_c0
= ins
->inst_offset
;
3575 temp
->dreg
= mono_alloc_ireg (cfg
);
3576 ins
->sreg2
= temp
->dreg
;
3577 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3579 case OP_STOREI2_MEMBASE_REG
:
3580 if (arm_is_imm8 (ins
->inst_offset
))
3582 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3583 temp
->inst_c0
= ins
->inst_offset
;
3584 temp
->dreg
= mono_alloc_ireg (cfg
);
3585 ins
->sreg2
= temp
->dreg
;
3586 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3588 case OP_STORER4_MEMBASE_REG
:
3589 case OP_STORER8_MEMBASE_REG
:
3590 if (arm_is_fpimm8 (ins
->inst_offset
))
3592 low_imm
= ins
->inst_offset
& 0x1ff;
3593 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3594 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3595 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3596 temp
->sreg1
= ins
->inst_destbasereg
;
3597 temp
->dreg
= mono_alloc_ireg (cfg
);
3598 ins
->inst_destbasereg
= temp
->dreg
;
3599 ins
->inst_offset
= low_imm
;
3603 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3604 temp
->inst_c0
= ins
->inst_offset
;
3605 temp
->dreg
= mono_alloc_ireg (cfg
);
3607 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3608 add_ins
->sreg1
= ins
->inst_destbasereg
;
3609 add_ins
->sreg2
= temp
->dreg
;
3610 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3612 ins
->inst_destbasereg
= add_ins
->dreg
;
3613 ins
->inst_offset
= 0;
3616 case OP_STORE_MEMBASE_IMM
:
3617 case OP_STOREI1_MEMBASE_IMM
:
3618 case OP_STOREI2_MEMBASE_IMM
:
3619 case OP_STOREI4_MEMBASE_IMM
:
3620 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3621 temp
->inst_c0
= ins
->inst_imm
;
3622 temp
->dreg
= mono_alloc_ireg (cfg
);
3623 ins
->sreg1
= temp
->dreg
;
3624 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3626 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3629 gboolean swap
= FALSE
;
3633 /* Optimized away */
3638 /* Some fp compares require swapped operands */
3639 switch (ins
->next
->opcode
) {
3641 ins
->next
->opcode
= OP_FBLT
;
3645 ins
->next
->opcode
= OP_FBLT_UN
;
3649 ins
->next
->opcode
= OP_FBGE
;
3653 ins
->next
->opcode
= OP_FBGE_UN
;
3661 ins
->sreg1
= ins
->sreg2
;
3670 bb
->last_ins
= last_ins
;
3671 bb
->max_vreg
= cfg
->next_vreg
;
3675 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3679 if (long_ins
->opcode
== OP_LNEG
) {
3681 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3682 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
3688 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3690 /* sreg is a float, dreg is an integer reg */
3692 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3694 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
3696 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3697 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3698 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3702 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3703 else if (size
== 2) {
3704 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3705 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3709 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3710 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3711 } else if (size
== 2) {
3712 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3713 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3720 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3722 /* sreg is a float, dreg is an integer reg */
3724 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3726 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
3728 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3729 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3730 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3734 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3735 else if (size
== 2) {
3736 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3737 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3741 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3742 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3743 } else if (size
== 2) {
3744 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3745 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3751 #endif /* #ifndef DISABLE_JIT */
3753 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3756 emit_thunk (guint8
*code
, gconstpointer target
)
3760 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3761 if (thumb_supported
)
3762 ARM_BX (code
, ARMREG_IP
);
3764 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3765 *(guint32
*)code
= (guint32
)(gsize
)target
;
3767 mono_arch_flush_icache (p
, code
- p
);
3771 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3773 MonoJitInfo
*ji
= NULL
;
3774 MonoThunkJitInfo
*info
;
3777 guint8
*orig_target
;
3778 guint8
*target_thunk
;
3781 domain
= mono_domain_get ();
3785 * This can be called multiple times during JITting,
3786 * save the current position in cfg->arch to avoid
3787 * doing a O(n^2) search.
3789 if (!cfg
->arch
.thunks
) {
3790 cfg
->arch
.thunks
= cfg
->thunks
;
3791 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3793 thunks
= cfg
->arch
.thunks
;
3794 thunks_size
= cfg
->arch
.thunks_size
;
3796 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3797 g_assert_not_reached ();
3800 g_assert (*(guint32
*)thunks
== 0);
3801 emit_thunk (thunks
, target
);
3802 arm_patch (code
, thunks
);
3804 cfg
->arch
.thunks
+= THUNK_SIZE
;
3805 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
3807 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3809 info
= mono_jit_info_get_thunk_info (ji
);
3812 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3813 thunks_size
= info
->thunks_size
;
3815 orig_target
= mono_arch_get_call_target (code
+ 4);
3817 mono_mini_arch_lock ();
3819 target_thunk
= NULL
;
3820 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3821 /* The call already points to a thunk, because of trampolines etc. */
3822 target_thunk
= orig_target
;
3824 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3825 if (((guint32
*)p
) [0] == 0) {
3829 } else if (((guint32
*)p
) [2] == (guint32
)(gsize
)target
) {
3830 /* Thunk already points to target */
3837 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3839 if (!target_thunk
) {
3840 mono_mini_arch_unlock ();
3841 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3842 g_assert_not_reached ();
3845 emit_thunk (target_thunk
, target
);
3846 arm_patch (code
, target_thunk
);
3847 mono_arch_flush_icache (code
, 4);
3849 mono_mini_arch_unlock ();
/*
 * arm_patch_general:
 *
 * Patch the ARM call/branch sequence at CODE so it transfers control to
 * TARGET.  Handles three encodings, dispatched on the instruction bits:
 *
 *  1) B/BL (primary opcode 101b): rewrite the 24-bit signed displacement
 *     in place.  If TARGET has its low bit set we are entering Thumb code:
 *     BL is converted to BLX (0xF condition pattern) and the link bit is
 *     reused for the half-word displacement bit.  Displacements outside
 *     [-33554432, 33554431] bytes (the 26-bit branch range) fall through
 *     to handle_thunk ().
 *
 *  2) BX ip / "mov lr, pc; mov pc, ip" style indirect sequences: the
 *     expected instruction sequence is re-emitted into a local buffer
 *     (ccode) with the same macros, asserted against what is in memory,
 *     and the embedded 32-bit address constant is overwritten.  Two cases
 *     per sequence: CODE points at the start (patching from the JIT) or
 *     at the final transfer instruction (patching from the magic
 *     trampoline), hence the positive vs. negative code32[] indices.
 *
 *  3) BLX reg (mask 0x12fff30): same scheme for the call variant.
 *
 * NOTE(review): lossy extraction — the declaration of `ccode`, `tbits`
 * and several closing braces/else arms are not visible in this chunk.
 */
3854 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3856 guint32
*code32
= (guint32
*)code
;
3857 guint32 ins
= *code32
;
/* prim = bits 27..25 of the instruction: selects the encoding class. */
3858 guint32 prim
= (ins
>> 25) & 7;
3859 guint32 tval
= GPOINTER_TO_UINT (target
);
3861 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3862 if (prim
== 5) { /* 101b */
3863 /* the diff starts 8 bytes from the branch opcode */
3864 gint diff
= target
- code
- 8;
3866 gint tmask
= 0xffffffff;
3867 if (tval
& 1) { /* entering thumb mode */
3868 diff
= target
- 1 - code
- 8;
3869 g_assert (thumb_supported
);
3870 tbits
= 0xf << 28; /* bl->blx bit pattern */
3871 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3872 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3876 tmask
= ~(1 << 24); /* clear the link bit */
3877 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch within the +33 MB limit: splice the displacement. */
3882 if (diff
<= 33554431) {
3884 ins
= (ins
& 0xff000000) | diff
;
3886 *code32
= ins
| tbits
;
3890 /* diff between 0 and -33554432 */
3891 if (diff
>= -33554432) {
3893 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3895 *code32
= ins
| tbits
;
/* Branch target out of range: go through a thunk instead. */
3900 handle_thunk (cfg
, domain
, code
, target
);
3905 * The alternative call sequences looks like this:
3907 * ldr ip, [pc] // loads the address constant
3908 * b 1f // jumps around the constant
3909 * address constant embedded in the code
3914 * There are two cases for patching:
3915 * a) at the end of method emission: in this case code points to the start
3916 * of the call sequence
3917 * b) during runtime patching of the call site: in this case code points
3918 * to the mov pc, ip instruction
3920 * We have to handle also the thunk jump code sequence:
3924 * address constant // execution never reaches here
3926 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3927 /* Branch and exchange: the address is constructed in a reg
3928 * We can patch BX when the code sequence is the following:
3929 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the reference sequence into ccode for byte-wise comparison. */
3936 guint8
*emit
= (guint8
*)ccode
;
3937 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3939 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3940 ARM_BX (emit
, ARMREG_IP
);
3942 /*patching from magic trampoline*/
3943 if (ins
== ccode
[3]) {
3944 g_assert (code32
[-4] == ccode
[0]);
3945 g_assert (code32
[-3] == ccode
[1]);
3946 g_assert (code32
[-1] == ccode
[2]);
/* code32[-2] is the embedded address constant word. */
3947 code32
[-2] = (guint32
)(gsize
)target
;
3950 /*patching from JIT*/
3951 if (ins
== ccode
[0]) {
3952 g_assert (code32
[1] == ccode
[1]);
3953 g_assert (code32
[3] == ccode
[2]);
3954 g_assert (code32
[4] == ccode
[3]);
3955 code32
[2] = (guint32
)(gsize
)target
;
/* Unrecognized BX-style sequence: refuse to patch blindly. */
3958 g_assert_not_reached ();
3959 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
/* BLX reg variant: shorter 3-word sequence, constant at code32[-1]. */
3967 guint8
*emit
= (guint8
*)ccode
;
3968 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3970 ARM_BLX_REG (emit
, ARMREG_IP
);
3972 g_assert (code32
[-3] == ccode
[0]);
3973 g_assert (code32
[-2] == ccode
[1]);
3974 g_assert (code32
[0] == ccode
[2]);
3976 code32
[-1] = (guint32
)(gsize
)target
;
/* Fallback: ldr/mov lr/mov pc/bx sequence (thunk jump or far call). */
3979 guint32
*tmp
= ccode
;
3980 guint8
*emit
= (guint8
*)tmp
;
3981 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3982 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3983 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3984 ARM_BX (emit
, ARMREG_IP
);
3985 if (ins
== ccode
[2]) {
3986 g_assert_not_reached (); // should be -2 ...
3987 code32
[-1] = (guint32
)(gsize
)target
;
3990 if (ins
== ccode
[0]) {
3991 /* handles both thunk jump code and the far call sequence */
3992 code32
[2] = (guint32
)(gsize
)target
;
3995 g_assert_not_reached ();
3997 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *
 * Convenience wrapper around arm_patch_general () for callers that have
 * no MonoCompile/MonoDomain context (cfg == NULL, domain == NULL), e.g.
 * runtime patching of already-emitted code.  See arm_patch_general ()
 * for the supported call-site encodings.
 */
4001 arm_patch (guchar
*code
, const guchar
*target
)
4003 arm_patch_general (NULL
, NULL
, code
, target
);
/*
 * mono_arm_is_rotated_imm8:
 *
 * Test whether VAL is encodable as an ARM data-processing immediate:
 * an 8-bit value rotated right by an even amount (0, 2, ..., 30 bits).
 * The loop below tries each even rotation i and computes the candidate
 * byte by rotating VAL left by i (expressed as the OR of two shifts).
 *
 * NOTE(review): the acceptance check and the return statements are not
 * visible in this extraction; only the rotation scan and the *rot_amount
 * store survive.  *rot_amount is written as (32 - i), i.e. pre-adjusted
 * for the emit macros' rotate-right field, per the comment below.
 */
4007 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
4008 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4009 * to be used with the emit macros.
4010 * Return -1 otherwise.
4013 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
4016 for (i
= 0; i
< 31; i
+= 2) {
/* Rotate val left by i bits (i == 0 handled by the full-width shift pair). */
4020 res
= (val
<< (32 - i
)) | (val
>> i
);
4023 *rot_amount
= i
? 32 - i
: 0;
/*
 * mono_arm_emit_load_imm:
 *
 * Emit instructions that materialize the 32-bit constant VAL in DREG,
 * returning the updated code pointer.  Strategy, best case first:
 *  - debug single-size mode on ARMv7: always MOVW/MOVT (fixed 2 insns);
 *  - VAL (or ~VAL) fits a rotated 8-bit immediate: single MOV (or MVN);
 *  - ARMv7: MOVW low half-word, MOVT high half-word;
 *  - pre-v7 fallback: build VAL byte by byte with MOV + up to three
 *    ADDs, starting from the lowest non-zero byte.
 *
 * NOTE(review): lossy extraction — the leading ARM_LDR_IMM (pc-relative
 * load + constant-pool skip) appears to belong to a conditional path
 * whose guard (#ifdef or if) is not visible here; likewise the else/endif
 * lines separating the strategies are missing.  TODO confirm against the
 * full file before editing.
 */
4030 * Emits in code a sequence of instructions that load the value 'val'
4031 * into the dreg register. Uses at most 4 instructions.
4034 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
4036 int imm8
, rot_amount
;
/* PC-relative load from an inline constant pool (guarded path; the
 * guard is not visible in this extraction). */
4038 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4039 /* skip the constant pool */
/* Debug option: force a fixed-size 2-instruction sequence on v7 so
 * patching code can rely on a constant layout. */
4045 if (mini_debug_options
.single_imm_size
&& v7_supported
) {
4046 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4047 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
/* Single-instruction encodings: rotated imm8 of val, or of ~val. */
4051 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
4052 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4053 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
4054 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* v7: movw/movt pair covers any 32-bit value in two instructions. */
4057 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4059 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
/* Pre-v7 fallback: assemble the value a byte at a time.  The rotation
 * constants 24/16/8 place each byte at bit positions 8/16/24. */
4063 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
4065 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4067 if (val
& 0xFF0000) {
4068 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4070 if (val
& 0xFF000000) {
4071 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4073 } else if (val
& 0xFF00) {
4074 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4075 if (val
& 0xFF0000) {
4076 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4078 if (val
& 0xFF000000) {
4079 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4081 } else if (val
& 0xFF0000) {
4082 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4083 if (val
& 0xFF000000) {
4084 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4087 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *
 * Return whether the current CPU supports Thumb interworking (the
 * file-level `thumb_supported` flag, set during hwcap detection).
 */
4093 mono_arm_thumb_supported (void)
4095 return thumb_supported
;
/*
 * mono_arm_eabi_supported:
 *
 * Return the file-level `eabi_supported` flag (whether EABI helper
 * calls may be used on this system).
 */
4099 mono_arm_eabi_supported (void)
4101 return eabi_supported
;
/*
 * mono_arm_i8_align:
 *
 * Accessor for the platform's 64-bit integer alignment.
 * NOTE(review): the function body is not visible in this extraction;
 * presumably it returns the cached i8 alignment — confirm in the full file.
 */
4105 mono_arm_i8_align (void)
/*
 * emit_move_return_value:
 *
 * After a call instruction INS, emit the code that moves the call's
 * return value from its ABI location (core regs, VFP regs, or registers
 * holding a by-value struct) into INS->dreg / the vret buffer.  Returns
 * the updated code pointer.  Dispatches on cinfo->ret.storage:
 *
 *  - RegTypeStructByVal (and the FP-register struct case): load the
 *    destination address (saved in cfg->arch.vret_addr_loc) into LR,
 *    then store each returned register to [LR + i*4] (or each VFP
 *    register for HFA-style returns), with store width chosen from the
 *    remaining struct size.
 *
 *  - OP_FCALL*/OP_RCALL* opcodes: move the float result from VFP d0/s0
 *    (hard-float ABI) or from r0/r1 via FMSR/FMDRR (soft-float ABI) into
 *    the destination VFP register, converting single<->double as needed.
 *
 * NOTE(review): lossy extraction — some case labels, `break`s and the
 * declarations of `call`/`cinfo`/`i` are not visible here.
 */
4113 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4118 call
= (MonoCallInst
*)ins
;
4119 cinfo
= call
->call_info
;
4121 switch (cinfo
->ret
.storage
) {
4122 case RegTypeStructByVal
:
4124 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4127 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4128 /* The JIT treats this as a normal call */
4132 /* Load the destination address */
4133 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
/* Small frame offset: direct load; otherwise materialize the offset
 * in LR first and use a register-offset load. */
4135 if (arm_is_imm12 (loc
->inst_offset
)) {
4136 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
4138 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4139 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4142 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4143 int rsize
= cinfo
->ret
.struct_size
;
/* Spill each returned core register i to the vret buffer; the store
 * width (byte/half/word) follows the remaining struct size. */
4145 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4146 g_assert (rsize
>= 0);
4151 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4154 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4157 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
/* FP-register struct return (HFA): store each VFP element; esize
 * selects float (4-byte stride) vs double (8-byte stride). */
4163 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4164 if (cinfo
->ret
.esize
== 4)
4165 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4167 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
4176 switch (ins
->opcode
) {
4179 case OP_FCALL_MEMBASE
:
4181 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4182 if (sig_ret
->type
== MONO_TYPE_R4
) {
/* R4 result: hard-float has it in s0 (convert to double dreg);
 * soft-float returns the bits in r0, move + convert. */
4183 if (IS_HARD_FLOAT
) {
4184 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4186 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4187 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
/* R8 result: copy d0, or reassemble from the r0/r1 pair. */
4190 if (IS_HARD_FLOAT
) {
4191 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
4193 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4200 case OP_RCALL_MEMBASE
: {
/* RCALL: single-precision result stays single (no widening). */
4205 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4206 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4207 if (IS_HARD_FLOAT
) {
4208 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4210 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4211 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4223 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4227 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4228 MonoInst
*last_ins
= NULL
;
4230 int imm8
, rot_amount
;
4232 /* we don't align basic blocks of loops on arm */
4234 if (cfg
->verbose_level
> 2)
4235 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4237 cpos
= bb
->max_offset
;
4239 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4240 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4241 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4242 code
= emit_call_seq (cfg
, code
);
4245 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4246 guint offset
= code
- cfg
->native_code
;
4247 set_code_cursor (cfg
, code
);
4248 max_len
= ins_get_size (ins
->opcode
);
4249 code
= realloc_code (cfg
, max_len
);
4250 // if (ins->cil_code)
4251 // g_print ("cil code\n");
4252 mono_debug_record_line_number (cfg
, ins
, offset
);
4254 switch (ins
->opcode
) {
4255 case OP_MEMORY_BARRIER
:
4257 ARM_DMB (code
, ARM_DMB_ISH
);
4258 } else if (v6_supported
) {
4259 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4260 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4264 code
= emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4267 code
= emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
4269 case OP_ATOMIC_EXCHANGE_I4
:
4270 case OP_ATOMIC_CAS_I4
:
4271 case OP_ATOMIC_ADD_I4
: {
4275 g_assert (v7_supported
);
4278 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4280 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4282 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4286 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4287 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4289 switch (ins
->opcode
) {
4290 case OP_ATOMIC_EXCHANGE_I4
:
4292 ARM_DMB (code
, ARM_DMB_ISH
);
4293 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4294 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4295 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4297 ARM_B_COND (code
, ARMCOND_NE
, 0);
4298 arm_patch (buf
[1], buf
[0]);
4300 case OP_ATOMIC_CAS_I4
:
4301 ARM_DMB (code
, ARM_DMB_ISH
);
4303 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4304 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4306 ARM_B_COND (code
, ARMCOND_NE
, 0);
4307 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4308 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4310 ARM_B_COND (code
, ARMCOND_NE
, 0);
4311 arm_patch (buf
[2], buf
[0]);
4312 arm_patch (buf
[1], code
);
4314 case OP_ATOMIC_ADD_I4
:
4316 ARM_DMB (code
, ARM_DMB_ISH
);
4317 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4318 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4319 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4320 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4322 ARM_B_COND (code
, ARMCOND_NE
, 0);
4323 arm_patch (buf
[1], buf
[0]);
4326 g_assert_not_reached ();
4329 ARM_DMB (code
, ARM_DMB_ISH
);
4330 if (tmpreg
!= ins
->dreg
)
4331 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4332 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4335 case OP_ATOMIC_LOAD_I1
:
4336 case OP_ATOMIC_LOAD_U1
:
4337 case OP_ATOMIC_LOAD_I2
:
4338 case OP_ATOMIC_LOAD_U2
:
4339 case OP_ATOMIC_LOAD_I4
:
4340 case OP_ATOMIC_LOAD_U4
:
4341 case OP_ATOMIC_LOAD_R4
:
4342 case OP_ATOMIC_LOAD_R8
: {
4343 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4344 ARM_DMB (code
, ARM_DMB_ISH
);
4346 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4348 switch (ins
->opcode
) {
4349 case OP_ATOMIC_LOAD_I1
:
4350 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4352 case OP_ATOMIC_LOAD_U1
:
4353 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4355 case OP_ATOMIC_LOAD_I2
:
4356 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4358 case OP_ATOMIC_LOAD_U2
:
4359 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4361 case OP_ATOMIC_LOAD_I4
:
4362 case OP_ATOMIC_LOAD_U4
:
4363 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4365 case OP_ATOMIC_LOAD_R4
:
4367 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4368 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4370 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4371 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4372 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4373 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4374 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4377 case OP_ATOMIC_LOAD_R8
:
4378 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4379 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4383 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4384 ARM_DMB (code
, ARM_DMB_ISH
);
4387 case OP_ATOMIC_STORE_I1
:
4388 case OP_ATOMIC_STORE_U1
:
4389 case OP_ATOMIC_STORE_I2
:
4390 case OP_ATOMIC_STORE_U2
:
4391 case OP_ATOMIC_STORE_I4
:
4392 case OP_ATOMIC_STORE_U4
:
4393 case OP_ATOMIC_STORE_R4
:
4394 case OP_ATOMIC_STORE_R8
: {
4395 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4396 ARM_DMB (code
, ARM_DMB_ISH
);
4398 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4400 switch (ins
->opcode
) {
4401 case OP_ATOMIC_STORE_I1
:
4402 case OP_ATOMIC_STORE_U1
:
4403 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4405 case OP_ATOMIC_STORE_I2
:
4406 case OP_ATOMIC_STORE_U2
:
4407 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4409 case OP_ATOMIC_STORE_I4
:
4410 case OP_ATOMIC_STORE_U4
:
4411 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4413 case OP_ATOMIC_STORE_R4
:
4415 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4416 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4418 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4419 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4420 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4421 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4422 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4425 case OP_ATOMIC_STORE_R8
:
4426 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4427 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4431 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4432 ARM_DMB (code
, ARM_DMB_ISH
);
4436 ARM_SMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4439 ARM_UMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4441 case OP_STOREI1_MEMBASE_IMM
:
4442 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4443 g_assert (arm_is_imm12 (ins
->inst_offset
));
4444 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4446 case OP_STOREI2_MEMBASE_IMM
:
4447 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4448 g_assert (arm_is_imm8 (ins
->inst_offset
));
4449 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4451 case OP_STORE_MEMBASE_IMM
:
4452 case OP_STOREI4_MEMBASE_IMM
:
4453 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4454 g_assert (arm_is_imm12 (ins
->inst_offset
));
4455 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4457 case OP_STOREI1_MEMBASE_REG
:
4458 g_assert (arm_is_imm12 (ins
->inst_offset
));
4459 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4461 case OP_STOREI2_MEMBASE_REG
:
4462 g_assert (arm_is_imm8 (ins
->inst_offset
));
4463 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4465 case OP_STORE_MEMBASE_REG
:
4466 case OP_STOREI4_MEMBASE_REG
:
4467 /* this case is special, since it happens for spill code after lowering has been called */
4468 if (arm_is_imm12 (ins
->inst_offset
)) {
4469 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4471 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4472 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4475 case OP_STOREI1_MEMINDEX
:
4476 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4478 case OP_STOREI2_MEMINDEX
:
4479 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4481 case OP_STORE_MEMINDEX
:
4482 case OP_STOREI4_MEMINDEX
:
4483 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4486 g_assert_not_reached ();
4488 case OP_LOAD_MEMINDEX
:
4489 case OP_LOADI4_MEMINDEX
:
4490 case OP_LOADU4_MEMINDEX
:
4491 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4493 case OP_LOADI1_MEMINDEX
:
4494 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4496 case OP_LOADU1_MEMINDEX
:
4497 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4499 case OP_LOADI2_MEMINDEX
:
4500 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4502 case OP_LOADU2_MEMINDEX
:
4503 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4505 case OP_LOAD_MEMBASE
:
4506 case OP_LOADI4_MEMBASE
:
4507 case OP_LOADU4_MEMBASE
:
4508 /* this case is special, since it happens for spill code after lowering has been called */
4509 if (arm_is_imm12 (ins
->inst_offset
)) {
4510 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4512 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4513 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4516 case OP_LOADI1_MEMBASE
:
4517 g_assert (arm_is_imm8 (ins
->inst_offset
));
4518 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4520 case OP_LOADU1_MEMBASE
:
4521 g_assert (arm_is_imm12 (ins
->inst_offset
));
4522 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4524 case OP_LOADU2_MEMBASE
:
4525 g_assert (arm_is_imm8 (ins
->inst_offset
));
4526 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4528 case OP_LOADI2_MEMBASE
:
4529 g_assert (arm_is_imm8 (ins
->inst_offset
));
4530 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4532 case OP_ICONV_TO_I1
:
4533 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4534 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4536 case OP_ICONV_TO_I2
:
4537 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4538 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4540 case OP_ICONV_TO_U1
:
4541 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4543 case OP_ICONV_TO_U2
:
4544 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4545 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4549 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4551 case OP_COMPARE_IMM
:
4552 case OP_ICOMPARE_IMM
:
4553 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4554 g_assert (imm8
>= 0);
4555 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4559 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4560 * So instead of emitting a trap, we emit a call a C function and place a
4563 //*(int*)code = 0xef9f0001;
4566 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4567 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4568 code
= emit_call_seq (cfg
, code
);
4570 case OP_RELAXED_NOP
:
4575 case OP_DUMMY_ICONST
:
4576 case OP_DUMMY_R8CONST
:
4577 case OP_DUMMY_R4CONST
:
4578 case OP_NOT_REACHED
:
4581 case OP_IL_SEQ_POINT
:
4582 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4584 case OP_SEQ_POINT
: {
4586 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4587 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4588 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4589 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4591 int dreg
= ARMREG_LR
;
4594 if (cfg
->soft_breakpoints
) {
4595 g_assert (!cfg
->compile_aot
);
4600 * For AOT, we use one got slot per method, which will point to a
4601 * SeqPointInfo structure, containing all the information required
4602 * by the code below.
4604 if (cfg
->compile_aot
) {
4605 g_assert (info_var
);
4606 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4609 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4611 * Read from the single stepping trigger page. This will cause a
4612 * SIGSEGV when single stepping is enabled.
4613 * We do this _before_ the breakpoint, so single stepping after
4614 * a breakpoint is hit will step to the next IL offset.
4616 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4619 /* Single step check */
4620 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4621 if (cfg
->soft_breakpoints
) {
4622 /* Load the address of the sequence point method variable. */
4623 var
= ss_method_var
;
4625 g_assert (var
->opcode
== OP_REGOFFSET
);
4626 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4627 /* Read the value and check whether it is non-zero. */
4628 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4629 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4630 /* Call it conditionally. */
4631 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4633 if (cfg
->compile_aot
) {
4634 /* Load the trigger page addr from the variable initialized in the prolog */
4635 var
= ss_trigger_page_var
;
4637 g_assert (var
->opcode
== OP_REGOFFSET
);
4638 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4640 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4642 *(int*)code
= (int)(gsize
)ss_trigger_page
;
4645 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4649 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4651 /* Breakpoint check */
4652 if (cfg
->compile_aot
) {
4653 const guint32 offset
= code
- cfg
->native_code
;
4657 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4658 /* Add the offset */
4659 val
= ((offset
/ 4) * sizeof (target_mgreg_t
)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4660 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4661 if (arm_is_imm12 ((int)val
)) {
4662 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4664 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4666 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4668 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4669 g_assert (!(val
& 0xFF000000));
4671 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4673 /* What is faster, a branch or a load ? */
4674 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4675 /* The breakpoint instruction */
4676 if (cfg
->soft_breakpoints
)
4677 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4679 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4680 } else if (cfg
->soft_breakpoints
) {
4681 /* Load the address of the breakpoint method into ip. */
4682 var
= bp_method_var
;
4684 g_assert (var
->opcode
== OP_REGOFFSET
);
4685 g_assert (arm_is_imm12 (var
->inst_offset
));
4686 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4689 * A placeholder for a possible breakpoint inserted by
4690 * mono_arch_set_breakpoint ().
4695 * A placeholder for a possible breakpoint inserted by
4696 * mono_arch_set_breakpoint ().
4698 for (i
= 0; i
< 4; ++i
)
4703 * Add an additional nop so skipping the bp doesn't cause the ip to point
4704 * to another IL offset.
4712 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4715 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4719 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4722 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4723 g_assert (imm8
>= 0);
4724 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4728 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4729 g_assert (imm8
>= 0);
4730 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4734 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4735 g_assert (imm8
>= 0);
4736 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4739 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4740 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4742 case OP_IADD_OVF_UN
:
4743 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4744 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4747 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4748 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4750 case OP_ISUB_OVF_UN
:
4751 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4752 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4754 case OP_ADD_OVF_CARRY
:
4755 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4756 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4758 case OP_ADD_OVF_UN_CARRY
:
4759 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4760 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4762 case OP_SUB_OVF_CARRY
:
4763 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4764 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4766 case OP_SUB_OVF_UN_CARRY
:
4767 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4768 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4772 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4775 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4776 g_assert (imm8
>= 0);
4777 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4780 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4784 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4788 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4789 g_assert (imm8
>= 0);
4790 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4794 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4795 g_assert (imm8
>= 0);
4796 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4798 case OP_ARM_RSBS_IMM
:
4799 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4800 g_assert (imm8
>= 0);
4801 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4803 case OP_ARM_RSC_IMM
:
4804 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4805 g_assert (imm8
>= 0);
4806 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4809 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4813 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4814 g_assert (imm8
>= 0);
4815 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4818 g_assert (v7s_supported
|| v7k_supported
);
4819 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4822 g_assert (v7s_supported
|| v7k_supported
);
4823 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4826 g_assert (v7s_supported
|| v7k_supported
);
4827 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4828 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4831 g_assert (v7s_supported
|| v7k_supported
);
4832 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4833 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4837 g_assert_not_reached ();
4839 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4843 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4844 g_assert (imm8
>= 0);
4845 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4848 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4852 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4853 g_assert (imm8
>= 0);
4854 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4857 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4862 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4863 else if (ins
->dreg
!= ins
->sreg1
)
4864 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4867 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4872 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4873 else if (ins
->dreg
!= ins
->sreg1
)
4874 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4877 case OP_ISHR_UN_IMM
:
4879 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4880 else if (ins
->dreg
!= ins
->sreg1
)
4881 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4884 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4887 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4890 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4893 if (ins
->dreg
== ins
->sreg2
)
4894 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4896 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4899 g_assert_not_reached ();
4902 /* FIXME: handle ovf/ sreg2 != dreg */
4903 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4904 /* FIXME: MUL doesn't set the C/O flags on ARM */
4906 case OP_IMUL_OVF_UN
:
4907 /* FIXME: handle ovf/ sreg2 != dreg */
4908 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4909 /* FIXME: MUL doesn't set the C/O flags on ARM */
4912 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4915 /* Load the GOT offset */
4916 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
4917 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4919 *(gpointer
*)code
= NULL
;
4921 /* Load the value from the GOT */
4922 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4924 case OP_OBJC_GET_SELECTOR
:
4925 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4926 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4928 *(gpointer
*)code
= NULL
;
4930 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4932 case OP_ICONV_TO_I4
:
4933 case OP_ICONV_TO_U4
:
4935 if (ins
->dreg
!= ins
->sreg1
)
4936 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4939 int saved
= ins
->sreg2
;
4940 if (ins
->sreg2
== ARM_LSW_REG
) {
4941 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4944 if (ins
->sreg1
!= ARM_LSW_REG
)
4945 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4946 if (saved
!= ARM_MSW_REG
)
4947 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4951 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4952 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4955 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4956 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4958 case OP_MOVE_F_TO_I4
:
4960 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4962 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4963 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4964 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4965 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4968 case OP_MOVE_I4_TO_F
:
4970 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4972 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4973 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4976 case OP_FCONV_TO_R4
:
4979 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4981 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4982 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4987 case OP_TAILCALL_PARAMETER
:
4988 // This opcode helps compute sizes, i.e.
4989 // of the subsequent OP_TAILCALL, but contributes no code.
4990 g_assert (ins
->next
);
4994 case OP_TAILCALL_MEMBASE
:
4995 case OP_TAILCALL_REG
: {
4996 gboolean
const tailcall_membase
= ins
->opcode
== OP_TAILCALL_MEMBASE
;
4997 gboolean
const tailcall_reg
= ins
->opcode
== OP_TAILCALL_REG
;
4998 MonoCallInst
*call
= (MonoCallInst
*)ins
;
5000 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
5003 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5005 code
= realloc_code (cfg
, max_len
);
5007 // For reg and membase, get destination in IP.
5010 g_assert (ins
->sreg1
> -1);
5011 if (ins
->sreg1
!= ARMREG_IP
)
5012 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg1
);
5013 } else if (tailcall_membase
) {
5014 g_assert (ins
->sreg1
> -1);
5015 if (!arm_is_imm12 (ins
->inst_offset
)) {
5016 g_assert (ins
->sreg1
!= ARMREG_IP
); // temp in emit_big_add
5017 code
= emit_big_add (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5018 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_IP
, 0);
5020 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5025 * The stack looks like the following:
5026 * <caller argument area>
5029 * <callee argument area>
5030 * <optionally saved IP> (about to be)
5031 * Need to copy the arguments from the callee argument area to
5032 * the caller argument area, and pop the frame.
5034 if (call
->stack_usage
) {
5035 int i
, prev_sp_offset
= 0;
5037 // When we get here, the parameters to the tailcall are already formed,
5038 // in registers and at the bottom of the grow-down stack.
5040 // Our goal is generally preserve parameters, and trim the stack,
5041 // and, before trimming stack, move parameters from the bottom of the
5042 // frame to the bottom of the trimmed frame.
5044 // For the case of large frames, and presently therefore always,
5045 // IP is used as an adjusted frame_reg.
5046 // Be conservative and save IP around the movement
5047 // of parameters from the bottom of frame to top of the frame.
5048 const gboolean save_ip
= tailcall_membase
|| tailcall_reg
;
5050 ARM_PUSH (code
, 1 << ARMREG_IP
);
5052 // When moving stacked parameters from the bottom
5053 // of the frame (sp) to the top of the frame (ip),
5054 // account, 0 or 4, for the conditional save of IP.
5055 const int offset_sp
= save_ip
? 4 : 0;
5056 const int offset_ip
= (save_ip
&& (cfg
->frame_reg
== ARMREG_SP
)) ? 4 : 0;
5058 /* Compute size of saved registers restored below */
5060 prev_sp_offset
= 2 * 4;
5062 prev_sp_offset
= 1 * 4;
5063 for (i
= 0; i
< 16; ++i
) {
5064 if (cfg
->used_int_regs
& (1 << i
))
5065 prev_sp_offset
+= 4;
5068 // Point IP at the start of where the parameters will go after trimming stack.
5069 // After locals and saved registers.
5070 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
5072 /* Copy arguments on the stack to our argument area */
5073 // FIXME a fixed size memcpy is desirable here,
5074 // at least for larger values of stack_usage.
5076 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5077 // See https://github.com/mono/mono/pull/12079
5078 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5079 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
5080 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
+ offset_sp
);
5081 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
+ offset_ip
);
5085 ARM_POP (code
, 1 << ARMREG_IP
);
5089 * Keep in sync with mono_arch_emit_epilog
5091 g_assert (!cfg
->method
->save_lmf
);
5092 code
= emit_big_add_temp (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
, ARMREG_LR
);
5094 if (cfg
->used_int_regs
)
5095 ARM_POP (code
, cfg
->used_int_regs
);
5096 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
5098 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
5101 if (tailcall_reg
|| tailcall_membase
) {
5102 code
= emit_jmp_reg (code
, ARMREG_IP
);
5104 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
5106 if (cfg
->compile_aot
) {
5107 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5109 *(gpointer
*)code
= NULL
;
5111 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
5113 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5114 cfg
->thunk_area
+= THUNK_SIZE
;
5120 /* ensure ins->sreg1 is not NULL */
5121 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
5124 g_assert (cfg
->sig_cookie
< 128);
5125 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
5126 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5136 call
= (MonoCallInst
*)ins
;
5139 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5141 mono_call_add_patch_info (cfg
, call
, code
- cfg
->native_code
);
5143 code
= emit_call_seq (cfg
, code
);
5144 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5145 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5146 code
= emit_move_return_value (cfg
, ins
, code
);
5153 case OP_VOIDCALL_REG
:
5156 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5158 code
= emit_call_reg (code
, ins
->sreg1
);
5159 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5160 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5161 code
= emit_move_return_value (cfg
, ins
, code
);
5163 case OP_FCALL_MEMBASE
:
5164 case OP_RCALL_MEMBASE
:
5165 case OP_LCALL_MEMBASE
:
5166 case OP_VCALL_MEMBASE
:
5167 case OP_VCALL2_MEMBASE
:
5168 case OP_VOIDCALL_MEMBASE
:
5169 case OP_CALL_MEMBASE
: {
5170 g_assert (ins
->sreg1
!= ARMREG_LR
);
5171 call
= (MonoCallInst
*)ins
;
5174 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5175 if (!arm_is_imm12 (ins
->inst_offset
)) {
5176 /* sreg1 might be IP */
5177 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5178 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5179 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5180 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5181 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5183 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5184 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5186 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5187 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5188 code
= emit_move_return_value (cfg
, ins
, code
);
5191 case OP_GENERIC_CLASS_INIT
: {
5195 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
5197 g_assert (arm_is_imm8 (byte_offset
));
5198 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5199 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5201 ARM_B_COND (code
, ARMCOND_NE
, 0);
5203 /* Uninitialized case */
5204 g_assert (ins
->sreg1
== ARMREG_R0
);
5206 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5207 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
5208 code
= emit_call_seq (cfg
, code
);
5210 /* Initialized case */
5211 arm_patch (jump
, code
);
5215 /* round the size to 8 bytes */
5216 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5217 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5218 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5219 /* memzero the area: dreg holds the size, sp is the pointer */
5220 if (ins
->flags
& MONO_INST_INIT
) {
5221 guint8
*start_loop
, *branch_to_cond
;
5222 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5223 branch_to_cond
= code
;
5226 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5227 arm_patch (branch_to_cond
, code
);
5228 /* decrement by 4 and set flags */
5229 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (target_mgreg_t
));
5230 ARM_B_COND (code
, ARMCOND_GE
, 0);
5231 arm_patch (code
- 4, start_loop
);
5233 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5234 if (cfg
->param_area
)
5235 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5240 MonoInst
*var
= cfg
->dyn_call_var
;
5241 guint8
*labels
[16];
5243 g_assert (var
->opcode
== OP_REGOFFSET
);
5244 g_assert (arm_is_imm12 (var
->inst_offset
));
5246 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5247 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5249 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5251 /* Save args buffer */
5252 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5254 /* Set fp argument registers */
5255 if (IS_HARD_FLOAT
) {
5256 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5257 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5259 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5260 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5261 const int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5262 g_assert (arm_is_fpimm8 (offset
));
5263 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5265 arm_patch (labels
[0], code
);
5268 /* Allocate callee area */
5269 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5270 ARM_SHL_IMM (code
, ARMREG_R1
, ARMREG_R1
, 2);
5271 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_R1
);
5273 /* Set stack args */
5275 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5276 /* R2 = pointer into regs */
5277 code
= emit_big_add (code
, ARMREG_R2
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (PARAM_REGS
* sizeof (target_mgreg_t
)));
5278 /* R3 = pointer to stack */
5279 ARM_MOV_REG_REG (code
, ARMREG_R3
, ARMREG_SP
);
5282 ARM_B_COND (code
, ARMCOND_AL
, 0);
5284 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R2
, 0);
5285 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R3
, 0);
5286 ARM_ADD_REG_IMM (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
), 0);
5287 ARM_ADD_REG_IMM (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
), 0);
5288 ARM_SUB_REG_IMM (code
, ARMREG_R1
, ARMREG_R1
, 1, 0);
5289 arm_patch (labels
[0], code
);
5290 ARM_CMP_REG_IMM (code
, ARMREG_R1
, 0, 0);
5292 ARM_B_COND (code
, ARMCOND_GT
, 0);
5293 arm_patch (labels
[2], labels
[1]);
5295 /* Set argument registers */
5296 for (i
= 0; i
< PARAM_REGS
; ++i
)
5297 ARM_LDR_IMM (code
, i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (i
* sizeof (target_mgreg_t
)));
5300 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5301 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5304 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5305 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5306 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5308 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5312 if (ins
->sreg1
!= ARMREG_R0
)
5313 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5314 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5315 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
5316 code
= emit_call_seq (cfg
, code
);
5320 if (ins
->sreg1
!= ARMREG_R0
)
5321 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5322 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5323 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
5324 code
= emit_call_seq (cfg
, code
);
5327 case OP_START_HANDLER
: {
5328 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5329 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5332 /* Reserve a param area, see filter-stack.exe */
5334 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5335 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5337 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5338 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5342 if (arm_is_imm12 (spvar
->inst_offset
)) {
5343 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5345 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5346 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5350 case OP_ENDFILTER
: {
5351 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5352 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5355 /* Free the param area */
5357 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5358 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5360 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5361 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5365 if (ins
->sreg1
!= ARMREG_R0
)
5366 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5367 if (arm_is_imm12 (spvar
->inst_offset
)) {
5368 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5370 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5371 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5372 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5374 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5377 case OP_ENDFINALLY
: {
5378 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5379 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5382 /* Free the param area */
5384 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5385 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5387 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5388 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5392 if (arm_is_imm12 (spvar
->inst_offset
)) {
5393 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5395 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5396 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5397 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5399 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5402 case OP_CALL_HANDLER
:
5403 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5404 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5405 cfg
->thunk_area
+= THUNK_SIZE
;
5406 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
5407 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
5410 if (ins
->dreg
!= ARMREG_R0
)
5411 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5415 ins
->inst_c0
= code
- cfg
->native_code
;
5418 /*if (ins->inst_target_bb->native_offset) {
5420 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5422 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5423 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5427 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5431 * In the normal case we have:
5432 * ldr pc, [pc, ins->sreg1 << 2]
5435 * ldr lr, [pc, ins->sreg1 << 2]
5437 * After follows the data.
5438 * FIXME: add aot support.
5440 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5441 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5442 code
= realloc_code (cfg
, max_len
);
5443 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5445 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5449 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5450 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5454 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5455 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5459 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5460 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5464 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5465 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5469 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5470 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5473 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5474 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5477 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5478 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5481 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5482 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5485 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5486 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5489 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5490 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5492 case OP_COND_EXC_EQ
:
5493 case OP_COND_EXC_NE_UN
:
5494 case OP_COND_EXC_LT
:
5495 case OP_COND_EXC_LT_UN
:
5496 case OP_COND_EXC_GT
:
5497 case OP_COND_EXC_GT_UN
:
5498 case OP_COND_EXC_GE
:
5499 case OP_COND_EXC_GE_UN
:
5500 case OP_COND_EXC_LE
:
5501 case OP_COND_EXC_LE_UN
:
5502 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5504 case OP_COND_EXC_IEQ
:
5505 case OP_COND_EXC_INE_UN
:
5506 case OP_COND_EXC_ILT
:
5507 case OP_COND_EXC_ILT_UN
:
5508 case OP_COND_EXC_IGT
:
5509 case OP_COND_EXC_IGT_UN
:
5510 case OP_COND_EXC_IGE
:
5511 case OP_COND_EXC_IGE_UN
:
5512 case OP_COND_EXC_ILE
:
5513 case OP_COND_EXC_ILE_UN
:
5514 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5517 case OP_COND_EXC_IC
:
5518 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5520 case OP_COND_EXC_OV
:
5521 case OP_COND_EXC_IOV
:
5522 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5524 case OP_COND_EXC_NC
:
5525 case OP_COND_EXC_INC
:
5526 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5528 case OP_COND_EXC_NO
:
5529 case OP_COND_EXC_INO
:
5530 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5542 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5545 /* floating point opcodes */
5547 if (cfg
->compile_aot
) {
5548 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5550 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5552 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5555 /* FIXME: we can optimize the imm load by dealing with part of
5556 * the displacement in LDFD (aligning to 512).
5558 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5559 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5563 if (cfg
->compile_aot
) {
5564 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5566 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5569 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5571 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5572 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5574 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5577 case OP_STORER8_MEMBASE_REG
:
5578 /* This is generated by the local regalloc pass which runs after the lowering pass */
5579 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5580 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5581 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5582 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5584 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5587 case OP_LOADR8_MEMBASE
:
5588 /* This is generated by the local regalloc pass which runs after the lowering pass */
5589 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5590 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5591 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5592 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5594 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5597 case OP_STORER4_MEMBASE_REG
:
5598 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5600 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5602 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5603 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5604 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5605 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5608 case OP_LOADR4_MEMBASE
:
5610 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5612 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5613 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5614 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5615 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5616 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5619 case OP_ICONV_TO_R_UN
: {
5620 g_assert_not_reached ();
5623 case OP_ICONV_TO_R4
:
5625 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5626 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5628 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5629 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5630 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5631 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5632 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5635 case OP_ICONV_TO_R8
:
5636 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5637 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5638 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5639 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5643 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature_internal (cfg
->method
)->ret
);
5644 if (sig_ret
->type
== MONO_TYPE_R4
) {
5646 if (IS_HARD_FLOAT
) {
5647 if (ins
->sreg1
!= ARM_VFP_D0
)
5648 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5650 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5653 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5656 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5660 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5662 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5666 case OP_FCONV_TO_I1
:
5667 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5669 case OP_FCONV_TO_U1
:
5670 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5672 case OP_FCONV_TO_I2
:
5673 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5675 case OP_FCONV_TO_U2
:
5676 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5678 case OP_FCONV_TO_I4
:
5680 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5682 case OP_FCONV_TO_U4
:
5684 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5686 case OP_FCONV_TO_I8
:
5687 case OP_FCONV_TO_U8
:
5688 g_assert_not_reached ();
5689 /* Implemented as helper calls */
5691 case OP_LCONV_TO_R_UN
:
5692 g_assert_not_reached ();
5693 /* Implemented as helper calls */
5695 case OP_LCONV_TO_OVF_I4_2
: {
5696 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5698 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5701 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5702 high_bit_not_set
= code
;
5703 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5705 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5706 valid_negative
= code
;
5707 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5708 invalid_negative
= code
;
5709 ARM_B_COND (code
, ARMCOND_AL
, 0);
5711 arm_patch (high_bit_not_set
, code
);
5713 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5714 valid_positive
= code
;
5715 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5717 arm_patch (invalid_negative
, code
);
5718 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5720 arm_patch (valid_negative
, code
);
5721 arm_patch (valid_positive
, code
);
5723 if (ins
->dreg
!= ins
->sreg1
)
5724 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5728 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5731 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5734 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5737 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5740 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5744 g_assert_not_reached ();
5748 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5754 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5759 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5762 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5763 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5767 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5770 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5771 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5775 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5778 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5779 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5780 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5784 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5787 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5788 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5792 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5795 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5796 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5797 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5801 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5804 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5805 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5809 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5812 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5813 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5817 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5820 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5821 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5824 /* ARM FPA flags table:
5825 * N Less than ARMCOND_MI
5826 * Z Equal ARMCOND_EQ
5827 * C Greater Than or Equal ARMCOND_CS
5828 * V Unordered ARMCOND_VS
5831 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5834 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5837 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5840 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5841 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5847 g_assert_not_reached ();
5851 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5853 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5854 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5855 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5859 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5860 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5865 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5866 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5868 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5869 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5871 *(guint32
*)code
= 0xffffffff;
5873 *(guint32
*)code
= 0x7fefffff;
5875 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5877 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5878 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5880 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5881 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5883 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5884 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5889 case OP_RCONV_TO_I1
:
5890 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5892 case OP_RCONV_TO_U1
:
5893 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5895 case OP_RCONV_TO_I2
:
5896 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5898 case OP_RCONV_TO_U2
:
5899 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5901 case OP_RCONV_TO_I4
:
5902 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5904 case OP_RCONV_TO_U4
:
5905 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5907 case OP_RCONV_TO_R4
:
5909 if (ins
->dreg
!= ins
->sreg1
)
5910 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5912 case OP_RCONV_TO_R8
:
5914 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5917 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5920 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5923 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5926 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5929 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5933 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5936 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5937 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5941 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5944 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5945 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5949 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5952 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5953 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5954 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5958 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5961 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5962 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5966 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5969 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5970 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5971 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5975 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5978 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5979 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5983 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5986 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5987 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5991 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5994 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5995 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5998 case OP_GC_LIVENESS_DEF
:
5999 case OP_GC_LIVENESS_USE
:
6000 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
6001 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6003 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
6004 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6005 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
6007 case OP_LIVERANGE_START
: {
6008 if (cfg
->verbose_level
> 1)
6009 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6010 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
6013 case OP_LIVERANGE_END
: {
6014 if (cfg
->verbose_level
> 1)
6015 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6016 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
6019 case OP_GC_SAFE_POINT
: {
6022 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
6023 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
6025 ARM_B_COND (code
, ARMCOND_EQ
, 0);
6026 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
6027 code
= emit_call_seq (cfg
, code
);
6028 arm_patch (buf
[0], code
);
6031 case OP_FILL_PROF_CALL_CTX
:
6032 for (int i
= 0; i
< ARMREG_MAX
; i
++)
6033 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
6034 ARM_STR_IMM (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
6037 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
6038 g_assert_not_reached ();
6041 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
6042 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6043 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
6044 g_assert_not_reached ();
6052 set_code_cursor (cfg
, code
);
6055 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level exception helpers with the JIT
 * icall machinery.  A dummy void signature is used for all of them
 * (see the comment below); presumably these helpers are reached through
 * arch-specific trampolines rather than a normal managed-to-native
 * transition — confirm against the ARM exception support code.
 */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, mono_icall_sig_void, TRUE);
	mono_register_jit_icall (mono_arm_unaligned_stack, mono_icall_sig_void, TRUE);
}
/*
 * patch_lis_ori:
 *
 *   Patch a two-instruction immediate-load pair at IP with VAL: the high
 * 16 bits go into halfword slot 1 and the low 16 bits into halfword slot 3.
 * On ARM this macro is only referenced from g_assert_not_reached () paths
 * in mono_arch_patch_code_new (), i.e. it is effectively dead code kept for
 * symmetry with other backends.
 * NOTE(review): the trailing "} while (0)" closer was lost in extraction
 * and has been restored here.
 */
#define patch_lis_ori(ip,val) do {\
	guint16 *__lis_ori = (guint16*)(ip); \
	__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
	__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
	} while (0)
6073 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
6075 unsigned char *ip
= ji
->ip
.i
+ code
;
6077 if (ji
->type
== MONO_PATCH_INFO_SWITCH
) {
6081 case MONO_PATCH_INFO_SWITCH
: {
6082 gpointer
*jt
= (gpointer
*)(ip
+ 8);
6084 /* jt is the inlined jump table, 2 instructions after ip
6085 * In the normal case we store the absolute addresses,
6086 * otherwise the displacements.
6088 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++)
6089 jt
[i
] = code
+ (int)(gsize
)ji
->data
.table
->table
[i
];
6092 case MONO_PATCH_INFO_IP
:
6093 g_assert_not_reached ();
6094 patch_lis_ori (ip
, ip
);
6096 case MONO_PATCH_INFO_METHODCONST
:
6097 case MONO_PATCH_INFO_CLASS
:
6098 case MONO_PATCH_INFO_IMAGE
:
6099 case MONO_PATCH_INFO_FIELD
:
6100 case MONO_PATCH_INFO_VTABLE
:
6101 case MONO_PATCH_INFO_IID
:
6102 case MONO_PATCH_INFO_SFLDA
:
6103 case MONO_PATCH_INFO_LDSTR
:
6104 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
6105 case MONO_PATCH_INFO_LDTOKEN
:
6106 g_assert_not_reached ();
6107 /* from OP_AOTCONST : lis + ori */
6108 patch_lis_ori (ip
, target
);
6110 case MONO_PATCH_INFO_R4
:
6111 case MONO_PATCH_INFO_R8
:
6112 g_assert_not_reached ();
6113 *((gconstpointer
*)(ip
+ 2)) = target
;
6115 case MONO_PATCH_INFO_EXC_NAME
:
6116 g_assert_not_reached ();
6117 *((gconstpointer
*)(ip
+ 1)) = target
;
6119 case MONO_PATCH_INFO_NONE
:
6120 case MONO_PATCH_INFO_BB_OVF
:
6121 case MONO_PATCH_INFO_EXC_OVF
:
6122 /* everything is dealt with at epilog output time */
6125 arm_patch_general (cfg
, domain
, ip
, (const guchar
*)target
);
6131 mono_arm_unaligned_stack (MonoMethod
*method
)
6133 g_assert_not_reached ();
6139 * Stack frame layout:
6141 * ------------------- fp
6142 * MonoLMF structure or saved registers
6143 * -------------------
6145 * -------------------
6147 * -------------------
6148 * param area size is cfg->param_area
6149 * ------------------- sp
6152 mono_arch_emit_prolog (MonoCompile
*cfg
)
6154 MonoMethod
*method
= cfg
->method
;
6156 MonoMethodSignature
*sig
;
6158 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6162 int prev_sp_offset
, reg_offset
;
6164 sig
= mono_method_signature_internal (method
);
6165 cfg
->code_size
= 256 + sig
->param_count
* 64;
6166 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6168 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6170 alloc_size
= cfg
->stack_offset
;
6176 * The iphone uses R7 as the frame pointer, and it points at the saved
6181 * We can't use r7 as a frame pointer since it points into the middle of
6182 * the frame, so we keep using our own frame pointer.
6183 * FIXME: Optimize this.
6185 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6186 prev_sp_offset
+= 8; /* r7 and lr */
6187 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6188 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6189 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6192 if (!method
->save_lmf
) {
6194 /* No need to push LR again */
6195 if (cfg
->used_int_regs
)
6196 ARM_PUSH (code
, cfg
->used_int_regs
);
6198 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6199 prev_sp_offset
+= 4;
6201 for (i
= 0; i
< 16; ++i
) {
6202 if (cfg
->used_int_regs
& (1 << i
))
6203 prev_sp_offset
+= 4;
6205 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6207 for (i
= 0; i
< 16; ++i
) {
6208 if ((cfg
->used_int_regs
& (1 << i
))) {
6209 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6210 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6214 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6215 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6217 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6218 ARM_PUSH (code
, 0x5ff0);
6219 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6220 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6222 for (i
= 0; i
< 16; ++i
) {
6223 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6224 /* The original r7 is saved at the start */
6225 if (!(iphone_abi
&& i
== ARMREG_R7
))
6226 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6230 g_assert (reg_offset
== 4 * 10);
6231 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (4 * 10);
6235 orig_alloc_size
= alloc_size
;
6236 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6237 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6238 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6239 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6242 /* the stack used in the pushed regs */
6243 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6244 cfg
->stack_usage
= alloc_size
;
6246 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6247 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6249 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6250 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6252 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6254 if (cfg
->frame_reg
!= ARMREG_SP
) {
6255 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6256 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6258 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6259 prev_sp_offset
+= alloc_size
;
6261 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6262 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6264 /* compute max_offset in order to use short forward jumps
6265 * we could skip do it on arm because the immediate displacement
6266 * for jumps is large enough, it may be useful later for constant pools
6269 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6270 MonoInst
*ins
= bb
->code
;
6271 bb
->max_offset
= max_offset
;
6273 MONO_BB_FOR_EACH_INS (bb
, ins
)
6274 max_offset
+= ins_get_size (ins
->opcode
);
6277 /* stack alignment check */
6281 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6282 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6283 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6284 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6286 ARM_B_COND (code, ARMCOND_EQ, 0);
6287 if (cfg->compile_aot)
6288 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6290 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6291 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6292 code = emit_call_seq (cfg, code);
6293 arm_patch (buf [0], code);
6297 /* store runtime generic context */
6298 if (cfg
->rgctx_var
) {
6299 MonoInst
*ins
= cfg
->rgctx_var
;
6301 g_assert (ins
->opcode
== OP_REGOFFSET
);
6303 if (arm_is_imm12 (ins
->inst_offset
)) {
6304 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6306 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6307 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6310 mono_add_var_location (cfg
, cfg
->rgctx_var
, TRUE
, MONO_ARCH_RGCTX_REG
, 0, 0, code
- cfg
->native_code
);
6311 mono_add_var_location (cfg
, cfg
->rgctx_var
, FALSE
, ins
->inst_basereg
, ins
->inst_offset
, code
- cfg
->native_code
, 0);
6314 /* load arguments allocated to register from the stack */
6315 cinfo
= get_call_info (NULL
, sig
);
6317 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6318 ArgInfo
*ainfo
= &cinfo
->ret
;
6319 inst
= cfg
->vret_addr
;
6320 g_assert (arm_is_imm12 (inst
->inst_offset
));
6321 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6324 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6325 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6327 /* Save the sig cookie address */
6328 g_assert (cookie
->storage
== RegTypeBase
);
6330 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6331 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6332 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6333 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6336 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6337 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6338 inst
= cfg
->args
[i
];
6340 if (cfg
->verbose_level
> 2)
6341 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6343 if (inst
->opcode
== OP_REGVAR
) {
6344 if (ainfo
->storage
== RegTypeGeneral
)
6345 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6346 else if (ainfo
->storage
== RegTypeFP
) {
6347 g_assert_not_reached ();
6348 } else if (ainfo
->storage
== RegTypeBase
) {
6349 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6350 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6352 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6353 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6356 g_assert_not_reached ();
6358 if (i
== 0 && sig
->hasthis
) {
6359 g_assert (ainfo
->storage
== RegTypeGeneral
);
6360 mono_add_var_location (cfg
, inst
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6361 mono_add_var_location (cfg
, inst
, TRUE
, inst
->dreg
, 0, code
- cfg
->native_code
, 0);
6364 if (cfg
->verbose_level
> 2)
6365 g_print ("Argument %d assigned to register %s\n", i
, mono_arch_regname (inst
->dreg
));
6367 switch (ainfo
->storage
) {
6369 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6370 if (ainfo
->esize
== 4)
6371 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6373 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6376 case RegTypeGeneral
:
6377 case RegTypeIRegPair
:
6378 case RegTypeGSharedVtInReg
:
6379 case RegTypeStructByAddr
:
6380 switch (ainfo
->size
) {
6382 if (arm_is_imm12 (inst
->inst_offset
))
6383 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6385 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6386 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6390 if (arm_is_imm8 (inst
->inst_offset
)) {
6391 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6393 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6394 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6398 if (arm_is_imm12 (inst
->inst_offset
)) {
6399 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6401 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6402 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6404 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6405 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6407 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6408 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6412 if (arm_is_imm12 (inst
->inst_offset
)) {
6413 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6415 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6416 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6420 if (i
== 0 && sig
->hasthis
) {
6421 g_assert (ainfo
->storage
== RegTypeGeneral
);
6422 mono_add_var_location (cfg
, inst
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6423 mono_add_var_location (cfg
, inst
, FALSE
, inst
->inst_basereg
, inst
->inst_offset
, code
- cfg
->native_code
, 0);
6426 case RegTypeBaseGen
:
6427 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6428 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6430 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6431 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6433 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6434 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6435 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6437 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6438 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6439 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6440 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6444 case RegTypeGSharedVtOnStack
:
6445 case RegTypeStructByAddrOnStack
:
6446 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6447 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6449 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6450 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6453 switch (ainfo
->size
) {
6455 if (arm_is_imm8 (inst
->inst_offset
)) {
6456 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6458 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6459 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6463 if (arm_is_imm8 (inst
->inst_offset
)) {
6464 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6466 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6467 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6471 if (arm_is_imm12 (inst
->inst_offset
)) {
6472 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6474 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6475 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6477 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6478 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6480 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6481 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6483 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6484 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6486 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6487 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6491 if (arm_is_imm12 (inst
->inst_offset
)) {
6492 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6494 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6495 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6501 int imm8
, rot_amount
;
6503 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6504 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6505 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6507 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6509 if (ainfo
->size
== 8)
6510 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6512 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6515 case RegTypeStructByVal
: {
6516 int doffset
= inst
->inst_offset
;
6520 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6521 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6522 if (arm_is_imm12 (doffset
)) {
6523 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6525 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6526 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6528 soffset
+= sizeof (target_mgreg_t
);
6529 doffset
+= sizeof (target_mgreg_t
);
6531 if (ainfo
->vtsize
) {
6532 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6533 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6534 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6539 g_assert_not_reached ();
6545 if (method
->save_lmf
)
6546 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6548 if (cfg
->arch
.seq_point_info_var
) {
6549 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6551 /* Initialize the variable from a GOT slot */
6552 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6553 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6555 *(gpointer
*)code
= NULL
;
6557 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6559 g_assert (ins
->opcode
== OP_REGOFFSET
);
6561 if (arm_is_imm12 (ins
->inst_offset
)) {
6562 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6564 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6565 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6569 /* Initialize ss_trigger_page_var */
6570 if (!cfg
->soft_breakpoints
) {
6571 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6572 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6573 int dreg
= ARMREG_LR
;
6576 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6578 code
= emit_ldr_imm (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6579 /* Load the trigger page addr */
6580 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6581 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6585 if (cfg
->arch
.seq_point_ss_method_var
) {
6586 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6587 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6589 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6590 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6592 if (cfg
->compile_aot
) {
6593 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6594 int dreg
= ARMREG_LR
;
6596 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6597 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6599 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6600 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
6601 ARM_STR_IMM (code
, dreg
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6603 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6604 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6606 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6608 *(gpointer
*)code
= &single_step_tramp
;
6610 *(gpointer
*)code
= breakpoint_tramp
;
6613 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6614 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6615 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6616 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6620 set_code_cursor (cfg
, code
);
6627 mono_arch_emit_epilog (MonoCompile
*cfg
)
6629 MonoMethod
*method
= cfg
->method
;
6630 int pos
, i
, rot_amount
;
6631 int max_epilog_size
= 16 + 20*4;
6635 if (cfg
->method
->save_lmf
)
6636 max_epilog_size
+= 128;
6638 code
= realloc_code (cfg
, max_epilog_size
);
6640 /* Save the uwind state which is needed by the out-of-line code */
6641 mono_emit_unwind_op_remember_state (cfg
, code
);
6645 /* Load returned vtypes into registers if needed */
6646 cinfo
= cfg
->arch
.cinfo
;
6647 switch (cinfo
->ret
.storage
) {
6648 case RegTypeStructByVal
: {
6649 MonoInst
*ins
= cfg
->ret
;
6651 if (cinfo
->ret
.nregs
== 1) {
6652 if (arm_is_imm12 (ins
->inst_offset
)) {
6653 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6655 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6656 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6659 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6660 int offset
= ins
->inst_offset
+ (i
* 4);
6661 if (arm_is_imm12 (offset
)) {
6662 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6664 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6665 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6672 MonoInst
*ins
= cfg
->ret
;
6674 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6675 if (cinfo
->ret
.esize
== 4)
6676 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6678 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6686 if (method
->save_lmf
) {
6687 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6688 /* all but r0-r3, sp and pc */
6689 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6692 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6694 /* This points to r4 inside MonoLMF->iregs */
6695 sp_adj
= (MONO_ABI_SIZEOF (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6697 regmask
= 0x9ff0; /* restore lr to pc */
6698 /* Skip caller saved registers not used by the method */
6699 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6700 regmask
&= ~(1 << reg
);
6705 /* Restored later */
6706 regmask
&= ~(1 << ARMREG_PC
);
6707 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6708 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6709 for (i
= 0; i
< 16; i
++) {
6710 if (regmask
& (1 << i
))
6713 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6715 ARM_POP (code
, regmask
);
6717 for (i
= 0; i
< 16; i
++) {
6718 if (regmask
& (1 << i
))
6719 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6721 /* Restore saved r7, restore LR to PC */
6722 /* Skip lr from the lmf */
6723 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6724 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (target_mgreg_t
), 0);
6725 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6726 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6729 int i
, nused_int_regs
= 0;
6731 for (i
= 0; i
< 16; i
++) {
6732 if (cfg
->used_int_regs
& (1 << i
))
6736 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6737 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6739 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6740 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6743 if (cfg
->frame_reg
!= ARMREG_SP
) {
6744 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6748 /* Restore saved gregs */
6749 if (cfg
->used_int_regs
) {
6750 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6751 ARM_POP (code
, cfg
->used_int_regs
);
6752 for (i
= 0; i
< 16; i
++) {
6753 if (cfg
->used_int_regs
& (1 << i
))
6754 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6757 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6758 /* Restore saved r7, restore LR to PC */
6759 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6761 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6762 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6766 /* Restore the unwind state to be the same as before the epilog */
6767 mono_emit_unwind_op_restore_state (cfg
, code
);
6769 set_code_cursor (cfg
, code
);
6774 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6776 MonoJumpInfo
*patch_info
;
6779 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6780 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6781 int max_epilog_size
= 50;
6783 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6784 exc_throw_pos
[i
] = NULL
;
6785 exc_throw_found
[i
] = 0;
6788 /* count the number of exception infos */
6791 * make sure we have enough space for exceptions
6793 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6794 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6795 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6796 if (!exc_throw_found
[i
]) {
6797 max_epilog_size
+= 32;
6798 exc_throw_found
[i
] = TRUE
;
6803 code
= realloc_code (cfg
, max_epilog_size
);
6805 /* add code to raise exceptions */
6806 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6807 switch (patch_info
->type
) {
6808 case MONO_PATCH_INFO_EXC
: {
6809 MonoClass
*exc_class
;
6810 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6812 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6813 if (exc_throw_pos
[i
]) {
6814 arm_patch (ip
, exc_throw_pos
[i
]);
6815 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6818 exc_throw_pos
[i
] = code
;
6820 arm_patch (ip
, code
);
6822 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6824 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6825 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6826 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
6827 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
6828 patch_info
->ip
.i
= code
- cfg
->native_code
;
6830 cfg
->thunk_area
+= THUNK_SIZE
;
6831 *(guint32
*)(gpointer
)code
= m_class_get_type_token (exc_class
) - MONO_TOKEN_TYPE_DEF
;
6841 set_code_cursor (cfg
, code
);
6844 #endif /* #ifndef DISABLE_JIT */
6847 mono_arch_finish_init (void)
6852 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
6863 mono_arch_get_patch_offset (guint8
*code
)
6870 mono_arch_flush_register_windows (void)
6875 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
6877 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6881 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
6883 return (MonoVTable
*)(gsize
)regs
[MONO_ARCH_RGCTX_REG
];
6887 mono_arch_get_cie_program (void)
6891 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Sizes, in bytes, of the code fragments emitted by the IMT trampoline
 * builder below (each ARM instruction is 4 bytes). */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
/* Extra space for the wrong-method check when ENABLE_WRONG_METHOD_CHECK is on */
#define WMC_SIZE (8 * 4)
/* Signed byte distance from address A to address B */
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6906 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6908 guint32 delta
= DISTANCE (target
, code
);
6910 g_assert (delta
>= 0 && delta
<= 0xFFF);
6911 *target
= *target
| delta
;
6916 #ifdef ENABLE_WRONG_METHOD_CHECK
/*
 * mini_dump_bad_imt:
 *
 *   Debug helper (only built with ENABLE_WRONG_METHOD_CHECK) invoked from
 * the IMT trampoline when the looked-up IMT slot does not match the
 * expected method; prints the mismatching values and the faulting ip.
 * NOTE(review): upstream follows the print with an assert; that line
 * appears to have been lost in extraction — verify before relying on
 * this returning.
 */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
}
6926 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6927 gpointer fail_tramp
)
6930 arminstr_t
*code
, *start
;
6931 gboolean large_offsets
= FALSE
;
6932 guint32
**constant_pool_starts
;
6933 arminstr_t
*vtable_target
= NULL
;
6934 int extra_space
= 0;
6935 #ifdef ENABLE_WRONG_METHOD_CHECK
6941 constant_pool_starts
= g_new0 (guint32
*, count
);
6943 for (i
= 0; i
< count
; ++i
) {
6944 MonoIMTCheckItem
*item
= imt_entries
[i
];
6945 if (item
->is_equals
) {
6946 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6948 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6949 item
->chunk_size
+= 32;
6950 large_offsets
= TRUE
;
6953 if (item
->check_target_idx
|| fail_case
) {
6954 if (!item
->compare_done
|| fail_case
)
6955 item
->chunk_size
+= CMP_SIZE
;
6956 item
->chunk_size
+= BRANCH_SIZE
;
6958 #ifdef ENABLE_WRONG_METHOD_CHECK
6959 item
->chunk_size
+= WMC_SIZE
;
6963 item
->chunk_size
+= 16;
6964 large_offsets
= TRUE
;
6966 item
->chunk_size
+= CALL_SIZE
;
6968 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6969 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6971 size
+= item
->chunk_size
;
6975 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6978 code
= mono_method_alloc_generic_virtual_trampoline (mono_domain_ambient_memory_manager (domain
), size
);
6980 MonoMemoryManager
*mem_manager
= m_class_get_mem_manager (domain
, vtable
->klass
);
6981 code
= mono_mem_manager_code_reserve (mem_manager
, size
);
6985 unwind_ops
= mono_arch_get_cie_program ();
6988 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6989 for (i
= 0; i
< count
; ++i
) {
6990 MonoIMTCheckItem
*item
= imt_entries
[i
];
6991 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
6995 if (large_offsets
) {
6996 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6997 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (target_mgreg_t
));
6999 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
7000 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7002 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
7003 vtable_target
= code
;
7004 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
7005 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
7007 for (i
= 0; i
< count
; ++i
) {
7008 MonoIMTCheckItem
*item
= imt_entries
[i
];
7009 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
7010 gint32 vtable_offset
;
7012 item
->code_target
= (guint8
*)code
;
7014 if (item
->is_equals
) {
7015 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
7017 if (item
->check_target_idx
|| fail_case
) {
7018 if (!item
->compare_done
|| fail_case
) {
7020 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7021 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7023 item
->jmp_code
= (guint8
*)code
;
7024 ARM_B_COND (code
, ARMCOND_NE
, 0);
7026 /*Enable the commented code to assert on wrong method*/
7027 #ifdef ENABLE_WRONG_METHOD_CHECK
7029 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7030 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7032 ARM_B_COND (code
, ARMCOND_EQ
, 0);
7034 /* Define this if your system is so bad that gdb is failing. */
7035 #ifdef BROKEN_DEV_ENV
7036 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
7038 arm_patch (code
- 1, mini_dump_bad_imt
);
7042 arm_patch (cond
, code
);
7046 if (item
->has_target_code
) {
7047 /* Load target address */
7048 target_code_ins
= code
;
7049 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7050 /* Save it to the fourth slot */
7051 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7052 /* Restore registers and branch */
7053 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7055 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
7057 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
7058 if (!arm_is_imm12 (vtable_offset
)) {
7060 * We need to branch to a computed address but we don't have
7061 * a free register to store it, since IP must contain the
7062 * vtable address. So we push the two values to the stack, and
7063 * load them both using LDM.
7065 /* Compute target address */
7066 vtable_offset_ins
= code
;
7067 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7068 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
7069 /* Save it to the fourth slot */
7070 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7071 /* Restore registers and branch */
7072 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7074 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
7076 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
7077 if (large_offsets
) {
7078 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7079 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (target_mgreg_t
));
7081 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
7082 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
7087 arm_patch (item
->jmp_code
, (guchar
*)code
);
7089 target_code_ins
= code
;
7090 /* Load target address */
7091 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7092 /* Save it to the fourth slot */
7093 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7094 /* Restore registers and branch */
7095 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7097 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
7098 item
->jmp_code
= NULL
;
7102 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)(gsize
)item
->key
);
7104 /*must emit after unconditional branch*/
7105 if (vtable_target
) {
7106 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)(gsize
)vtable
);
7107 item
->chunk_size
+= 4;
7108 vtable_target
= NULL
;
7111 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7112 constant_pool_starts
[i
] = code
;
7114 code
+= extra_space
;
7118 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7119 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7121 item
->jmp_code
= (guint8
*)code
;
7122 ARM_B_COND (code
, ARMCOND_HS
, 0);
7127 for (i
= 0; i
< count
; ++i
) {
7128 MonoIMTCheckItem
*item
= imt_entries
[i
];
7129 if (item
->jmp_code
) {
7130 if (item
->check_target_idx
)
7131 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7133 if (i
> 0 && item
->is_equals
) {
7135 arminstr_t
*space_start
= constant_pool_starts
[i
];
7136 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7137 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)(gsize
)imt_entries
[j
]->key
);
7144 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
);
7145 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7150 g_free (constant_pool_starts
);
7152 mono_arch_flush_icache ((guint8
*)start
, size
);
7153 MONO_PROFILER_RAISE (jit_code_buffer
, ((guint8
*)start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
7154 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
7156 g_assert (DISTANCE (start
, code
) <= size
);
7158 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
7164 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
7166 return ctx
->regs
[reg
];
7170 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, host_mgreg_t val
)
7172 ctx
->regs
[reg
] = val
;
7176 * mono_arch_get_trampolines:
7178 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7182 mono_arch_get_trampolines (gboolean aot
)
7184 return mono_arm_get_exception_trampolines (aot
);
7187 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/* AOT code is not patched in place; instead flip the per-offset entry in the
		 * SeqPointInfo table which the emitted seq point code consults at runtime. */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		/* One bp_addrs slot per 4 byte instruction word. */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (mini_debug_options.soft_breakpoints) {
		/* Soft breakpoints: patch the second word of the seq point into a call
		 * through LR.  NOTE(review): assumes the seq point's first instruction
		 * loaded the breakpoint trampoline into LR -- confirm against the
		 * OP_SEQ_POINT emission code. */
		code += 4;
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Hardware trigger: patch in a 16 byte sequence which loads from the
		 * breakpoint trigger page, faulting when the page access is revoked. */
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		/* Mirror of mono_arch_set_breakpoint (): the slot must still hold the
		 * value set there before it is cleared. */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (mini_debug_options.soft_breakpoints) {
		/* Replace the BLX emitted by mono_arch_set_breakpoint () with a nop. */
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Overwrite the whole 16 byte trigger-page load sequence with nops. */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
7270 * mono_arch_start_single_stepping:
7272 * Start single stepping.
7275 mono_arch_start_single_stepping (void)
7277 if (ss_trigger_page
)
7278 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
7280 single_step_tramp
= mini_get_single_step_trampoline ();
7284 * mono_arch_stop_single_stepping:
7286 * Stop single stepping.
7289 mono_arch_stop_single_stepping (void)
7291 if (ss_trigger_page
)
7292 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
7294 single_step_tramp
= NULL
;
7298 #define DBG_SIGNAL SIGBUS
7300 #define DBG_SIGNAL SIGSEGV
7304 * mono_arch_is_single_step_event:
7306 * Return whenever the machine state in SIGCTX corresponds to a single
7310 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7312 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7314 if (!ss_trigger_page
)
7317 /* Sometimes the address is off by 4 */
7318 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7325 * mono_arch_is_breakpoint_event:
7327 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7330 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7332 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7334 if (!ss_trigger_page
)
7337 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7338 /* Sometimes the address is off by 4 */
7339 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
7349 * mono_arch_skip_breakpoint:
7351 * See mini-amd64.c for docs.
7354 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
7356 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7360 * mono_arch_skip_single_step:
7362 * See mini-amd64.c for docs.
7365 mono_arch_skip_single_step (MonoContext
*ctx
)
7367 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = (SeqPointInfo *)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
								code);
	mono_domain_unlock (domain);

	if (!info) {
		/* NOTE(review): the domain lock is dropped between the lookup and the
		 * insert below, so two racing callers can both allocate an entry and one
		 * copy is leaked (g_hash_table_insert replaces the first) -- confirm this
		 * is acceptable or hold the lock across the whole sequence. */
		ji = mono_jit_info_table_find (domain, code);
		g_assert (ji);

		/* Sized by code_size -- presumably to hold the per-instruction-word
		 * bp_addrs table used by mono_arch_set_breakpoint (). */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		info->ss_tramp_addr = &single_step_tramp;

		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
7407 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
void
mono_arch_set_target (char *mtriple)
{
	/* The GNU target triple format is not very well documented */
	if (strstr (mtriple, "armv7")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
	}
	if (strstr (mtriple, "armv6")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
	}
	/* "armv7s"/"armv7k" substrings also match the "armv7" test above, so the
	 * baseline v7 flags are already set when these refinements apply. */
	if (strstr (mtriple, "armv7s")) {
		v7s_supported = TRUE;
	}
	if (strstr (mtriple, "armv7k")) {
		v7k_supported = TRUE;
	}
	if (strstr (mtriple, "thumbv7s")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
		v7s_supported = TRUE;
		thumb_supported = TRUE;
		thumb2_supported = TRUE;
	}
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		thumb_supported = TRUE;
	}
	if (strstr (mtriple, "gnueabi"))
		eabi_supported = TRUE;
}
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether OPCODE can be implemented by this backend on the target CPU.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	/* 32 bit integer atomics: only available when targeting ARMv7. */
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		return v7_supported;
	/* Float atomics additionally require VFP hardware floating point. */
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return v7_supported && IS_VFP;
	default:
		return FALSE;
	}
}
7483 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
7485 return get_call_info (mp
, sig
);
7489 mono_arch_get_get_tls_tramp (void)
/*
 * emit_aotconst:
 *
 *   Emit code to load the value of the patch (PATCH_TYPE, DATA) into DREG,
 * recording the patch in CFG. Returns the updated code pointer.
 */
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
	/* LDR with a PC base reads the literal word below (PC reads as insn + 8 in
	 * ARM state), i.e. the NULL placeholder filled in when the patch is applied. */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* Branch over the embedded literal word. */
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Same sequence as emit_aotconst () but the patch is recorded on the
 * caller-provided MonoJumpInfo list pointed to by JI_LIST, with the offset
 * computed relative to BUF.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
	MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;

	/* Prepend the patch at the current offset into the buffer. */
	*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
	/* PC-relative load of the literal word below; the branch skips over it. */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	/* Placeholder filled in when the patch is applied. */
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the final value from the GOT slot. */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
7523 mono_arch_load_function (MonoJitICallId jit_icall_id
)
7525 gpointer target
= NULL
;
7526 switch (jit_icall_id
) {
7527 #undef MONO_AOT_ICALL
7528 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
7529 MONO_AOT_ICALL (mono_arm_resume_unwind
)
7530 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call
)
7531 MONO_AOT_ICALL (mono_arm_throw_exception
)
7532 MONO_AOT_ICALL (mono_arm_throw_exception_by_token
)
7533 MONO_AOT_ICALL (mono_arm_unaligned_stack
)