3 * ARM backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
32 #include "debugger-agent.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
37 #include "mono/utils/mono-tls-inline.h"
39 /* Sanity check: This makes no sense */
40 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
41 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
45 * IS_SOFT_FLOAT: Is full software floating point used?
46 * IS_HARD_FLOAT: Is full hardware floating point used?
47 * IS_VFP: Is hardware floating point with software ABI used?
49 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
50 * IS_VFP may delegate to mono_arch_is_soft_float ().
53 #if defined(ARM_FPU_VFP_HARD)
54 #define IS_SOFT_FLOAT (FALSE)
55 #define IS_HARD_FLOAT (TRUE)
57 #elif defined(ARM_FPU_NONE)
58 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
59 #define IS_HARD_FLOAT (FALSE)
60 #define IS_VFP (!mono_arch_is_soft_float ())
62 #define IS_SOFT_FLOAT (FALSE)
63 #define IS_HARD_FLOAT (FALSE)
67 #define THUNK_SIZE (3 * 4)
71 void sys_icache_invalidate (void *start
, size_t len
);
75 /* This mutex protects architecture specific caches */
76 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
77 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
78 static mono_mutex_t mini_arch_mutex
;
80 static gboolean v5_supported
= FALSE
;
81 static gboolean v6_supported
= FALSE
;
82 static gboolean v7_supported
= FALSE
;
83 static gboolean v7s_supported
= FALSE
;
84 static gboolean v7k_supported
= FALSE
;
85 static gboolean thumb_supported
= FALSE
;
86 static gboolean thumb2_supported
= FALSE
;
88 * Whenever to use the ARM EABI
90 static gboolean eabi_supported
= FALSE
;
93 * Whenever to use the iphone ABI extensions:
94 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
95 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
96 * This is required for debugging/profiling tools to work, but it has some overhead so it should
97 * only be turned on in debug builds.
99 static gboolean iphone_abi
= FALSE
;
102 * The FPU we are generating code for. This is NOT runtime configurable right now,
103 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
105 static MonoArmFPU arm_fpu
;
107 #if defined(ARM_FPU_VFP_HARD)
109 * On armhf, d0-d7 are used for argument passing and d8-d15
110 * must be preserved across calls, which leaves us no room
111 * for scratch registers. So we use d14-d15 but back up their
112 * previous contents to a stack slot before using them - see
113 * mono_arm_emit_vfp_scratch_save/_restore ().
115 static int vfp_scratch1
= ARM_VFP_D14
;
116 static int vfp_scratch2
= ARM_VFP_D15
;
119 * On armel, d0-d7 do not need to be preserved, so we can
120 * freely make use of them as scratch registers.
122 static int vfp_scratch1
= ARM_VFP_D0
;
123 static int vfp_scratch2
= ARM_VFP_D1
;
128 static gpointer single_step_tramp
, breakpoint_tramp
;
131 * The code generated for sequence points reads from this location, which is
132 * made read-only when single stepping is enabled.
134 static gpointer ss_trigger_page
;
136 /* Enabled breakpoints read from this trigger page */
137 static gpointer bp_trigger_page
;
141 * floating point support: on ARM it is a mess, there are at least 3
142 * different setups, each of which binary incompat with the other.
143 * 1) FPA: old and ugly, but unfortunately what current distros use
144 * the double binary format has the two words swapped. 8 double registers.
145 * Implemented usually by kernel emulation.
146 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
147 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
148 * 3) VFP: the new and actually sensible and useful FP support. Implemented
149 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
151 * We do not care about FPA. We will support soft float and VFP.
153 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
154 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
155 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
157 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
158 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
159 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
161 //#define DEBUG_IMT 0
164 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
168 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
);
/*
 * mono_arch_regname:
 *
 *   Return the symbolic name of integer register REG (0-15).
 * Indices follow the ARM numbering used by the backend: r0-r3,
 * v1-v7, fp, ip, sp, lr, pc.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	/* Guard the table lookup; out-of-range requests get a sentinel string. */
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return the symbolic name of floating point register REG (0-31).
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	/* Guard the table lookup; out-of-range requests get a sentinel string. */
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
204 emit_big_add_temp (guint8
*code
, int dreg
, int sreg
, int imm
, int temp
)
206 int imm8
, rot_amount
;
208 g_assert (temp
== ARMREG_IP
|| temp
== ARMREG_LR
);
212 ARM_MOV_REG_REG (code
, dreg
, sreg
);
213 } else if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
214 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
218 code
= mono_arm_emit_load_imm (code
, temp
, imm
);
219 ARM_ADD_REG_REG (code
, dreg
, sreg
, temp
);
221 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
222 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
228 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
230 return emit_big_add_temp (code
, dreg
, sreg
, imm
, ARMREG_IP
);
234 emit_ldr_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
236 if (!arm_is_imm12 (imm
)) {
237 g_assert (dreg
!= sreg
);
238 code
= emit_big_add (code
, dreg
, sreg
, imm
);
239 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
241 ARM_LDR_IMM (code
, dreg
, sreg
, imm
);
246 /* If dreg == sreg, this clobbers IP */
248 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
250 int imm8
, rot_amount
;
251 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
252 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
256 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
257 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
259 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
260 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
266 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
268 /* we can use r0-r3, since this is called only for incoming args on the stack */
269 if (size
> sizeof (target_mgreg_t
) * 4) {
271 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
272 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
273 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
274 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
275 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
276 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
277 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
278 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
279 ARM_B_COND (code
, ARMCOND_NE
, 0);
280 arm_patch (code
- 4, start_loop
);
283 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
284 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
286 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
287 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
293 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
294 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
295 doffset
= soffset
= 0;
297 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
298 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
304 g_assert (size
== 0);
309 emit_jmp_reg (guint8
*code
, int reg
)
314 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
319 emit_call_reg (guint8
*code
, int reg
)
322 ARM_BLX_REG (code
, reg
);
324 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
325 return emit_jmp_reg (code
, reg
);
331 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
333 if (cfg
->method
->dynamic
) {
334 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
336 *(gpointer
*)code
= NULL
;
338 code
= emit_call_reg (code
, ARMREG_IP
);
342 cfg
->thunk_area
+= THUNK_SIZE
;
347 mono_arm_patchable_b (guint8
*code
, int cond
)
349 ARM_B_COND (code
, cond
, 0);
354 mono_arm_patchable_bl (guint8
*code
, int cond
)
356 ARM_BL_COND (code
, cond
, 0);
360 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
361 #define HAVE_AEABI_READ_TP 1
364 #ifdef HAVE_AEABI_READ_TP
366 gpointer
__aeabi_read_tp (void);
371 mono_arch_have_fast_tls (void)
373 #ifdef HAVE_AEABI_READ_TP
374 static gboolean have_fast_tls
= FALSE
;
375 static gboolean inited
= FALSE
;
377 if (mini_debug_options
.use_fallback_tls
)
381 return have_fast_tls
;
386 tp1
= __aeabi_read_tp ();
387 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2
));
389 have_fast_tls
= tp1
&& tp1
== tp2
;
392 return have_fast_tls
;
399 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
401 g_assert (v7_supported
);
402 ARM_MRC (code
, 15, 0, dreg
, 13, 0, 3);
403 ARM_LDR_IMM (code
, dreg
, dreg
, tls_offset
);
408 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
410 int tp_reg
= (sreg
!= ARMREG_R0
) ? ARMREG_R0
: ARMREG_R1
;
411 g_assert (v7_supported
);
412 ARM_MRC (code
, 15, 0, tp_reg
, 13, 0, 3);
413 ARM_STR_IMM (code
, sreg
, tp_reg
, tls_offset
);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
424 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
428 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
) != -1) {
429 code
= emit_tls_get (code
, ARMREG_R0
, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
));
431 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
432 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr_extern
));
433 code
= emit_call_seq (cfg
, code
);
435 /* we build the MonoLMF structure on the stack - see mini-arm.h */
436 /* lmf_offset is the offset from the previous stack pointer,
437 * alloc_size is the total stack space allocated, so the offset
438 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
439 * The pointer to the struct is put in r1 (new_lmf).
440 * ip is used as scratch
441 * The callee-saved registers are already in the MonoLMF structure
443 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
444 /* r0 is the result from mono_get_lmf_addr () */
445 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
446 /* new_lmf->previous_lmf = *lmf_addr */
447 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
448 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
449 /* *(lmf_addr) = r1 */
450 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
451 /* Skip method (only needed for trampoline LMF frames) */
452 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
453 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
454 /* save the current IP */
455 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
456 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
458 for (i
= 0; i
< MONO_ABI_SIZEOF (MonoLMF
); i
+= sizeof (target_mgreg_t
))
459 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
470 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
474 set_code_cursor (cfg
, code
);
476 for (list
= inst
->float_args
; list
; list
= list
->next
) {
477 FloatArgData
*fad
= (FloatArgData
*)list
->data
;
478 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
479 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
481 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
487 code
= realloc_code (cfg
, *max_len
);
490 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
491 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
493 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
495 set_code_cursor (cfg
, code
);
496 *offset
= code
- cfg
->native_code
;
503 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
507 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
509 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
512 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
513 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
514 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
516 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
523 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
527 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
529 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
532 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
533 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
534 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
536 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
545 * Emit code to pop an LMF structure from the LMF stack.
548 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
552 if (lmf_offset
< 32) {
553 basereg
= cfg
->frame_reg
;
558 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
561 /* ip = previous_lmf */
562 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
564 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
565 /* *(lmf_addr) = previous_lmf */
566 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
571 #endif /* #ifndef DISABLE_JIT */
574 * mono_arch_get_argument_info:
575 * @csig: a method signature
576 * @param_count: the number of parameters to consider
577 * @arg_info: an array to store the result infos
579 * Gathers information on parameters such as size, alignment and
580 * padding. arg_info should be large enought to hold param_count + 1 entries.
582 * Returns the size of the activation frame.
585 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
587 int k
, frame_size
= 0;
588 guint32 size
, align
, pad
;
592 t
= mini_get_underlying_type (csig
->ret
);
593 if (MONO_TYPE_ISSTRUCT (t
)) {
594 frame_size
+= sizeof (target_mgreg_t
);
598 arg_info
[0].offset
= offset
;
601 frame_size
+= sizeof (target_mgreg_t
);
605 arg_info
[0].size
= frame_size
;
607 for (k
= 0; k
< param_count
; k
++) {
608 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
610 /* ignore alignment for now */
613 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
614 arg_info
[k
].pad
= pad
;
616 arg_info
[k
+ 1].pad
= 0;
617 arg_info
[k
+ 1].size
= size
;
619 arg_info
[k
+ 1].offset
= offset
;
623 align
= MONO_ARCH_FRAME_ALIGNMENT
;
624 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
625 arg_info
[k
].pad
= pad
;
630 #define MAX_ARCH_DELEGATE_PARAMS 3
633 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
635 guint8
*code
, *start
;
636 GSList
*unwind_ops
= mono_arch_get_cie_program ();
639 start
= code
= mono_global_codeman_reserve (12);
641 /* Replace the this argument with the target */
642 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
643 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
644 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
646 g_assert ((code
- start
) <= 12);
648 mono_arch_flush_icache (start
, 12);
649 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
653 size
= 8 + param_count
* 4;
654 start
= code
= mono_global_codeman_reserve (size
);
656 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
657 /* slide down the arguments */
658 for (i
= 0; i
< param_count
; ++i
) {
659 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
661 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
663 g_assert ((code
- start
) <= size
);
665 mono_arch_flush_icache (start
, size
);
666 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
670 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
672 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
673 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
677 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
683 * mono_arch_get_delegate_invoke_impls:
685 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
689 mono_arch_get_delegate_invoke_impls (void)
695 get_delegate_invoke_impl (&info
, TRUE
, 0);
696 res
= g_slist_prepend (res
, info
);
698 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
699 get_delegate_invoke_impl (&info
, FALSE
, i
);
700 res
= g_slist_prepend (res
, info
);
707 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
709 guint8
*code
, *start
;
712 /* FIXME: Support more cases */
713 sig_ret
= mini_get_underlying_type (sig
->ret
);
714 if (MONO_TYPE_ISSTRUCT (sig_ret
))
718 static guint8
* cached
= NULL
;
719 mono_mini_arch_lock ();
721 mono_mini_arch_unlock ();
725 if (mono_ee_features
.use_aot_trampolines
) {
726 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
729 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
730 mono_tramp_info_register (info
, NULL
);
733 mono_mini_arch_unlock ();
736 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
739 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
741 for (i
= 0; i
< sig
->param_count
; ++i
)
742 if (!mono_is_regsize_var (sig
->params
[i
]))
745 mono_mini_arch_lock ();
746 code
= cache
[sig
->param_count
];
748 mono_mini_arch_unlock ();
752 if (mono_ee_features
.use_aot_trampolines
) {
753 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
754 start
= (guint8
*)mono_aot_get_trampoline (name
);
758 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
759 mono_tramp_info_register (info
, NULL
);
761 cache
[sig
->param_count
] = start
;
762 mono_mini_arch_unlock ();
770 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
776 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
778 return (gpointer
)regs
[ARMREG_R0
];
782 * Initialize the cpu to execute managed code.
785 mono_arch_cpu_init (void)
787 i8_align
= MONO_ABI_ALIGNOF (gint64
);
788 #ifdef MONO_CROSS_COMPILE
789 /* Need to set the alignment of i8 since it can different on the target */
790 #ifdef TARGET_ANDROID
792 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
798 * Initialize architecture specific code.
801 mono_arch_init (void)
805 #ifdef TARGET_WATCHOS
806 mini_debug_options
.soft_breakpoints
= TRUE
;
809 mono_os_mutex_init_recursive (&mini_arch_mutex
);
810 if (mini_debug_options
.soft_breakpoints
) {
812 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
814 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
815 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
, MONO_MEM_ACCOUNT_OTHER
);
816 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
819 #if defined(__ARM_EABI__)
820 eabi_supported
= TRUE
;
823 #if defined(ARM_FPU_VFP_HARD)
824 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
826 arm_fpu
= MONO_ARM_FPU_VFP
;
828 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
830 * If we're compiling with a soft float fallback and it
831 * turns out that no VFP unit is available, we need to
832 * switch to soft float. We don't do this for iOS, since
833 * iOS devices always have a VFP unit.
835 if (!mono_hwcap_arm_has_vfp
)
836 arm_fpu
= MONO_ARM_FPU_NONE
;
839 * This environment variable can be useful in testing
840 * environments to make sure the soft float fallback
841 * works. Most ARM devices have VFP units these days, so
842 * normally soft float code would not be exercised much.
844 char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
846 if (soft
&& !strncmp (soft
, "1", 1))
847 arm_fpu
= MONO_ARM_FPU_NONE
;
852 v5_supported
= mono_hwcap_arm_is_v5
;
853 v6_supported
= mono_hwcap_arm_is_v6
;
854 v7_supported
= mono_hwcap_arm_is_v7
;
857 * On weird devices, the hwcap code may fail to detect
858 * the ARM version. In that case, we can at least safely
859 * assume the version the runtime was compiled for.
871 #if defined(TARGET_IOS)
872 /* iOS is special-cased here because we don't yet
873 have a way to properly detect CPU features on it. */
874 thumb_supported
= TRUE
;
877 thumb_supported
= mono_hwcap_arm_has_thumb
;
878 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
881 /* Format: armv(5|6|7[s])[-thumb[2]] */
882 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
884 /* Do this here so it overrides any detection. */
886 if (strncmp (cpu_arch
, "armv", 4) == 0) {
887 v5_supported
= cpu_arch
[4] >= '5';
888 v6_supported
= cpu_arch
[4] >= '6';
889 v7_supported
= cpu_arch
[4] >= '7';
890 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
891 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
894 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
895 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
}
909 * This function returns the optimizations supported on this cpu.
912 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
914 /* no arm-specific optimizations yet */
920 * This function test for all SIMD functions supported.
922 * Returns a bitmask corresponding to all supported versions.
926 mono_arch_cpu_enumerate_simd_versions (void)
928 /* SIMD is currently unimplemented */
933 mono_arm_is_hard_float (void)
935 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
941 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
943 if (v7s_supported
|| v7k_supported
) {
957 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
959 mono_arch_is_soft_float (void)
961 return arm_fpu
== MONO_ARM_FPU_NONE
;
966 is_regsize_var (MonoType
*t
)
970 t
= mini_get_underlying_type (t
);
977 case MONO_TYPE_FNPTR
:
979 case MONO_TYPE_OBJECT
:
981 case MONO_TYPE_GENERICINST
:
982 if (!mono_type_generic_inst_is_valuetype (t
))
985 case MONO_TYPE_VALUETYPE
:
992 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
997 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
998 MonoInst
*ins
= cfg
->varinfo
[i
];
999 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
1002 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
1005 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
1008 /* we can only allocate 32 bit values */
1009 if (is_regsize_var (ins
->inst_vtype
)) {
1010 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
1011 g_assert (i
== vmv
->idx
);
1012 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
1020 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
1024 mono_arch_compute_omit_fp (cfg
);
1027 * FIXME: Interface calls might go through a static rgctx trampoline which
1028 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1031 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1032 cfg
->uses_rgctx_reg
= TRUE
;
1034 if (cfg
->arch
.omit_fp
)
1035 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1036 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1037 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1038 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1040 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1041 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1043 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1044 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1045 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1046 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1047 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1048 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1054 * mono_arch_regalloc_cost:
1056 * Return the cost, in number of memory references, of the action of
1057 * allocating the variable VMV into a register during global register
1061 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1067 #endif /* #ifndef DISABLE_JIT */
1070 mono_arch_flush_icache (guint8
*code
, gint size
)
1072 #if defined(MONO_CROSS_COMPILE)
1074 sys_icache_invalidate (code
, size
);
1076 __builtin___clear_cache ((char*)code
, (char*)code
+ size
);
1083 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1086 if (*gr
> ARMREG_R3
) {
1088 ainfo
->offset
= *stack_size
;
1089 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1090 ainfo
->storage
= RegTypeBase
;
1093 ainfo
->storage
= RegTypeGeneral
;
1100 split
= i8_align
== 4;
1105 if (*gr
== ARMREG_R3
&& split
) {
1106 /* first word in r3 and the second on the stack */
1107 ainfo
->offset
= *stack_size
;
1108 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1109 ainfo
->storage
= RegTypeBaseGen
;
1111 } else if (*gr
>= ARMREG_R3
) {
1112 if (eabi_supported
) {
1113 /* darwin aligns longs to 4 byte only */
1114 if (i8_align
== 8) {
1119 ainfo
->offset
= *stack_size
;
1120 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1121 ainfo
->storage
= RegTypeBase
;
1124 if (eabi_supported
) {
1125 if (i8_align
== 8 && ((*gr
) & 1))
1128 ainfo
->storage
= RegTypeIRegPair
;
1137 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1140 * If we're calling a function like this:
1142 * void foo(float a, double b, float c)
1144 * We pass a in s0 and b in d1. That leaves us
1145 * with s1 being unused. The armhf ABI recognizes
1146 * this and requires register assignment to then
1147 * use that for the next single-precision arg,
1148 * i.e. c in this example. So float_spare either
1149 * tells us which reg to use for the next single-
1150 * precision arg, or it's -1, meaning use *fpr.
1152 * Note that even though most of the JIT speaks
1153 * double-precision, fpr represents single-
1154 * precision registers.
1156 * See parts 5.5 and 6.1.2 of the AAPCS for how
1160 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1161 ainfo
->storage
= RegTypeFP
;
1165 * If we're passing a double-precision value
1166 * and *fpr is odd (e.g. it's s1, s3, ...)
1167 * we need to use the next even register. So
1168 * we mark the current *fpr as a spare that
1169 * can be used for the next single-precision
1173 *float_spare
= *fpr
;
1178 * At this point, we have an even register
1179 * so we assign that and move along.
1183 } else if (*float_spare
>= 0) {
1185 * We're passing a single-precision value
1186 * and it looks like a spare single-
1187 * precision register is available. Let's
1191 ainfo
->reg
= *float_spare
;
1195 * If we hit this branch, we're passing a
1196 * single-precision value and we can simply
1197 * use the next available register.
1205 * We've exhausted available floating point
1206 * regs, so pass the rest on the stack.
1214 ainfo
->offset
= *stack_size
;
1215 ainfo
->reg
= ARMREG_SP
;
1216 ainfo
->storage
= RegTypeBase
;
1218 *stack_size
+= is_double
? 8 : 4;
1223 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1227 MonoClassField
*field
;
1228 MonoType
*ftype
, *prev_ftype
= NULL
;
1231 klass
= mono_class_from_mono_type_internal (t
);
1233 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1234 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1236 ftype
= mono_field_get_type_internal (field
);
1237 ftype
= mini_get_underlying_type (ftype
);
1239 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1240 int nested_nfields
, nested_esize
;
1242 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1244 if (nested_esize
== 4)
1245 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1247 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1248 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1251 nfields
+= nested_nfields
;
1253 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1255 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1261 if (nfields
== 0 || nfields
> 4)
1263 *out_nfields
= nfields
;
1264 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1269 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1271 guint i
, gr
, fpr
, pstart
;
1273 int n
= sig
->hasthis
+ sig
->param_count
;
1277 guint32 stack_size
= 0;
1279 gboolean is_pinvoke
= sig
->pinvoke
;
1280 gboolean vtype_retaddr
= FALSE
;
1283 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1285 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1292 t
= mini_get_underlying_type (sig
->ret
);
1303 case MONO_TYPE_FNPTR
:
1304 case MONO_TYPE_OBJECT
:
1305 cinfo
->ret
.storage
= RegTypeGeneral
;
1306 cinfo
->ret
.reg
= ARMREG_R0
;
1310 cinfo
->ret
.storage
= RegTypeIRegPair
;
1311 cinfo
->ret
.reg
= ARMREG_R0
;
1315 cinfo
->ret
.storage
= RegTypeFP
;
1317 if (t
->type
== MONO_TYPE_R4
)
1318 cinfo
->ret
.size
= 4;
1320 cinfo
->ret
.size
= 8;
1322 if (IS_HARD_FLOAT
) {
1323 cinfo
->ret
.reg
= ARM_VFP_F0
;
1325 cinfo
->ret
.reg
= ARMREG_R0
;
1328 case MONO_TYPE_GENERICINST
:
1329 if (!mono_type_generic_inst_is_valuetype (t
)) {
1330 cinfo
->ret
.storage
= RegTypeGeneral
;
1331 cinfo
->ret
.reg
= ARMREG_R0
;
1334 if (mini_is_gsharedvt_variable_type (t
)) {
1335 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1339 case MONO_TYPE_VALUETYPE
:
1340 case MONO_TYPE_TYPEDBYREF
:
1341 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1342 cinfo
->ret
.storage
= RegTypeHFA
;
1344 cinfo
->ret
.nregs
= nfields
;
1345 cinfo
->ret
.esize
= esize
;
1348 int native_size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1351 #ifdef TARGET_WATCHOS
1356 if (native_size
<= max_size
) {
1357 cinfo
->ret
.storage
= RegTypeStructByVal
;
1358 cinfo
->ret
.struct_size
= native_size
;
1359 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1361 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1364 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1369 case MONO_TYPE_MVAR
:
1370 g_assert (mini_is_gsharedvt_type (t
));
1371 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1373 case MONO_TYPE_VOID
:
1376 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1379 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1384 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1385 * the first argument, allowing 'this' to be always passed in the first arg reg.
1386 * Also do this if the first argument is a reference type, since virtual calls
1387 * are sometimes made using calli without sig->hasthis set, like in the delegate
1390 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1392 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1394 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1398 cinfo
->ret
.reg
= gr
;
1400 cinfo
->vret_arg_index
= 1;
1404 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1407 if (vtype_retaddr
) {
1408 cinfo
->ret
.reg
= gr
;
1413 DEBUG(g_print("params: %d\n", sig
->param_count
));
1414 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1415 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1417 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1418 /* Prevent implicit arguments and sig_cookie from
1419 being passed in registers */
1422 /* Emit the signature cookie just before the implicit arguments */
1423 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1425 DEBUG(g_print("param %d: ", i
));
1426 if (sig
->params
[i
]->byref
) {
1427 DEBUG(g_print("byref\n"));
1428 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1432 t
= mini_get_underlying_type (sig
->params
[i
]);
1435 cinfo
->args
[n
].is_signed
= 1;
1437 cinfo
->args
[n
].size
= 1;
1438 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1441 cinfo
->args
[n
].is_signed
= 1;
1443 cinfo
->args
[n
].size
= 2;
1444 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1448 cinfo
->args
[n
].size
= 4;
1449 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1454 case MONO_TYPE_FNPTR
:
1455 case MONO_TYPE_OBJECT
:
1456 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1457 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1459 case MONO_TYPE_GENERICINST
:
1460 if (!mono_type_generic_inst_is_valuetype (t
)) {
1461 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1462 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1465 if (mini_is_gsharedvt_variable_type (t
)) {
1466 /* gsharedvt arguments are passed by ref */
1467 g_assert (mini_is_gsharedvt_type (t
));
1468 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1469 switch (ainfo
->storage
) {
1470 case RegTypeGeneral
:
1471 ainfo
->storage
= RegTypeGSharedVtInReg
;
1474 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1477 g_assert_not_reached ();
1482 case MONO_TYPE_TYPEDBYREF
:
1483 case MONO_TYPE_VALUETYPE
: {
1486 int nwords
, nfields
, esize
;
1489 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1490 if (fpr
+ nfields
< ARM_VFP_F16
) {
1491 ainfo
->storage
= RegTypeHFA
;
1493 ainfo
->nregs
= nfields
;
1494 ainfo
->esize
= esize
;
1505 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1506 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1507 align
= sizeof (target_mgreg_t
);
1509 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1511 size
= mono_class_native_size (klass
, &align
);
1513 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1515 DEBUG(g_print ("load %d bytes struct\n", size
));
1517 #ifdef TARGET_WATCHOS
1518 /* Watchos pass large structures by ref */
1519 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1520 if (sig
->pinvoke
&& size
> 16) {
1521 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1522 switch (ainfo
->storage
) {
1523 case RegTypeGeneral
:
1524 ainfo
->storage
= RegTypeStructByAddr
;
1527 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1530 g_assert_not_reached ();
1539 align_size
+= (sizeof (target_mgreg_t
) - 1);
1540 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1541 nwords
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1542 ainfo
->storage
= RegTypeStructByVal
;
1543 ainfo
->struct_size
= size
;
1544 ainfo
->align
= align
;
1546 if (eabi_supported
) {
1547 if (align
>= 8 && (gr
& 1))
1550 if (gr
> ARMREG_R3
) {
1552 ainfo
->vtsize
= nwords
;
1554 int rest
= ARMREG_R3
- gr
+ 1;
1555 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1557 ainfo
->size
= n_in_regs
;
1558 ainfo
->vtsize
= nwords
- n_in_regs
;
1561 nwords
-= n_in_regs
;
1563 stack_size
= ALIGN_TO (stack_size
, align
);
1565 ainfo
->offset
= stack_size
;
1566 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1567 stack_size
+= nwords
* sizeof (target_mgreg_t
);
1573 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1579 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1581 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1587 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1589 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1592 case MONO_TYPE_MVAR
:
1593 /* gsharedvt arguments are passed by ref */
1594 g_assert (mini_is_gsharedvt_type (t
));
1595 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1596 switch (ainfo
->storage
) {
1597 case RegTypeGeneral
:
1598 ainfo
->storage
= RegTypeGSharedVtInReg
;
1601 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1604 g_assert_not_reached ();
1608 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1613 /* Handle the case where there are no implicit arguments */
1614 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1615 /* Prevent implicit arguments and sig_cookie from
1616 being passed in registers */
1619 /* Emit the signature cookie just before the implicit arguments */
1620 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1623 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1624 stack_size
= ALIGN_TO (stack_size
, MONO_ARCH_FRAME_ALIGNMENT
);
1626 cinfo
->stack_usage
= stack_size
;
1631 * We need to create a temporary value if the argument is not stored in
1632 * a linear memory range in the ccontext (this normally happens for
1633 * value types if they are passed both by stack and regs).
1636 arg_need_temp (ArgInfo
*ainfo
)
1638 if (ainfo
->storage
== RegTypeStructByVal
&& ainfo
->vtsize
)
1639 return ainfo
->struct_size
;
1644 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1646 switch (ainfo
->storage
) {
1647 case RegTypeIRegPair
:
1648 case RegTypeGeneral
:
1649 case RegTypeStructByVal
:
1650 return &ccontext
->gregs
[ainfo
->reg
];
1653 return &ccontext
->fregs
[ainfo
->reg
];
1655 return ccontext
->stack
+ ainfo
->offset
;
1657 g_error ("Arg storage type not yet supported");
1662 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1664 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1665 g_assert (arg_need_temp (ainfo
));
1666 memcpy (dest
, &ccontext
->gregs
[ainfo
->reg
], reg_size
);
1667 memcpy ((host_mgreg_t
*)dest
+ ainfo
->size
, ccontext
->stack
+ ainfo
->offset
, ainfo
->struct_size
- reg_size
);
1671 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1673 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1674 g_assert (arg_need_temp (ainfo
));
1675 memcpy (&ccontext
->gregs
[ainfo
->reg
], src
, reg_size
);
1676 memcpy (ccontext
->stack
+ ainfo
->offset
, (host_mgreg_t
*)src
+ ainfo
->size
, ainfo
->struct_size
- reg_size
);
1679 /* Set arguments in the ccontext (for i2n entry) */
1681 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1683 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1684 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1688 memset (ccontext
, 0, sizeof (CallContext
));
1690 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1691 if (ccontext
->stack_size
)
1692 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
1694 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1695 ainfo
= &cinfo
->ret
;
1696 if (ainfo
->storage
== RegTypeStructByAddr
) {
1697 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1698 ccontext
->gregs
[cinfo
->ret
.reg
] = (host_mgreg_t
)(gsize
)storage
;
1702 g_assert (!sig
->hasthis
);
1704 for (int i
= 0; i
< sig
->param_count
; i
++) {
1705 ainfo
= &cinfo
->args
[i
];
1706 int temp_size
= arg_need_temp (ainfo
);
1709 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1711 storage
= arg_get_storage (ccontext
, ainfo
);
1713 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1715 arg_set_val (ccontext
, ainfo
, storage
);
1721 /* Set return value in the ccontext (for n2i return) */
1723 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1725 const MonoEECallbacks
*interp_cb
;
1730 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1733 interp_cb
= mini_get_interp_callbacks ();
1734 cinfo
= get_call_info (NULL
, sig
);
1735 ainfo
= &cinfo
->ret
;
1737 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1738 g_assert (!arg_need_temp (ainfo
));
1739 storage
= arg_get_storage (ccontext
, ainfo
);
1740 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1741 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1747 /* Gets the arguments from ccontext (for n2i entry) */
1749 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1751 const MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1752 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1756 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1757 ainfo
= &cinfo
->ret
;
1758 if (ainfo
->storage
== RegTypeStructByAddr
) {
1759 storage
= (gpointer
)(gsize
)ccontext
->gregs
[cinfo
->ret
.reg
];
1760 interp_cb
->frame_arg_set_storage ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1764 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1765 ainfo
= &cinfo
->args
[i
];
1766 int temp_size
= arg_need_temp (ainfo
);
1769 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1770 arg_get_val (ccontext
, ainfo
, storage
);
1772 storage
= arg_get_storage (ccontext
, ainfo
);
1774 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1780 /* Gets the return value from ccontext (for i2n exit) */
1782 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1784 const MonoEECallbacks
*interp_cb
;
1789 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1792 interp_cb
= mini_get_interp_callbacks ();
1793 cinfo
= get_call_info (NULL
, sig
);
1794 ainfo
= &cinfo
->ret
;
1796 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1797 g_assert (!arg_need_temp (ainfo
));
1798 storage
= arg_get_storage (ccontext
, ainfo
);
1799 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1808 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1810 g_assert (caller_sig
);
1811 g_assert (callee_sig
);
1813 CallInfo
*caller_info
= get_call_info (NULL
, caller_sig
);
1814 CallInfo
*callee_info
= get_call_info (NULL
, callee_sig
);
1817 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1818 * the extra stack space would be left on the stack after the tailcall.
1820 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1821 && IS_SUPPORTED_TAILCALL (caller_info
->ret
.storage
== callee_info
->ret
.storage
);
1823 // FIXME The limit here is that moving the parameters requires addressing the parameters
1824 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1825 res
&= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
< (4096 - 4));
1826 res
&= IS_SUPPORTED_TAILCALL (caller_info
->stack_usage
< (4096 - 4));
1828 g_free (caller_info
);
1829 g_free (callee_info
);
1835 debug_omit_fp (void)
1838 return mono_debug_count ();
1845 * mono_arch_compute_omit_fp:
1846 * Determine whether the frame pointer can be eliminated.
1849 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1851 MonoMethodSignature
*sig
;
1852 MonoMethodHeader
*header
;
1856 if (cfg
->arch
.omit_fp_computed
)
1859 header
= cfg
->header
;
1861 sig
= mono_method_signature_internal (cfg
->method
);
1863 if (!cfg
->arch
.cinfo
)
1864 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1865 cinfo
= cfg
->arch
.cinfo
;
1868 * FIXME: Remove some of the restrictions.
1870 cfg
->arch
.omit_fp
= TRUE
;
1871 cfg
->arch
.omit_fp_computed
= TRUE
;
1873 if (cfg
->disable_omit_fp
)
1874 cfg
->arch
.omit_fp
= FALSE
;
1875 if (!debug_omit_fp ())
1876 cfg
->arch
.omit_fp
= FALSE
;
1878 if (cfg->method->save_lmf)
1879 cfg->arch.omit_fp = FALSE;
1881 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1882 cfg
->arch
.omit_fp
= FALSE
;
1883 if (header
->num_clauses
)
1884 cfg
->arch
.omit_fp
= FALSE
;
1885 if (cfg
->param_area
)
1886 cfg
->arch
.omit_fp
= FALSE
;
1887 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1888 cfg
->arch
.omit_fp
= FALSE
;
1889 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)))
1890 cfg
->arch
.omit_fp
= FALSE
;
1891 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1892 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1894 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1896 * The stack offset can only be determined when the frame
1899 cfg
->arch
.omit_fp
= FALSE
;
1904 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1905 MonoInst
*ins
= cfg
->varinfo
[i
];
1908 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
1913 * Set var information according to the calling convention. arm version.
1914 * The locals var stuff should most likely be split in another method.
1917 mono_arch_allocate_vars (MonoCompile
*cfg
)
1919 MonoMethodSignature
*sig
;
1920 MonoMethodHeader
*header
;
1923 int i
, offset
, size
, align
, curinst
;
1928 sig
= mono_method_signature_internal (cfg
->method
);
1930 if (!cfg
->arch
.cinfo
)
1931 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1932 cinfo
= cfg
->arch
.cinfo
;
1933 sig_ret
= mini_get_underlying_type (sig
->ret
);
1935 mono_arch_compute_omit_fp (cfg
);
1937 if (cfg
->arch
.omit_fp
)
1938 cfg
->frame_reg
= ARMREG_SP
;
1940 cfg
->frame_reg
= ARMREG_FP
;
1942 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1944 /* allow room for the vararg method args: void* and long/double */
1945 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1946 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (target_mgreg_t
)*8);
1948 header
= cfg
->header
;
1950 /* See mono_arch_get_global_int_regs () */
1951 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1952 cfg
->uses_rgctx_reg
= TRUE
;
1954 if (cfg
->frame_reg
!= ARMREG_SP
)
1955 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1957 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1958 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1959 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1963 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1964 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1965 cfg
->ret
->opcode
= OP_REGVAR
;
1966 cfg
->ret
->inst_c0
= ARMREG_R0
;
1969 /* local vars are at a positive offset from the stack pointer */
1971 * also note that if the function uses alloca, we use FP
1972 * to point at the local variables.
1974 offset
= 0; /* linkage area */
1975 /* align the offset to 16 bytes: not sure this is needed here */
1977 //offset &= ~(8 - 1);
1979 /* add parameter area size for called functions */
1980 offset
+= cfg
->param_area
;
1983 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1986 /* allow room to save the return value */
1987 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1990 switch (cinfo
->ret
.storage
) {
1991 case RegTypeStructByVal
:
1993 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1994 offset
= ALIGN_TO (offset
, 8);
1995 cfg
->ret
->opcode
= OP_REGOFFSET
;
1996 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1997 cfg
->ret
->inst_offset
= offset
;
1998 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1999 offset
+= cinfo
->ret
.nregs
* sizeof (target_mgreg_t
);
2003 case RegTypeStructByAddr
:
2004 ins
= cfg
->vret_addr
;
2005 offset
+= sizeof (target_mgreg_t
) - 1;
2006 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2007 ins
->inst_offset
= offset
;
2008 ins
->opcode
= OP_REGOFFSET
;
2009 ins
->inst_basereg
= cfg
->frame_reg
;
2010 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2011 g_print ("vret_addr =");
2012 mono_print_ins (cfg
->vret_addr
);
2014 offset
+= sizeof (target_mgreg_t
);
2020 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2021 if (cfg
->arch
.seq_point_info_var
) {
2024 ins
= cfg
->arch
.seq_point_info_var
;
2028 offset
+= align
- 1;
2029 offset
&= ~(align
- 1);
2030 ins
->opcode
= OP_REGOFFSET
;
2031 ins
->inst_basereg
= cfg
->frame_reg
;
2032 ins
->inst_offset
= offset
;
2035 if (cfg
->arch
.ss_trigger_page_var
) {
2038 ins
= cfg
->arch
.ss_trigger_page_var
;
2041 offset
+= align
- 1;
2042 offset
&= ~(align
- 1);
2043 ins
->opcode
= OP_REGOFFSET
;
2044 ins
->inst_basereg
= cfg
->frame_reg
;
2045 ins
->inst_offset
= offset
;
2049 if (cfg
->arch
.seq_point_ss_method_var
) {
2052 ins
= cfg
->arch
.seq_point_ss_method_var
;
2055 offset
+= align
- 1;
2056 offset
&= ~(align
- 1);
2057 ins
->opcode
= OP_REGOFFSET
;
2058 ins
->inst_basereg
= cfg
->frame_reg
;
2059 ins
->inst_offset
= offset
;
2062 if (cfg
->arch
.seq_point_bp_method_var
) {
2065 ins
= cfg
->arch
.seq_point_bp_method_var
;
2068 offset
+= align
- 1;
2069 offset
&= ~(align
- 1);
2070 ins
->opcode
= OP_REGOFFSET
;
2071 ins
->inst_basereg
= cfg
->frame_reg
;
2072 ins
->inst_offset
= offset
;
2076 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
2077 /* Allocate a temporary used by the atomic ops */
2081 /* Allocate a local slot to hold the sig cookie address */
2082 offset
+= align
- 1;
2083 offset
&= ~(align
- 1);
2084 cfg
->arch
.atomic_tmp_offset
= offset
;
2087 cfg
->arch
.atomic_tmp_offset
= -1;
2090 cfg
->locals_min_stack_offset
= offset
;
2092 curinst
= cfg
->locals_start
;
2093 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
2096 ins
= cfg
->varinfo
[i
];
2097 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
2100 t
= ins
->inst_vtype
;
2101 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
2104 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2105 * pinvoke wrappers when they call functions returning structure */
2106 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
2107 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &ualign
);
2111 size
= mono_type_size (t
, &align
);
2113 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2114 * since it loads/stores misaligned words, which don't do the right thing.
2116 if (align
< 4 && size
>= 4)
2118 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2119 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2120 offset
+= align
- 1;
2121 offset
&= ~(align
- 1);
2122 ins
->opcode
= OP_REGOFFSET
;
2123 ins
->inst_offset
= offset
;
2124 ins
->inst_basereg
= cfg
->frame_reg
;
2126 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2129 cfg
->locals_max_stack_offset
= offset
;
2133 ins
= cfg
->args
[curinst
];
2134 if (ins
->opcode
!= OP_REGVAR
) {
2135 ins
->opcode
= OP_REGOFFSET
;
2136 ins
->inst_basereg
= cfg
->frame_reg
;
2137 offset
+= sizeof (target_mgreg_t
) - 1;
2138 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2139 ins
->inst_offset
= offset
;
2140 offset
+= sizeof (target_mgreg_t
);
2145 if (sig
->call_convention
== MONO_CALL_VARARG
) {
2149 /* Allocate a local slot to hold the sig cookie address */
2150 offset
+= align
- 1;
2151 offset
&= ~(align
- 1);
2152 cfg
->sig_cookie
= offset
;
2156 for (i
= 0; i
< sig
->param_count
; ++i
) {
2157 ainfo
= cinfo
->args
+ i
;
2159 ins
= cfg
->args
[curinst
];
2161 switch (ainfo
->storage
) {
2163 offset
= ALIGN_TO (offset
, 8);
2164 ins
->opcode
= OP_REGOFFSET
;
2165 ins
->inst_basereg
= cfg
->frame_reg
;
2166 /* These arguments are saved to the stack in the prolog */
2167 ins
->inst_offset
= offset
;
2168 if (cfg
->verbose_level
>= 2)
2169 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2177 if (ins
->opcode
!= OP_REGVAR
) {
2178 ins
->opcode
= OP_REGOFFSET
;
2179 ins
->inst_basereg
= cfg
->frame_reg
;
2180 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
2182 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2183 * since it loads/stores misaligned words, which don't do the right thing.
2185 if (align
< 4 && size
>= 4)
2187 /* The code in the prolog () stores words when storing vtypes received in a register */
2188 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
2190 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2191 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2192 offset
+= align
- 1;
2193 offset
&= ~(align
- 1);
2194 ins
->inst_offset
= offset
;
2200 /* align the offset to 8 bytes */
2201 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2202 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2207 cfg
->stack_offset
= offset
;
2211 mono_arch_create_vars (MonoCompile
*cfg
)
2213 MonoMethodSignature
*sig
;
2217 sig
= mono_method_signature_internal (cfg
->method
);
2219 if (!cfg
->arch
.cinfo
)
2220 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2221 cinfo
= cfg
->arch
.cinfo
;
2223 if (IS_HARD_FLOAT
) {
2224 for (i
= 0; i
< 2; i
++) {
2225 MonoInst
*inst
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_LOCAL
);
2226 inst
->flags
|= MONO_INST_VOLATILE
;
2228 cfg
->arch
.vfp_scratch_slots
[i
] = inst
;
2232 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2233 cfg
->ret_var_is_local
= TRUE
;
2235 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2236 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
2237 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2238 g_print ("vret_addr = ");
2239 mono_print_ins (cfg
->vret_addr
);
2243 if (cfg
->gen_sdb_seq_points
) {
2244 if (cfg
->compile_aot
) {
2245 MonoInst
*ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2246 ins
->flags
|= MONO_INST_VOLATILE
;
2247 cfg
->arch
.seq_point_info_var
= ins
;
2249 if (!cfg
->soft_breakpoints
) {
2250 /* Allocate a separate variable for this to save 1 load per seq point */
2251 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2252 ins
->flags
|= MONO_INST_VOLATILE
;
2253 cfg
->arch
.ss_trigger_page_var
= ins
;
2256 if (cfg
->soft_breakpoints
) {
2259 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2260 ins
->flags
|= MONO_INST_VOLATILE
;
2261 cfg
->arch
.seq_point_ss_method_var
= ins
;
2263 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2264 ins
->flags
|= MONO_INST_VOLATILE
;
2265 cfg
->arch
.seq_point_bp_method_var
= ins
;
2271 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2273 MonoMethodSignature
*tmp_sig
;
2276 if (MONO_IS_TAILCALL_OPCODE (call
))
2279 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2282 * mono_ArgIterator_Setup assumes the signature cookie is
2283 * passed first and all the arguments which were before it are
2284 * passed on the stack after the signature. So compensate by
2285 * passing a different signature.
2287 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2288 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2289 tmp_sig
->sentinelpos
= 0;
2290 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2292 sig_reg
= mono_alloc_ireg (cfg
);
2293 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2295 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2300 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2305 LLVMCallInfo
*linfo
;
2307 n
= sig
->param_count
+ sig
->hasthis
;
2309 cinfo
= get_call_info (cfg
->mempool
, sig
);
2311 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2314 * LLVM always uses the native ABI while we use our own ABI, the
2315 * only difference is the handling of vtypes:
2316 * - we only pass/receive them in registers in some cases, and only
2317 * in 1 or 2 integer registers.
2319 switch (cinfo
->ret
.storage
) {
2320 case RegTypeGeneral
:
2323 case RegTypeIRegPair
:
2325 case RegTypeStructByAddr
:
2327 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2329 /* Vtype returned using a hidden argument */
2330 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2331 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2335 case RegTypeStructByVal
:
2336 /* LLVM models this by returning an int array */
2337 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2338 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2342 linfo
->ret
.storage
= LLVMArgFpStruct
;
2343 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2344 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2347 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2348 cfg
->disable_llvm
= TRUE
;
2352 for (i
= 0; i
< n
; ++i
) {
2353 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2354 ainfo
= cinfo
->args
+ i
;
2356 lainfo
->storage
= LLVMArgNone
;
2358 switch (ainfo
->storage
) {
2359 case RegTypeGeneral
:
2360 case RegTypeIRegPair
:
2362 case RegTypeBaseGen
:
2364 lainfo
->storage
= LLVMArgNormal
;
2366 case RegTypeStructByVal
: {
2367 lainfo
->storage
= LLVMArgAsIArgs
;
2368 int slotsize
= eabi_supported
&& ainfo
->align
== 8 ? 8 : 4;
2369 lainfo
->nslots
= ALIGN_TO (ainfo
->struct_size
, slotsize
) / slotsize
;
2370 lainfo
->esize
= slotsize
;
2373 case RegTypeStructByAddr
:
2374 case RegTypeStructByAddrOnStack
:
2375 lainfo
->storage
= LLVMArgVtypeByRef
;
2380 lainfo
->storage
= LLVMArgAsFpArgs
;
2381 lainfo
->nslots
= ainfo
->nregs
;
2382 lainfo
->esize
= ainfo
->esize
;
2383 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2384 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2388 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2389 cfg
->disable_llvm
= TRUE
;
2399 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2402 MonoMethodSignature
*sig
;
2406 sig
= call
->signature
;
2407 n
= sig
->param_count
+ sig
->hasthis
;
2409 cinfo
= get_call_info (cfg
->mempool
, sig
);
2411 switch (cinfo
->ret
.storage
) {
2412 case RegTypeStructByVal
:
2414 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2415 /* The JIT will transform this into a normal call */
2416 call
->vret_in_reg
= TRUE
;
2419 if (MONO_IS_TAILCALL_OPCODE (call
))
2422 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2423 * the location pointed to by it after call in emit_move_return_value ().
2425 if (!cfg
->arch
.vret_addr_loc
) {
2426 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2427 /* Prevent it from being register allocated or optimized away */
2428 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2431 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2433 case RegTypeStructByAddr
: {
2435 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2436 vtarg
->sreg1
= call
->vret_var
->dreg
;
2437 vtarg
->dreg
= mono_alloc_preg (cfg
);
2438 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2440 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2447 for (i
= 0; i
< n
; ++i
) {
2448 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2451 if (i
>= sig
->hasthis
)
2452 t
= sig
->params
[i
- sig
->hasthis
];
2454 t
= mono_get_int_type ();
2455 t
= mini_get_underlying_type (t
);
2457 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2458 /* Emit the signature cookie just before the implicit arguments */
2459 emit_sig_cookie (cfg
, call
, cinfo
);
2462 in
= call
->args
[i
];
2464 switch (ainfo
->storage
) {
2465 case RegTypeGeneral
:
2466 case RegTypeIRegPair
:
2467 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2468 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2469 ins
->dreg
= mono_alloc_ireg (cfg
);
2470 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2471 MONO_ADD_INS (cfg
->cbb
, ins
);
2472 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2474 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2475 ins
->dreg
= mono_alloc_ireg (cfg
);
2476 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2477 MONO_ADD_INS (cfg
->cbb
, ins
);
2478 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2479 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2480 if (ainfo
->size
== 4) {
2481 if (IS_SOFT_FLOAT
) {
2482 /* mono_emit_call_args () have already done the r8->r4 conversion */
2483 /* The converted value is in an int vreg */
2484 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2485 ins
->dreg
= mono_alloc_ireg (cfg
);
2486 ins
->sreg1
= in
->dreg
;
2487 MONO_ADD_INS (cfg
->cbb
, ins
);
2488 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2492 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2493 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2494 creg
= mono_alloc_ireg (cfg
);
2495 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2496 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2499 if (IS_SOFT_FLOAT
) {
2500 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2501 ins
->dreg
= mono_alloc_ireg (cfg
);
2502 ins
->sreg1
= in
->dreg
;
2503 MONO_ADD_INS (cfg
->cbb
, ins
);
2504 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2506 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2507 ins
->dreg
= mono_alloc_ireg (cfg
);
2508 ins
->sreg1
= in
->dreg
;
2509 MONO_ADD_INS (cfg
->cbb
, ins
);
2510 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2514 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2515 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2516 creg
= mono_alloc_ireg (cfg
);
2517 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2518 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2519 creg
= mono_alloc_ireg (cfg
);
2520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2521 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2524 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2526 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2527 ins
->dreg
= mono_alloc_ireg (cfg
);
2528 ins
->sreg1
= in
->dreg
;
2529 MONO_ADD_INS (cfg
->cbb
, ins
);
2531 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2534 case RegTypeStructByVal
:
2535 case RegTypeGSharedVtInReg
:
2536 case RegTypeGSharedVtOnStack
:
2538 case RegTypeStructByAddr
:
2539 case RegTypeStructByAddrOnStack
:
2540 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2541 ins
->opcode
= OP_OUTARG_VT
;
2542 ins
->sreg1
= in
->dreg
;
2543 ins
->klass
= in
->klass
;
2544 ins
->inst_p0
= call
;
2545 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2546 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2547 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2548 MONO_ADD_INS (cfg
->cbb
, ins
);
2551 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2552 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2553 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2554 if (t
->type
== MONO_TYPE_R8
) {
2555 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2558 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2560 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2563 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2566 case RegTypeBaseGen
:
2567 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2568 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2569 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2570 ins
->dreg
= mono_alloc_ireg (cfg
);
2571 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2572 MONO_ADD_INS (cfg
->cbb
, ins
);
2573 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2574 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2577 /* This should work for soft-float as well */
2579 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2580 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2581 creg
= mono_alloc_ireg (cfg
);
2582 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2583 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2584 creg
= mono_alloc_ireg (cfg
);
2585 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2586 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2587 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2589 g_assert_not_reached ();
2593 int fdreg
= mono_alloc_freg (cfg
);
2595 if (ainfo
->size
== 8) {
2596 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2597 ins
->sreg1
= in
->dreg
;
2599 MONO_ADD_INS (cfg
->cbb
, ins
);
2601 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2606 * Mono's register allocator doesn't speak single-precision registers that
2607 * overlap double-precision registers (i.e. armhf). So we have to work around
2608 * the register allocator and load the value from memory manually.
2610 * So we create a variable for the float argument and an instruction to store
2611 * the argument into the variable. We then store the list of these arguments
2612 * in call->float_args. This list is then used by emit_float_args later to
2613 * pass the arguments in the various call opcodes.
2615 * This is not very nice, and we should really try to fix the allocator.
2618 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2620 /* Make sure the instruction isn't seen as pointless and removed.
2622 float_arg
->flags
|= MONO_INST_VOLATILE
;
2624 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2626 /* We use the dreg to look up the instruction later. The hreg is used to
2627 * emit the instruction that loads the value into the FP reg.
2629 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2630 fad
->vreg
= float_arg
->dreg
;
2631 fad
->hreg
= ainfo
->reg
;
2633 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2636 call
->used_iregs
|= 1 << ainfo
->reg
;
2637 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2641 g_assert_not_reached ();
2645 /* Handle the case where there are no implicit arguments */
2646 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2647 emit_sig_cookie (cfg
, call
, cinfo
);
2649 call
->call_info
= cinfo
;
2650 call
->stack_usage
= cinfo
->stack_usage
;
2654 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2660 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2661 ins
->dreg
= mono_alloc_freg (cfg
);
2662 ins
->sreg1
= arg
->dreg
;
2663 MONO_ADD_INS (cfg
->cbb
, ins
);
2664 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2667 g_assert_not_reached ();
2673 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2675 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2677 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
2678 int ovf_size
= ainfo
->vtsize
;
2679 int doffset
= ainfo
->offset
;
2680 int struct_size
= ainfo
->struct_size
;
2681 int i
, soffset
, dreg
, tmpreg
;
2683 switch (ainfo
->storage
) {
2684 case RegTypeGSharedVtInReg
:
2685 case RegTypeStructByAddr
:
2687 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2689 case RegTypeGSharedVtOnStack
:
2690 case RegTypeStructByAddrOnStack
:
2691 /* Pass by addr on stack */
2692 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2695 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2696 if (ainfo
->esize
== 4)
2697 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2699 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2700 load
->dreg
= mono_alloc_freg (cfg
);
2701 load
->inst_basereg
= src
->dreg
;
2702 load
->inst_offset
= i
* ainfo
->esize
;
2703 MONO_ADD_INS (cfg
->cbb
, load
);
2705 if (ainfo
->esize
== 4) {
2708 /* See RegTypeFP in mono_arch_emit_call () */
2709 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2710 float_arg
->flags
|= MONO_INST_VOLATILE
;
2711 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2713 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2714 fad
->vreg
= float_arg
->dreg
;
2715 fad
->hreg
= ainfo
->reg
+ i
;
2717 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2719 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2725 for (i
= 0; i
< ainfo
->size
; ++i
) {
2726 dreg
= mono_alloc_ireg (cfg
);
2727 switch (struct_size
) {
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2732 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2735 tmpreg
= mono_alloc_ireg (cfg
);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2738 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2739 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2741 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2742 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2745 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2748 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2749 soffset
+= sizeof (target_mgreg_t
);
2750 struct_size
-= sizeof (target_mgreg_t
);
2752 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2754 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (target_mgreg_t
), struct_size
), struct_size
< 4 ? 1 : 4);
2760 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2762 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
2765 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2768 if (COMPILE_LLVM (cfg
)) {
2769 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2771 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2772 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2773 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2774 MONO_ADD_INS (cfg
->cbb
, ins
);
2779 case MONO_ARM_FPU_NONE
:
2780 if (ret
->type
== MONO_TYPE_R8
) {
2783 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2784 ins
->dreg
= cfg
->ret
->dreg
;
2785 ins
->sreg1
= val
->dreg
;
2786 MONO_ADD_INS (cfg
->cbb
, ins
);
2789 if (ret
->type
== MONO_TYPE_R4
) {
2790 /* Already converted to an int in method_to_ir () */
2791 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2795 case MONO_ARM_FPU_VFP
:
2796 case MONO_ARM_FPU_VFP_HARD
:
2797 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2800 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2801 ins
->dreg
= cfg
->ret
->dreg
;
2802 ins
->sreg1
= val
->dreg
;
2803 MONO_ADD_INS (cfg
->cbb
, ins
);
2808 g_assert_not_reached ();
2812 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2815 #endif /* #ifndef DISABLE_JIT */
2818 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2824 MonoMethodSignature
*sig
;
2827 MonoType
**param_types
;
2831 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2835 switch (cinfo
->ret
.storage
) {
2837 case RegTypeGeneral
:
2838 case RegTypeIRegPair
:
2839 case RegTypeStructByAddr
:
2850 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2851 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2854 switch (ainfo
->storage
) {
2855 case RegTypeGeneral
:
2856 case RegTypeIRegPair
:
2857 case RegTypeBaseGen
:
2862 case RegTypeStructByVal
:
2863 if (ainfo
->size
== 0)
2864 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2866 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2873 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2874 for (i
= 0; i
< sig
->param_count
; ++i
) {
2875 MonoType
*t
= sig
->params
[i
];
2880 t
= mini_get_underlying_type (t
);
2903 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2905 ArchDynCallInfo
*info
;
2909 cinfo
= get_call_info (NULL
, sig
);
2911 if (!dyn_call_supported (cinfo
, sig
)) {
2916 info
= g_new0 (ArchDynCallInfo
, 1);
2917 // FIXME: Preprocess the info to speed up start_dyn_call ()
2919 info
->cinfo
= cinfo
;
2920 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2921 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2922 for (i
= 0; i
< sig
->param_count
; ++i
)
2923 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2925 return (MonoDynCallInfo
*)info
;
2929 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2931 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2933 g_free (ainfo
->cinfo
);
2938 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
2940 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2942 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
2943 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
;
2947 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
2949 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2950 CallInfo
*cinfo
= dinfo
->cinfo
;
2951 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2952 int arg_index
, greg
, i
, j
, pindex
;
2953 MonoMethodSignature
*sig
= dinfo
->sig
;
2958 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
2964 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2965 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)*(args
[arg_index
++]);
2970 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2971 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)ret
;
2973 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2974 MonoType
*t
= dinfo
->param_types
[i
];
2975 gpointer
*arg
= args
[arg_index
++];
2976 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2979 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2981 } else if (ainfo
->storage
== RegTypeFP
) {
2982 } else if (ainfo
->storage
== RegTypeBase
) {
2983 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2984 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2985 /* slot + 1 is the first stack slot, so the code below will work */
2988 g_assert_not_reached ();
2992 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
2997 case MONO_TYPE_OBJECT
:
3001 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3004 p
->regs
[slot
] = *(guint8
*)arg
;
3007 p
->regs
[slot
] = *(gint8
*)arg
;
3010 p
->regs
[slot
] = *(gint16
*)arg
;
3013 p
->regs
[slot
] = *(guint16
*)arg
;
3016 p
->regs
[slot
] = *(gint32
*)arg
;
3019 p
->regs
[slot
] = *(guint32
*)arg
;
3023 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3024 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3027 if (ainfo
->storage
== RegTypeFP
) {
3028 float f
= *(float*)arg
;
3029 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
3032 p
->regs
[slot
] = *(host_mgreg_t
*)arg
;
3036 if (ainfo
->storage
== RegTypeFP
) {
3037 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
3040 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3041 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3044 case MONO_TYPE_GENERICINST
:
3045 if (MONO_TYPE_IS_REFERENCE (t
)) {
3046 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3049 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
3050 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
3051 guint8
*nullable_buf
;
3054 size
= mono_class_value_size (klass
, NULL
);
3055 nullable_buf
= g_alloca (size
);
3056 g_assert (nullable_buf
);
3058 /* The argument pointed to by arg is either a boxed vtype or null */
3059 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
3061 arg
= (gpointer
*)nullable_buf
;
3067 case MONO_TYPE_VALUETYPE
:
3068 g_assert (ainfo
->storage
== RegTypeStructByVal
);
3070 if (ainfo
->size
== 0)
3071 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
3075 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
3076 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [j
];
3079 g_assert_not_reached ();
3085 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
3087 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
3088 DynCallArgs
*p
= (DynCallArgs
*)buf
;
3089 MonoType
*ptype
= ainfo
->rtype
;
3090 guint8
*ret
= p
->ret
;
3091 host_mgreg_t res
= p
->res
;
3092 host_mgreg_t res2
= p
->res2
;
3094 switch (ptype
->type
) {
3095 case MONO_TYPE_VOID
:
3096 *(gpointer
*)ret
= NULL
;
3098 case MONO_TYPE_OBJECT
:
3102 *(gpointer
*)ret
= (gpointer
)(gsize
)res
;
3108 *(guint8
*)ret
= res
;
3111 *(gint16
*)ret
= res
;
3114 *(guint16
*)ret
= res
;
3117 *(gint32
*)ret
= res
;
3120 *(guint32
*)ret
= res
;
3124 /* This handles endianness as well */
3125 ((gint32
*)ret
) [0] = res
;
3126 ((gint32
*)ret
) [1] = res2
;
3128 case MONO_TYPE_GENERICINST
:
3129 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
3130 *(gpointer
*)ret
= (gpointer
)res
;
3135 case MONO_TYPE_VALUETYPE
:
3136 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
3142 *(float*)ret
= *(float*)&p
->fpregs
[0];
3144 *(float*)ret
= *(float*)&res
;
3146 case MONO_TYPE_R8
: {
3147 host_mgreg_t regs
[2];
3150 if (IS_HARD_FLOAT
) {
3151 *(double*)ret
= p
->fpregs
[0];
3156 *(double*)ret
= *(double*)®s
;
3161 g_assert_not_reached ();
3168 * The immediate field for cond branches is big enough for all reasonable methods
3170 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3171 if (0 && ins->inst_true_bb->native_offset) { \
3172 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3174 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3175 ARM_B_COND (code, (condcode), 0); \
3178 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3180 /* emit an exception if condition is fail
3182 * We assign the extra code used to throw the implicit exceptions
3183 * to cfg->bb_exit as far as the big branch handling is concerned
3185 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3187 mono_add_patch_info (cfg, code - cfg->native_code, \
3188 MONO_PATCH_INFO_EXC, exc_name); \
3189 ARM_BL_COND (code, (condcode), 0); \
3192 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3195 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3200 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3204 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3205 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3207 switch (ins
->opcode
) {
3210 /* Already done by an arch-independent pass */
3212 case OP_LOAD_MEMBASE
:
3213 case OP_LOADI4_MEMBASE
:
3215 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3216 * OP_LOAD_MEMBASE offset(basereg), reg
3218 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3219 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3220 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3221 ins
->inst_offset
== last_ins
->inst_offset
) {
3222 if (ins
->dreg
== last_ins
->sreg1
) {
3223 MONO_DELETE_INS (bb
, ins
);
3226 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3227 ins
->opcode
= OP_MOVE
;
3228 ins
->sreg1
= last_ins
->sreg1
;
3232 * Note: reg1 must be different from the basereg in the second load
3233 * OP_LOAD_MEMBASE offset(basereg), reg1
3234 * OP_LOAD_MEMBASE offset(basereg), reg2
3236 * OP_LOAD_MEMBASE offset(basereg), reg1
3237 * OP_MOVE reg1, reg2
3239 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3240 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3241 ins
->inst_basereg
!= last_ins
->dreg
&&
3242 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3243 ins
->inst_offset
== last_ins
->inst_offset
) {
3245 if (ins
->dreg
== last_ins
->dreg
) {
3246 MONO_DELETE_INS (bb
, ins
);
3249 ins
->opcode
= OP_MOVE
;
3250 ins
->sreg1
= last_ins
->dreg
;
3253 //g_assert_not_reached ();
3257 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3258 * OP_LOAD_MEMBASE offset(basereg), reg
3260 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3261 * OP_ICONST reg, imm
3263 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3264 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3265 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3266 ins
->inst_offset
== last_ins
->inst_offset
) {
3267 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3268 ins
->opcode
= OP_ICONST
;
3269 ins
->inst_c0
= last_ins
->inst_imm
;
3270 g_assert_not_reached (); // check this rule
3274 case OP_LOADU1_MEMBASE
:
3275 case OP_LOADI1_MEMBASE
:
3276 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3277 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3278 ins
->inst_offset
== last_ins
->inst_offset
) {
3279 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3280 ins
->sreg1
= last_ins
->sreg1
;
3283 case OP_LOADU2_MEMBASE
:
3284 case OP_LOADI2_MEMBASE
:
3285 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3286 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3287 ins
->inst_offset
== last_ins
->inst_offset
) {
3288 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3289 ins
->sreg1
= last_ins
->sreg1
;
3293 ins
->opcode
= OP_MOVE
;
3297 if (ins
->dreg
== ins
->sreg1
) {
3298 MONO_DELETE_INS (bb
, ins
);
3302 * OP_MOVE sreg, dreg
3303 * OP_MOVE dreg, sreg
3305 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3306 ins
->sreg1
== last_ins
->dreg
&&
3307 ins
->dreg
== last_ins
->sreg1
) {
3308 MONO_DELETE_INS (bb
, ins
);
3317 * the branch_cc_table should maintain the order of these
3331 branch_cc_table
[] = {
3345 #define ADD_NEW_INS(cfg,dest,op) do { \
3346 MONO_INST_NEW ((cfg), (dest), (op)); \
3347 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3351 map_to_reg_reg_op (int op
)
3360 case OP_COMPARE_IMM
:
3362 case OP_ICOMPARE_IMM
:
3376 case OP_LOAD_MEMBASE
:
3377 return OP_LOAD_MEMINDEX
;
3378 case OP_LOADI4_MEMBASE
:
3379 return OP_LOADI4_MEMINDEX
;
3380 case OP_LOADU4_MEMBASE
:
3381 return OP_LOADU4_MEMINDEX
;
3382 case OP_LOADU1_MEMBASE
:
3383 return OP_LOADU1_MEMINDEX
;
3384 case OP_LOADI2_MEMBASE
:
3385 return OP_LOADI2_MEMINDEX
;
3386 case OP_LOADU2_MEMBASE
:
3387 return OP_LOADU2_MEMINDEX
;
3388 case OP_LOADI1_MEMBASE
:
3389 return OP_LOADI1_MEMINDEX
;
3390 case OP_STOREI1_MEMBASE_REG
:
3391 return OP_STOREI1_MEMINDEX
;
3392 case OP_STOREI2_MEMBASE_REG
:
3393 return OP_STOREI2_MEMINDEX
;
3394 case OP_STOREI4_MEMBASE_REG
:
3395 return OP_STOREI4_MEMINDEX
;
3396 case OP_STORE_MEMBASE_REG
:
3397 return OP_STORE_MEMINDEX
;
3398 case OP_STORER4_MEMBASE_REG
:
3399 return OP_STORER4_MEMINDEX
;
3400 case OP_STORER8_MEMBASE_REG
:
3401 return OP_STORER8_MEMINDEX
;
3402 case OP_STORE_MEMBASE_IMM
:
3403 return OP_STORE_MEMBASE_REG
;
3404 case OP_STOREI1_MEMBASE_IMM
:
3405 return OP_STOREI1_MEMBASE_REG
;
3406 case OP_STOREI2_MEMBASE_IMM
:
3407 return OP_STOREI2_MEMBASE_REG
;
3408 case OP_STOREI4_MEMBASE_IMM
:
3409 return OP_STOREI4_MEMBASE_REG
;
3411 g_assert_not_reached ();
3415 * Remove from the instruction list the instructions that can't be
3416 * represented with very simple instructions with no register
3420 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3422 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3423 int rot_amount
, imm8
, low_imm
;
3425 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3427 switch (ins
->opcode
) {
3431 case OP_COMPARE_IMM
:
3432 case OP_ICOMPARE_IMM
:
3446 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3447 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3448 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3449 temp
->inst_c0
= ins
->inst_imm
;
3450 temp
->dreg
= mono_alloc_ireg (cfg
);
3451 ins
->sreg2
= temp
->dreg
;
3453 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3454 ins
->opcode
= opcode2
;
3456 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3462 if (ins
->inst_imm
== 1) {
3463 ins
->opcode
= OP_MOVE
;
3466 if (ins
->inst_imm
== 0) {
3467 ins
->opcode
= OP_ICONST
;
3471 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3473 ins
->opcode
= OP_SHL_IMM
;
3474 ins
->inst_imm
= imm8
;
3477 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3478 temp
->inst_c0
= ins
->inst_imm
;
3479 temp
->dreg
= mono_alloc_ireg (cfg
);
3480 ins
->sreg2
= temp
->dreg
;
3481 ins
->opcode
= OP_IMUL
;
3488 MonoInst
*current
= ins
;
3490 /* may require a look-ahead of a couple instructions due to spilling */
3491 while (try_count
-- && current
->next
) {
3492 if (current
->next
->opcode
== OP_COND_EXC_C
|| current
->next
->opcode
== OP_COND_EXC_IC
) {
3493 /* ARM sets the C flag to 1 if there was _no_ overflow */
3494 current
->next
->opcode
= OP_COND_EXC_NC
;
3497 current
= current
->next
;
3502 case OP_IDIV_UN_IMM
:
3504 case OP_IREM_UN_IMM
: {
3505 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3506 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3507 temp
->inst_c0
= ins
->inst_imm
;
3508 temp
->dreg
= mono_alloc_ireg (cfg
);
3509 ins
->sreg2
= temp
->dreg
;
3511 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3512 ins
->opcode
= opcode2
;
3515 case OP_LOCALLOC_IMM
:
3516 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3517 temp
->inst_c0
= ins
->inst_imm
;
3518 temp
->dreg
= mono_alloc_ireg (cfg
);
3519 ins
->sreg1
= temp
->dreg
;
3520 ins
->opcode
= OP_LOCALLOC
;
3522 case OP_LOAD_MEMBASE
:
3523 case OP_LOADI4_MEMBASE
:
3524 case OP_LOADU4_MEMBASE
:
3525 case OP_LOADU1_MEMBASE
:
3526 /* we can do two things: load the immed in a register
3527 * and use an indexed load, or see if the immed can be
3528 * represented as an ad_imm + a load with a smaller offset
3529 * that fits. We just do the first for now, optimize later.
3531 if (arm_is_imm12 (ins
->inst_offset
))
3533 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3534 temp
->inst_c0
= ins
->inst_offset
;
3535 temp
->dreg
= mono_alloc_ireg (cfg
);
3536 ins
->sreg2
= temp
->dreg
;
3537 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3539 case OP_LOADI2_MEMBASE
:
3540 case OP_LOADU2_MEMBASE
:
3541 case OP_LOADI1_MEMBASE
:
3542 if (arm_is_imm8 (ins
->inst_offset
))
3544 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3545 temp
->inst_c0
= ins
->inst_offset
;
3546 temp
->dreg
= mono_alloc_ireg (cfg
);
3547 ins
->sreg2
= temp
->dreg
;
3548 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3550 case OP_LOADR4_MEMBASE
:
3551 case OP_LOADR8_MEMBASE
:
3552 if (arm_is_fpimm8 (ins
->inst_offset
))
3554 low_imm
= ins
->inst_offset
& 0x1ff;
3555 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3556 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3557 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3558 temp
->sreg1
= ins
->inst_basereg
;
3559 temp
->dreg
= mono_alloc_ireg (cfg
);
3560 ins
->inst_basereg
= temp
->dreg
;
3561 ins
->inst_offset
= low_imm
;
3565 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3566 temp
->inst_c0
= ins
->inst_offset
;
3567 temp
->dreg
= mono_alloc_ireg (cfg
);
3569 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3570 add_ins
->sreg1
= ins
->inst_basereg
;
3571 add_ins
->sreg2
= temp
->dreg
;
3572 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3574 ins
->inst_basereg
= add_ins
->dreg
;
3575 ins
->inst_offset
= 0;
3578 case OP_STORE_MEMBASE_REG
:
3579 case OP_STOREI4_MEMBASE_REG
:
3580 case OP_STOREI1_MEMBASE_REG
:
3581 if (arm_is_imm12 (ins
->inst_offset
))
3583 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3584 temp
->inst_c0
= ins
->inst_offset
;
3585 temp
->dreg
= mono_alloc_ireg (cfg
);
3586 ins
->sreg2
= temp
->dreg
;
3587 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3589 case OP_STOREI2_MEMBASE_REG
:
3590 if (arm_is_imm8 (ins
->inst_offset
))
3592 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3593 temp
->inst_c0
= ins
->inst_offset
;
3594 temp
->dreg
= mono_alloc_ireg (cfg
);
3595 ins
->sreg2
= temp
->dreg
;
3596 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3598 case OP_STORER4_MEMBASE_REG
:
3599 case OP_STORER8_MEMBASE_REG
:
3600 if (arm_is_fpimm8 (ins
->inst_offset
))
3602 low_imm
= ins
->inst_offset
& 0x1ff;
3603 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3604 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3605 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3606 temp
->sreg1
= ins
->inst_destbasereg
;
3607 temp
->dreg
= mono_alloc_ireg (cfg
);
3608 ins
->inst_destbasereg
= temp
->dreg
;
3609 ins
->inst_offset
= low_imm
;
3613 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3614 temp
->inst_c0
= ins
->inst_offset
;
3615 temp
->dreg
= mono_alloc_ireg (cfg
);
3617 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3618 add_ins
->sreg1
= ins
->inst_destbasereg
;
3619 add_ins
->sreg2
= temp
->dreg
;
3620 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3622 ins
->inst_destbasereg
= add_ins
->dreg
;
3623 ins
->inst_offset
= 0;
3626 case OP_STORE_MEMBASE_IMM
:
3627 case OP_STOREI1_MEMBASE_IMM
:
3628 case OP_STOREI2_MEMBASE_IMM
:
3629 case OP_STOREI4_MEMBASE_IMM
:
3630 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3631 temp
->inst_c0
= ins
->inst_imm
;
3632 temp
->dreg
= mono_alloc_ireg (cfg
);
3633 ins
->sreg1
= temp
->dreg
;
3634 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3636 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3639 gboolean swap
= FALSE
;
3643 /* Optimized away */
3648 /* Some fp compares require swapped operands */
3649 switch (ins
->next
->opcode
) {
3651 ins
->next
->opcode
= OP_FBLT
;
3655 ins
->next
->opcode
= OP_FBLT_UN
;
3659 ins
->next
->opcode
= OP_FBGE
;
3663 ins
->next
->opcode
= OP_FBGE_UN
;
3671 ins
->sreg1
= ins
->sreg2
;
3680 bb
->last_ins
= last_ins
;
3681 bb
->max_vreg
= cfg
->next_vreg
;
3685 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3689 if (long_ins
->opcode
== OP_LNEG
) {
3691 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3692 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
3698 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3700 /* sreg is a float, dreg is an integer reg */
3702 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3704 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
3706 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3707 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3708 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3712 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3713 else if (size
== 2) {
3714 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3715 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3719 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3720 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3721 } else if (size
== 2) {
3722 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3723 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3730 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3732 /* sreg is a float, dreg is an integer reg */
3734 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3736 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
3738 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3739 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3740 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3744 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3745 else if (size
== 2) {
3746 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3747 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3751 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3752 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3753 } else if (size
== 2) {
3754 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3755 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3761 #endif /* #ifndef DISABLE_JIT */
3763 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3766 emit_thunk (guint8
*code
, gconstpointer target
)
3770 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3771 if (thumb_supported
)
3772 ARM_BX (code
, ARMREG_IP
);
3774 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3775 *(guint32
*)code
= (guint32
)(gsize
)target
;
3777 mono_arch_flush_icache (p
, code
- p
);
3781 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3783 MonoJitInfo
*ji
= NULL
;
3784 MonoThunkJitInfo
*info
;
3787 guint8
*orig_target
;
3788 guint8
*target_thunk
;
3791 domain
= mono_domain_get ();
3795 * This can be called multiple times during JITting,
3796 * save the current position in cfg->arch to avoid
3797 * doing a O(n^2) search.
3799 if (!cfg
->arch
.thunks
) {
3800 cfg
->arch
.thunks
= cfg
->thunks
;
3801 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3803 thunks
= cfg
->arch
.thunks
;
3804 thunks_size
= cfg
->arch
.thunks_size
;
3806 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3807 g_assert_not_reached ();
3810 g_assert (*(guint32
*)thunks
== 0);
3811 emit_thunk (thunks
, target
);
3812 arm_patch (code
, thunks
);
3814 cfg
->arch
.thunks
+= THUNK_SIZE
;
3815 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
3817 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3819 info
= mono_jit_info_get_thunk_info (ji
);
3822 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3823 thunks_size
= info
->thunks_size
;
3825 orig_target
= mono_arch_get_call_target (code
+ 4);
3827 mono_mini_arch_lock ();
3829 target_thunk
= NULL
;
3830 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3831 /* The call already points to a thunk, because of trampolines etc. */
3832 target_thunk
= orig_target
;
3834 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3835 if (((guint32
*)p
) [0] == 0) {
3839 } else if (((guint32
*)p
) [2] == (guint32
)(gsize
)target
) {
3840 /* Thunk already points to target */
3847 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3849 if (!target_thunk
) {
3850 mono_mini_arch_unlock ();
3851 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3852 g_assert_not_reached ();
3855 emit_thunk (target_thunk
, target
);
3856 arm_patch (code
, target_thunk
);
3857 mono_arch_flush_icache (code
, 4);
3859 mono_mini_arch_unlock ();
3864 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3866 guint32
*code32
= (guint32
*)code
;
3867 guint32 ins
= *code32
;
3868 guint32 prim
= (ins
>> 25) & 7;
3869 guint32 tval
= GPOINTER_TO_UINT (target
);
3871 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3872 if (prim
== 5) { /* 101b */
3873 /* the diff starts 8 bytes from the branch opcode */
3874 gint diff
= target
- code
- 8;
3876 gint tmask
= 0xffffffff;
3877 if (tval
& 1) { /* entering thumb mode */
3878 diff
= target
- 1 - code
- 8;
3879 g_assert (thumb_supported
);
3880 tbits
= 0xf << 28; /* bl->blx bit pattern */
3881 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3882 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3886 tmask
= ~(1 << 24); /* clear the link bit */
3887 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3892 if (diff
<= 33554431) {
3894 ins
= (ins
& 0xff000000) | diff
;
3896 *code32
= ins
| tbits
;
3900 /* diff between 0 and -33554432 */
3901 if (diff
>= -33554432) {
3903 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3905 *code32
= ins
| tbits
;
3910 handle_thunk (cfg
, domain
, code
, target
);
3915 * The alternative call sequences looks like this:
3917 * ldr ip, [pc] // loads the address constant
3918 * b 1f // jumps around the constant
3919 * address constant embedded in the code
3924 * There are two cases for patching:
3925 * a) at the end of method emission: in this case code points to the start
3926 * of the call sequence
3927 * b) during runtime patching of the call site: in this case code points
3928 * to the mov pc, ip instruction
3930 * We have to handle also the thunk jump code sequence:
3934 * address constant // execution never reaches here
3936 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3937 /* Branch and exchange: the address is constructed in a reg
3938 * We can patch BX when the code sequence is the following:
3939 * ldr ip, [pc, #0] ; 0x8
3946 guint8
*emit
= (guint8
*)ccode
;
3947 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3949 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3950 ARM_BX (emit
, ARMREG_IP
);
3952 /*patching from magic trampoline*/
3953 if (ins
== ccode
[3]) {
3954 g_assert (code32
[-4] == ccode
[0]);
3955 g_assert (code32
[-3] == ccode
[1]);
3956 g_assert (code32
[-1] == ccode
[2]);
3957 code32
[-2] = (guint32
)(gsize
)target
;
3960 /*patching from JIT*/
3961 if (ins
== ccode
[0]) {
3962 g_assert (code32
[1] == ccode
[1]);
3963 g_assert (code32
[3] == ccode
[2]);
3964 g_assert (code32
[4] == ccode
[3]);
3965 code32
[2] = (guint32
)(gsize
)target
;
3968 g_assert_not_reached ();
3969 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
3977 guint8
*emit
= (guint8
*)ccode
;
3978 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3980 ARM_BLX_REG (emit
, ARMREG_IP
);
3982 g_assert (code32
[-3] == ccode
[0]);
3983 g_assert (code32
[-2] == ccode
[1]);
3984 g_assert (code32
[0] == ccode
[2]);
3986 code32
[-1] = (guint32
)(gsize
)target
;
3989 guint32
*tmp
= ccode
;
3990 guint8
*emit
= (guint8
*)tmp
;
3991 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3992 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3993 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3994 ARM_BX (emit
, ARMREG_IP
);
3995 if (ins
== ccode
[2]) {
3996 g_assert_not_reached (); // should be -2 ...
3997 code32
[-1] = (guint32
)(gsize
)target
;
4000 if (ins
== ccode
[0]) {
4001 /* handles both thunk jump code and the far call sequence */
4002 code32
[2] = (guint32
)(gsize
)target
;
4005 g_assert_not_reached ();
4007 // g_print ("patched with 0x%08x\n", ins);
4011 arm_patch (guchar
*code
, const guchar
*target
)
4013 arm_patch_general (NULL
, NULL
, code
, target
);
4017 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
4018 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4019 * to be used with the emit macros.
4020 * Return -1 otherwise.
4023 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
4026 for (i
= 0; i
< 31; i
+= 2) {
4030 res
= (val
<< (32 - i
)) | (val
>> i
);
4033 *rot_amount
= i
? 32 - i
: 0;
4040 * Emits in code a sequence of instructions that load the value 'val'
4041 * into the dreg register. Uses at most 4 instructions.
4044 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
4046 int imm8
, rot_amount
;
4048 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4049 /* skip the constant pool */
4055 if (mini_debug_options
.single_imm_size
&& v7_supported
) {
4056 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4057 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4061 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
4062 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4063 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
4064 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4067 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4069 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4073 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
4075 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4077 if (val
& 0xFF0000) {
4078 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4080 if (val
& 0xFF000000) {
4081 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4083 } else if (val
& 0xFF00) {
4084 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4085 if (val
& 0xFF0000) {
4086 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4088 if (val
& 0xFF000000) {
4089 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4091 } else if (val
& 0xFF0000) {
4092 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4093 if (val
& 0xFF000000) {
4094 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4097 //g_assert_not_reached ();
4103 mono_arm_thumb_supported (void)
4105 return thumb_supported
;
4109 mono_arm_eabi_supported (void)
4111 return eabi_supported
;
4115 mono_arm_i8_align (void)
4123 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4128 call
= (MonoCallInst
*)ins
;
4129 cinfo
= call
->call_info
;
4131 switch (cinfo
->ret
.storage
) {
4132 case RegTypeStructByVal
:
4134 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4137 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4138 /* The JIT treats this as a normal call */
4142 /* Load the destination address */
4143 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
4145 if (arm_is_imm12 (loc
->inst_offset
)) {
4146 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
4148 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4149 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4152 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4153 int rsize
= cinfo
->ret
.struct_size
;
4155 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4156 g_assert (rsize
>= 0);
4161 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4164 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4167 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
4173 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4174 if (cinfo
->ret
.esize
== 4)
4175 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4177 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
4186 switch (ins
->opcode
) {
4189 case OP_FCALL_MEMBASE
:
4191 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4192 if (sig_ret
->type
== MONO_TYPE_R4
) {
4193 if (IS_HARD_FLOAT
) {
4194 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4196 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4197 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4200 if (IS_HARD_FLOAT
) {
4201 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
4203 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4210 case OP_RCALL_MEMBASE
: {
4215 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4216 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4217 if (IS_HARD_FLOAT
) {
4218 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4220 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4221 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4233 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4237 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4238 MonoInst
*last_ins
= NULL
;
4240 int imm8
, rot_amount
;
4242 /* we don't align basic blocks of loops on arm */
4244 if (cfg
->verbose_level
> 2)
4245 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4247 cpos
= bb
->max_offset
;
4249 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4250 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4251 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4252 code
= emit_call_seq (cfg
, code
);
4255 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4256 guint offset
= code
- cfg
->native_code
;
4257 set_code_cursor (cfg
, code
);
4258 max_len
= ins_get_size (ins
->opcode
);
4259 code
= realloc_code (cfg
, max_len
);
4260 // if (ins->cil_code)
4261 // g_print ("cil code\n");
4262 mono_debug_record_line_number (cfg
, ins
, offset
);
4264 switch (ins
->opcode
) {
4265 case OP_MEMORY_BARRIER
:
4267 ARM_DMB (code
, ARM_DMB_ISH
);
4268 } else if (v6_supported
) {
4269 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4270 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4274 code
= emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4277 code
= emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
4279 case OP_ATOMIC_EXCHANGE_I4
:
4280 case OP_ATOMIC_CAS_I4
:
4281 case OP_ATOMIC_ADD_I4
: {
4285 g_assert (v7_supported
);
4288 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4290 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4292 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4296 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4297 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4299 switch (ins
->opcode
) {
4300 case OP_ATOMIC_EXCHANGE_I4
:
4302 ARM_DMB (code
, ARM_DMB_ISH
);
4303 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4304 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4305 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4307 ARM_B_COND (code
, ARMCOND_NE
, 0);
4308 arm_patch (buf
[1], buf
[0]);
4310 case OP_ATOMIC_CAS_I4
:
4311 ARM_DMB (code
, ARM_DMB_ISH
);
4313 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4314 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4316 ARM_B_COND (code
, ARMCOND_NE
, 0);
4317 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4318 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4320 ARM_B_COND (code
, ARMCOND_NE
, 0);
4321 arm_patch (buf
[2], buf
[0]);
4322 arm_patch (buf
[1], code
);
4324 case OP_ATOMIC_ADD_I4
:
4326 ARM_DMB (code
, ARM_DMB_ISH
);
4327 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4328 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4329 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4330 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4332 ARM_B_COND (code
, ARMCOND_NE
, 0);
4333 arm_patch (buf
[1], buf
[0]);
4336 g_assert_not_reached ();
4339 ARM_DMB (code
, ARM_DMB_ISH
);
4340 if (tmpreg
!= ins
->dreg
)
4341 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4342 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4345 case OP_ATOMIC_LOAD_I1
:
4346 case OP_ATOMIC_LOAD_U1
:
4347 case OP_ATOMIC_LOAD_I2
:
4348 case OP_ATOMIC_LOAD_U2
:
4349 case OP_ATOMIC_LOAD_I4
:
4350 case OP_ATOMIC_LOAD_U4
:
4351 case OP_ATOMIC_LOAD_R4
:
4352 case OP_ATOMIC_LOAD_R8
: {
4353 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4354 ARM_DMB (code
, ARM_DMB_ISH
);
4356 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4358 switch (ins
->opcode
) {
4359 case OP_ATOMIC_LOAD_I1
:
4360 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4362 case OP_ATOMIC_LOAD_U1
:
4363 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4365 case OP_ATOMIC_LOAD_I2
:
4366 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4368 case OP_ATOMIC_LOAD_U2
:
4369 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4371 case OP_ATOMIC_LOAD_I4
:
4372 case OP_ATOMIC_LOAD_U4
:
4373 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4375 case OP_ATOMIC_LOAD_R4
:
4377 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4378 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4380 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4381 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4382 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4383 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4384 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4387 case OP_ATOMIC_LOAD_R8
:
4388 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4389 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4393 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4394 ARM_DMB (code
, ARM_DMB_ISH
);
4397 case OP_ATOMIC_STORE_I1
:
4398 case OP_ATOMIC_STORE_U1
:
4399 case OP_ATOMIC_STORE_I2
:
4400 case OP_ATOMIC_STORE_U2
:
4401 case OP_ATOMIC_STORE_I4
:
4402 case OP_ATOMIC_STORE_U4
:
4403 case OP_ATOMIC_STORE_R4
:
4404 case OP_ATOMIC_STORE_R8
: {
4405 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4406 ARM_DMB (code
, ARM_DMB_ISH
);
4408 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4410 switch (ins
->opcode
) {
4411 case OP_ATOMIC_STORE_I1
:
4412 case OP_ATOMIC_STORE_U1
:
4413 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4415 case OP_ATOMIC_STORE_I2
:
4416 case OP_ATOMIC_STORE_U2
:
4417 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4419 case OP_ATOMIC_STORE_I4
:
4420 case OP_ATOMIC_STORE_U4
:
4421 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4423 case OP_ATOMIC_STORE_R4
:
4425 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4426 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4428 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4429 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4430 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4431 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4432 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4435 case OP_ATOMIC_STORE_R8
:
4436 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4437 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4441 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4442 ARM_DMB (code
, ARM_DMB_ISH
);
4446 ARM_SMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4449 ARM_UMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4451 case OP_STOREI1_MEMBASE_IMM
:
4452 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4453 g_assert (arm_is_imm12 (ins
->inst_offset
));
4454 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4456 case OP_STOREI2_MEMBASE_IMM
:
4457 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4458 g_assert (arm_is_imm8 (ins
->inst_offset
));
4459 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4461 case OP_STORE_MEMBASE_IMM
:
4462 case OP_STOREI4_MEMBASE_IMM
:
4463 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4464 g_assert (arm_is_imm12 (ins
->inst_offset
));
4465 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4467 case OP_STOREI1_MEMBASE_REG
:
4468 g_assert (arm_is_imm12 (ins
->inst_offset
));
4469 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4471 case OP_STOREI2_MEMBASE_REG
:
4472 g_assert (arm_is_imm8 (ins
->inst_offset
));
4473 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4475 case OP_STORE_MEMBASE_REG
:
4476 case OP_STOREI4_MEMBASE_REG
:
4477 /* this case is special, since it happens for spill code after lowering has been called */
4478 if (arm_is_imm12 (ins
->inst_offset
)) {
4479 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4481 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4482 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4485 case OP_STOREI1_MEMINDEX
:
4486 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4488 case OP_STOREI2_MEMINDEX
:
4489 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4491 case OP_STORE_MEMINDEX
:
4492 case OP_STOREI4_MEMINDEX
:
4493 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4496 g_assert_not_reached ();
4498 case OP_LOAD_MEMINDEX
:
4499 case OP_LOADI4_MEMINDEX
:
4500 case OP_LOADU4_MEMINDEX
:
4501 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4503 case OP_LOADI1_MEMINDEX
:
4504 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4506 case OP_LOADU1_MEMINDEX
:
4507 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4509 case OP_LOADI2_MEMINDEX
:
4510 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4512 case OP_LOADU2_MEMINDEX
:
4513 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4515 case OP_LOAD_MEMBASE
:
4516 case OP_LOADI4_MEMBASE
:
4517 case OP_LOADU4_MEMBASE
:
4518 /* this case is special, since it happens for spill code after lowering has been called */
4519 if (arm_is_imm12 (ins
->inst_offset
)) {
4520 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4522 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4523 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4526 case OP_LOADI1_MEMBASE
:
4527 g_assert (arm_is_imm8 (ins
->inst_offset
));
4528 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4530 case OP_LOADU1_MEMBASE
:
4531 g_assert (arm_is_imm12 (ins
->inst_offset
));
4532 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4534 case OP_LOADU2_MEMBASE
:
4535 g_assert (arm_is_imm8 (ins
->inst_offset
));
4536 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4538 case OP_LOADI2_MEMBASE
:
4539 g_assert (arm_is_imm8 (ins
->inst_offset
));
4540 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4542 case OP_ICONV_TO_I1
:
4543 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4544 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4546 case OP_ICONV_TO_I2
:
4547 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4548 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4550 case OP_ICONV_TO_U1
:
4551 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4553 case OP_ICONV_TO_U2
:
4554 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4555 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4559 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4561 case OP_COMPARE_IMM
:
4562 case OP_ICOMPARE_IMM
:
4563 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4564 g_assert (imm8
>= 0);
4565 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4569 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4570 * So instead of emitting a trap, we emit a call a C function and place a
4573 //*(int*)code = 0xef9f0001;
4576 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4577 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4578 code
= emit_call_seq (cfg
, code
);
4580 case OP_RELAXED_NOP
:
4585 case OP_DUMMY_ICONST
:
4586 case OP_DUMMY_R8CONST
:
4587 case OP_DUMMY_R4CONST
:
4588 case OP_NOT_REACHED
:
4591 case OP_IL_SEQ_POINT
:
4592 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4594 case OP_SEQ_POINT
: {
4596 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4597 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4598 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4599 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4601 int dreg
= ARMREG_LR
;
4604 if (cfg
->soft_breakpoints
) {
4605 g_assert (!cfg
->compile_aot
);
4610 * For AOT, we use one got slot per method, which will point to a
4611 * SeqPointInfo structure, containing all the information required
4612 * by the code below.
4614 if (cfg
->compile_aot
) {
4615 g_assert (info_var
);
4616 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4619 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4621 * Read from the single stepping trigger page. This will cause a
4622 * SIGSEGV when single stepping is enabled.
4623 * We do this _before_ the breakpoint, so single stepping after
4624 * a breakpoint is hit will step to the next IL offset.
4626 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4629 /* Single step check */
4630 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4631 if (cfg
->soft_breakpoints
) {
4632 /* Load the address of the sequence point method variable. */
4633 var
= ss_method_var
;
4635 g_assert (var
->opcode
== OP_REGOFFSET
);
4636 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4637 /* Read the value and check whether it is non-zero. */
4638 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4639 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4640 /* Call it conditionally. */
4641 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4643 if (cfg
->compile_aot
) {
4644 /* Load the trigger page addr from the variable initialized in the prolog */
4645 var
= ss_trigger_page_var
;
4647 g_assert (var
->opcode
== OP_REGOFFSET
);
4648 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4650 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4652 *(int*)code
= (int)(gsize
)ss_trigger_page
;
4655 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4659 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4661 /* Breakpoint check */
4662 if (cfg
->compile_aot
) {
4663 const guint32 offset
= code
- cfg
->native_code
;
4667 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4668 /* Add the offset */
4669 val
= ((offset
/ 4) * sizeof (target_mgreg_t
)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4670 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4671 if (arm_is_imm12 ((int)val
)) {
4672 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4674 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4676 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4678 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4679 g_assert (!(val
& 0xFF000000));
4681 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4683 /* What is faster, a branch or a load ? */
4684 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4685 /* The breakpoint instruction */
4686 if (cfg
->soft_breakpoints
)
4687 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4689 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4690 } else if (cfg
->soft_breakpoints
) {
4691 /* Load the address of the breakpoint method into ip. */
4692 var
= bp_method_var
;
4694 g_assert (var
->opcode
== OP_REGOFFSET
);
4695 g_assert (arm_is_imm12 (var
->inst_offset
));
4696 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4699 * A placeholder for a possible breakpoint inserted by
4700 * mono_arch_set_breakpoint ().
4705 * A placeholder for a possible breakpoint inserted by
4706 * mono_arch_set_breakpoint ().
4708 for (i
= 0; i
< 4; ++i
)
4713 * Add an additional nop so skipping the bp doesn't cause the ip to point
4714 * to another IL offset.
4722 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4725 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4729 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4732 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4733 g_assert (imm8
>= 0);
4734 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4738 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4739 g_assert (imm8
>= 0);
4740 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4744 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4745 g_assert (imm8
>= 0);
4746 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4749 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4750 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4752 case OP_IADD_OVF_UN
:
4753 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4754 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4757 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4758 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4760 case OP_ISUB_OVF_UN
:
4761 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4762 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4764 case OP_ADD_OVF_CARRY
:
4765 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4766 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4768 case OP_ADD_OVF_UN_CARRY
:
4769 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4770 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4772 case OP_SUB_OVF_CARRY
:
4773 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4774 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4776 case OP_SUB_OVF_UN_CARRY
:
4777 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4778 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4782 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4785 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4786 g_assert (imm8
>= 0);
4787 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4790 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4794 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4798 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4799 g_assert (imm8
>= 0);
4800 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4804 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4805 g_assert (imm8
>= 0);
4806 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4808 case OP_ARM_RSBS_IMM
:
4809 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4810 g_assert (imm8
>= 0);
4811 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4813 case OP_ARM_RSC_IMM
:
4814 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4815 g_assert (imm8
>= 0);
4816 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4819 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4823 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4824 g_assert (imm8
>= 0);
4825 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4828 g_assert (v7s_supported
|| v7k_supported
);
4829 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4832 g_assert (v7s_supported
|| v7k_supported
);
4833 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4836 g_assert (v7s_supported
|| v7k_supported
);
4837 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4838 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4841 g_assert (v7s_supported
|| v7k_supported
);
4842 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4843 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4847 g_assert_not_reached ();
4849 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4853 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4854 g_assert (imm8
>= 0);
4855 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4858 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4862 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4863 g_assert (imm8
>= 0);
4864 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4867 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4872 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4873 else if (ins
->dreg
!= ins
->sreg1
)
4874 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4877 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4882 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4883 else if (ins
->dreg
!= ins
->sreg1
)
4884 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4887 case OP_ISHR_UN_IMM
:
4889 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4890 else if (ins
->dreg
!= ins
->sreg1
)
4891 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4894 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4897 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4900 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4903 if (ins
->dreg
== ins
->sreg2
)
4904 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4906 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4909 g_assert_not_reached ();
4912 /* FIXME: handle ovf/ sreg2 != dreg */
4913 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4914 /* FIXME: MUL doesn't set the C/O flags on ARM */
4916 case OP_IMUL_OVF_UN
:
4917 /* FIXME: handle ovf/ sreg2 != dreg */
4918 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4919 /* FIXME: MUL doesn't set the C/O flags on ARM */
4922 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4925 /* Load the GOT offset */
4926 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
4927 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4929 *(gpointer
*)code
= NULL
;
4931 /* Load the value from the GOT */
4932 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4934 case OP_OBJC_GET_SELECTOR
:
4935 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4936 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4938 *(gpointer
*)code
= NULL
;
4940 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4942 case OP_ICONV_TO_I4
:
4943 case OP_ICONV_TO_U4
:
4945 if (ins
->dreg
!= ins
->sreg1
)
4946 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4949 int saved
= ins
->sreg2
;
4950 if (ins
->sreg2
== ARM_LSW_REG
) {
4951 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4954 if (ins
->sreg1
!= ARM_LSW_REG
)
4955 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4956 if (saved
!= ARM_MSW_REG
)
4957 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4961 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4962 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4965 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4966 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4968 case OP_MOVE_F_TO_I4
:
4970 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4972 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4973 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4974 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4975 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4978 case OP_MOVE_I4_TO_F
:
4980 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4982 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4983 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4986 case OP_FCONV_TO_R4
:
4989 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4991 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4992 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4997 case OP_TAILCALL_PARAMETER
:
4998 // This opcode helps compute sizes, i.e.
4999 // of the subsequent OP_TAILCALL, but contributes no code.
5000 g_assert (ins
->next
);
5004 case OP_TAILCALL_MEMBASE
:
5005 case OP_TAILCALL_REG
: {
5006 gboolean
const tailcall_membase
= ins
->opcode
== OP_TAILCALL_MEMBASE
;
5007 gboolean
const tailcall_reg
= ins
->opcode
== OP_TAILCALL_REG
;
5008 MonoCallInst
*call
= (MonoCallInst
*)ins
;
5010 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
5013 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5015 code
= realloc_code (cfg
, max_len
);
5017 // For reg and membase, get destination in IP.
5020 g_assert (ins
->sreg1
> -1);
5021 if (ins
->sreg1
!= ARMREG_IP
)
5022 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg1
);
5023 } else if (tailcall_membase
) {
5024 g_assert (ins
->sreg1
> -1);
5025 if (!arm_is_imm12 (ins
->inst_offset
)) {
5026 g_assert (ins
->sreg1
!= ARMREG_IP
); // temp in emit_big_add
5027 code
= emit_big_add (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5028 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_IP
, 0);
5030 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5035 * The stack looks like the following:
5036 * <caller argument area>
5039 * <callee argument area>
5040 * <optionally saved IP> (about to be)
5041 * Need to copy the arguments from the callee argument area to
5042 * the caller argument area, and pop the frame.
5044 if (call
->stack_usage
) {
5045 int i
, prev_sp_offset
= 0;
5047 // When we get here, the parameters to the tailcall are already formed,
5048 // in registers and at the bottom of the grow-down stack.
5050 // Our goal is generally preserve parameters, and trim the stack,
5051 // and, before trimming stack, move parameters from the bottom of the
5052 // frame to the bottom of the trimmed frame.
5054 // For the case of large frames, and presently therefore always,
5055 // IP is used as an adjusted frame_reg.
5056 // Be conservative and save IP around the movement
5057 // of parameters from the bottom of frame to top of the frame.
5058 const gboolean save_ip
= tailcall_membase
|| tailcall_reg
;
5060 ARM_PUSH (code
, 1 << ARMREG_IP
);
5062 // When moving stacked parameters from the bottom
5063 // of the frame (sp) to the top of the frame (ip),
5064 // account, 0 or 4, for the conditional save of IP.
5065 const int offset_sp
= save_ip
? 4 : 0;
5066 const int offset_ip
= (save_ip
&& (cfg
->frame_reg
== ARMREG_SP
)) ? 4 : 0;
5068 /* Compute size of saved registers restored below */
5070 prev_sp_offset
= 2 * 4;
5072 prev_sp_offset
= 1 * 4;
5073 for (i
= 0; i
< 16; ++i
) {
5074 if (cfg
->used_int_regs
& (1 << i
))
5075 prev_sp_offset
+= 4;
5078 // Point IP at the start of where the parameters will go after trimming stack.
5079 // After locals and saved registers.
5080 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
5082 /* Copy arguments on the stack to our argument area */
5083 // FIXME a fixed size memcpy is desirable here,
5084 // at least for larger values of stack_usage.
5086 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5087 // See https://github.com/mono/mono/pull/12079
5088 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5089 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
5090 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
+ offset_sp
);
5091 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
+ offset_ip
);
5095 ARM_POP (code
, 1 << ARMREG_IP
);
5099 * Keep in sync with mono_arch_emit_epilog
5101 g_assert (!cfg
->method
->save_lmf
);
5102 code
= emit_big_add_temp (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
, ARMREG_LR
);
5104 if (cfg
->used_int_regs
)
5105 ARM_POP (code
, cfg
->used_int_regs
);
5106 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
5108 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
5111 if (tailcall_reg
|| tailcall_membase
) {
5112 code
= emit_jmp_reg (code
, ARMREG_IP
);
5114 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
5116 if (cfg
->compile_aot
) {
5117 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5119 *(gpointer
*)code
= NULL
;
5121 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
5123 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5124 cfg
->thunk_area
+= THUNK_SIZE
;
5130 /* ensure ins->sreg1 is not NULL */
5131 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
5134 g_assert (cfg
->sig_cookie
< 128);
5135 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
5136 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5146 call
= (MonoCallInst
*)ins
;
5149 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5151 mono_call_add_patch_info (cfg
, call
, code
- cfg
->native_code
);
5153 code
= emit_call_seq (cfg
, code
);
5154 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5155 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5156 code
= emit_move_return_value (cfg
, ins
, code
);
5163 case OP_VOIDCALL_REG
:
5166 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5168 code
= emit_call_reg (code
, ins
->sreg1
);
5169 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5170 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5171 code
= emit_move_return_value (cfg
, ins
, code
);
5173 case OP_FCALL_MEMBASE
:
5174 case OP_RCALL_MEMBASE
:
5175 case OP_LCALL_MEMBASE
:
5176 case OP_VCALL_MEMBASE
:
5177 case OP_VCALL2_MEMBASE
:
5178 case OP_VOIDCALL_MEMBASE
:
5179 case OP_CALL_MEMBASE
: {
5180 g_assert (ins
->sreg1
!= ARMREG_LR
);
5181 call
= (MonoCallInst
*)ins
;
5184 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5185 if (!arm_is_imm12 (ins
->inst_offset
)) {
5186 /* sreg1 might be IP */
5187 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5188 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5189 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5190 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5191 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5193 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5194 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5196 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5197 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5198 code
= emit_move_return_value (cfg
, ins
, code
);
5201 case OP_GENERIC_CLASS_INIT
: {
5205 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
5207 g_assert (arm_is_imm8 (byte_offset
));
5208 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5209 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5211 ARM_B_COND (code
, ARMCOND_NE
, 0);
5213 /* Uninitialized case */
5214 g_assert (ins
->sreg1
== ARMREG_R0
);
5216 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5217 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
5218 code
= emit_call_seq (cfg
, code
);
5220 /* Initialized case */
5221 arm_patch (jump
, code
);
5225 /* round the size to 8 bytes */
5226 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5227 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5228 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5229 /* memzero the area: dreg holds the size, sp is the pointer */
5230 if (ins
->flags
& MONO_INST_INIT
) {
5231 guint8
*start_loop
, *branch_to_cond
;
5232 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5233 branch_to_cond
= code
;
5236 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5237 arm_patch (branch_to_cond
, code
);
5238 /* decrement by 4 and set flags */
5239 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (target_mgreg_t
));
5240 ARM_B_COND (code
, ARMCOND_GE
, 0);
5241 arm_patch (code
- 4, start_loop
);
5243 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5244 if (cfg
->param_area
)
5245 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5250 MonoInst
*var
= cfg
->dyn_call_var
;
5251 guint8
*labels
[16];
5253 g_assert (var
->opcode
== OP_REGOFFSET
);
5254 g_assert (arm_is_imm12 (var
->inst_offset
));
5256 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5257 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5259 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5261 /* Save args buffer */
5262 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5264 /* Set fp argument registers */
5265 if (IS_HARD_FLOAT
) {
5266 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5267 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5269 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5270 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5271 const int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5272 g_assert (arm_is_fpimm8 (offset
));
5273 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5275 arm_patch (labels
[0], code
);
5278 /* Allocate callee area */
5279 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5280 ARM_SHL_IMM (code
, ARMREG_R1
, ARMREG_R1
, 2);
5281 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_R1
);
5283 /* Set stack args */
5285 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5286 /* R2 = pointer into regs */
5287 code
= emit_big_add (code
, ARMREG_R2
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (PARAM_REGS
* sizeof (target_mgreg_t
)));
5288 /* R3 = pointer to stack */
5289 ARM_MOV_REG_REG (code
, ARMREG_R3
, ARMREG_SP
);
5292 ARM_B_COND (code
, ARMCOND_AL
, 0);
5294 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R2
, 0);
5295 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R3
, 0);
5296 ARM_ADD_REG_IMM (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
), 0);
5297 ARM_ADD_REG_IMM (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
), 0);
5298 ARM_SUB_REG_IMM (code
, ARMREG_R1
, ARMREG_R1
, 1, 0);
5299 arm_patch (labels
[0], code
);
5300 ARM_CMP_REG_IMM (code
, ARMREG_R1
, 0, 0);
5302 ARM_B_COND (code
, ARMCOND_GT
, 0);
5303 arm_patch (labels
[2], labels
[1]);
5305 /* Set argument registers */
5306 for (i
= 0; i
< PARAM_REGS
; ++i
)
5307 ARM_LDR_IMM (code
, i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (i
* sizeof (target_mgreg_t
)));
5310 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5311 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5314 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5315 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5316 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5318 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5322 if (ins
->sreg1
!= ARMREG_R0
)
5323 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5324 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5325 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
5326 code
= emit_call_seq (cfg
, code
);
5330 if (ins
->sreg1
!= ARMREG_R0
)
5331 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5332 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5333 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
5334 code
= emit_call_seq (cfg
, code
);
5337 case OP_START_HANDLER
: {
5338 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5339 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5342 /* Reserve a param area, see filter-stack.exe */
5344 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5345 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5347 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5348 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5352 if (arm_is_imm12 (spvar
->inst_offset
)) {
5353 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5355 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5356 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5360 case OP_ENDFILTER
: {
5361 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5362 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5365 /* Free the param area */
5367 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5368 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5370 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5371 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5375 if (ins
->sreg1
!= ARMREG_R0
)
5376 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5377 if (arm_is_imm12 (spvar
->inst_offset
)) {
5378 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5380 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5381 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5382 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5384 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5387 case OP_ENDFINALLY
: {
5388 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5389 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5392 /* Free the param area */
5394 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5395 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5397 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5398 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5402 if (arm_is_imm12 (spvar
->inst_offset
)) {
5403 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5405 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5406 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5407 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5409 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5412 case OP_CALL_HANDLER
:
5413 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5414 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5415 cfg
->thunk_area
+= THUNK_SIZE
;
5416 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
5417 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
5420 if (ins
->dreg
!= ARMREG_R0
)
5421 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5425 ins
->inst_c0
= code
- cfg
->native_code
;
5428 /*if (ins->inst_target_bb->native_offset) {
5430 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5432 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5433 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5437 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5441 * In the normal case we have:
5442 * ldr pc, [pc, ins->sreg1 << 2]
5445 * ldr lr, [pc, ins->sreg1 << 2]
5447 * After follows the data.
5448 * FIXME: add aot support.
5450 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5451 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5452 code
= realloc_code (cfg
, max_len
);
5453 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5455 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5459 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5460 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5464 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5465 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5469 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5470 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5474 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5475 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5479 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5480 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5483 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5484 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5487 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5488 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5491 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5492 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5495 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5496 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5499 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5500 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5502 case OP_COND_EXC_EQ
:
5503 case OP_COND_EXC_NE_UN
:
5504 case OP_COND_EXC_LT
:
5505 case OP_COND_EXC_LT_UN
:
5506 case OP_COND_EXC_GT
:
5507 case OP_COND_EXC_GT_UN
:
5508 case OP_COND_EXC_GE
:
5509 case OP_COND_EXC_GE_UN
:
5510 case OP_COND_EXC_LE
:
5511 case OP_COND_EXC_LE_UN
:
5512 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5514 case OP_COND_EXC_IEQ
:
5515 case OP_COND_EXC_INE_UN
:
5516 case OP_COND_EXC_ILT
:
5517 case OP_COND_EXC_ILT_UN
:
5518 case OP_COND_EXC_IGT
:
5519 case OP_COND_EXC_IGT_UN
:
5520 case OP_COND_EXC_IGE
:
5521 case OP_COND_EXC_IGE_UN
:
5522 case OP_COND_EXC_ILE
:
5523 case OP_COND_EXC_ILE_UN
:
5524 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5527 case OP_COND_EXC_IC
:
5528 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5530 case OP_COND_EXC_OV
:
5531 case OP_COND_EXC_IOV
:
5532 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5534 case OP_COND_EXC_NC
:
5535 case OP_COND_EXC_INC
:
5536 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5538 case OP_COND_EXC_NO
:
5539 case OP_COND_EXC_INO
:
5540 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5552 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5555 /* floating point opcodes */
5557 if (cfg
->compile_aot
) {
5558 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5560 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5562 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5565 /* FIXME: we can optimize the imm load by dealing with part of
5566 * the displacement in LDFD (aligning to 512).
5568 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5569 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5573 if (cfg
->compile_aot
) {
5574 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5576 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5579 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5581 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5582 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5584 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5587 case OP_STORER8_MEMBASE_REG
:
5588 /* This is generated by the local regalloc pass which runs after the lowering pass */
5589 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5590 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5591 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5592 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5594 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5597 case OP_LOADR8_MEMBASE
:
5598 /* This is generated by the local regalloc pass which runs after the lowering pass */
5599 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5600 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5601 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5602 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5604 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5607 case OP_STORER4_MEMBASE_REG
:
5608 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5610 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5612 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5613 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5614 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5615 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5618 case OP_LOADR4_MEMBASE
:
5620 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5622 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5623 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5624 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5625 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5626 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5629 case OP_ICONV_TO_R_UN
: {
5630 g_assert_not_reached ();
5633 case OP_ICONV_TO_R4
:
5635 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5636 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5638 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5639 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5640 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5641 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5642 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5645 case OP_ICONV_TO_R8
:
5646 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5647 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5648 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5649 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5653 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature_internal (cfg
->method
)->ret
);
5654 if (sig_ret
->type
== MONO_TYPE_R4
) {
5656 if (IS_HARD_FLOAT
) {
5657 if (ins
->sreg1
!= ARM_VFP_D0
)
5658 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5660 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5663 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5666 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5670 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5672 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5676 case OP_FCONV_TO_I1
:
5677 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5679 case OP_FCONV_TO_U1
:
5680 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5682 case OP_FCONV_TO_I2
:
5683 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5685 case OP_FCONV_TO_U2
:
5686 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5688 case OP_FCONV_TO_I4
:
5690 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5692 case OP_FCONV_TO_U4
:
5694 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5696 case OP_FCONV_TO_I8
:
5697 case OP_FCONV_TO_U8
:
5698 g_assert_not_reached ();
5699 /* Implemented as helper calls */
5701 case OP_LCONV_TO_R_UN
:
5702 g_assert_not_reached ();
5703 /* Implemented as helper calls */
5705 case OP_LCONV_TO_OVF_I4_2
: {
5706 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5708 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5711 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5712 high_bit_not_set
= code
;
5713 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5715 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5716 valid_negative
= code
;
5717 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5718 invalid_negative
= code
;
5719 ARM_B_COND (code
, ARMCOND_AL
, 0);
5721 arm_patch (high_bit_not_set
, code
);
5723 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5724 valid_positive
= code
;
5725 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5727 arm_patch (invalid_negative
, code
);
5728 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5730 arm_patch (valid_negative
, code
);
5731 arm_patch (valid_positive
, code
);
5733 if (ins
->dreg
!= ins
->sreg1
)
5734 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5738 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5741 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5744 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5747 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5750 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5754 g_assert_not_reached ();
5758 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5764 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5769 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5772 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5773 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5777 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5780 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5781 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5785 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5788 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5789 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5790 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5794 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5797 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5798 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5802 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5805 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5806 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5807 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5811 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5814 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5815 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5819 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5822 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5823 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5827 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5830 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5831 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5834 /* ARM FPA flags table:
5835 * N Less than ARMCOND_MI
5836 * Z Equal ARMCOND_EQ
5837 * C Greater Than or Equal ARMCOND_CS
5838 * V Unordered ARMCOND_VS
5841 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5844 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5847 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5850 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5851 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5857 g_assert_not_reached ();
5861 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5863 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5864 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5865 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5869 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5870 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5875 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5876 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5878 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5879 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5881 *(guint32
*)code
= 0xffffffff;
5883 *(guint32
*)code
= 0x7fefffff;
5885 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5887 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5888 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5890 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5891 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5893 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5894 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5899 case OP_RCONV_TO_I1
:
5900 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5902 case OP_RCONV_TO_U1
:
5903 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5905 case OP_RCONV_TO_I2
:
5906 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5908 case OP_RCONV_TO_U2
:
5909 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5911 case OP_RCONV_TO_I4
:
5912 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5914 case OP_RCONV_TO_U4
:
5915 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5917 case OP_RCONV_TO_R4
:
5919 if (ins
->dreg
!= ins
->sreg1
)
5920 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5922 case OP_RCONV_TO_R8
:
5924 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5927 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5930 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5933 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5936 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5939 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5943 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5946 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5947 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5951 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5954 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5955 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5959 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5962 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5963 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5964 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5968 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5971 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5972 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5976 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5979 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5980 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5981 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5985 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5988 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5989 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5993 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5996 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5997 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
6001 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
6004 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
6005 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
6008 case OP_GC_LIVENESS_DEF
:
6009 case OP_GC_LIVENESS_USE
:
6010 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
6011 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6013 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
6014 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6015 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
6017 case OP_LIVERANGE_START
: {
6018 if (cfg
->verbose_level
> 1)
6019 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6020 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
6023 case OP_LIVERANGE_END
: {
6024 if (cfg
->verbose_level
> 1)
6025 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6026 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
6029 case OP_GC_SAFE_POINT
: {
6032 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
6033 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
6035 ARM_B_COND (code
, ARMCOND_EQ
, 0);
6036 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
6037 code
= emit_call_seq (cfg
, code
);
6038 arm_patch (buf
[0], code
);
6041 case OP_FILL_PROF_CALL_CTX
:
6042 for (int i
= 0; i
< ARMREG_MAX
; i
++)
6043 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
6044 ARM_STR_IMM (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
6047 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
6048 g_assert_not_reached ();
6051 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
6052 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6053 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
6054 g_assert_not_reached ();
6062 set_code_cursor (cfg
, code
);
6065 #endif /* DISABLE_JIT */
6068 mono_arch_register_lowlevel_calls (void)
6070 /* The signature doesn't matter */
6071 mono_register_jit_icall (mono_arm_throw_exception
, mono_icall_sig_void
, TRUE
);
6072 mono_register_jit_icall (mono_arm_throw_exception_by_token
, mono_icall_sig_void
, TRUE
);
6073 mono_register_jit_icall (mono_arm_unaligned_stack
, mono_icall_sig_void
, TRUE
);
6076 #define patch_lis_ori(ip,val) do {\
6077 guint16 *__lis_ori = (guint16*)(ip); \
6078 __lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
6079 __lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
6083 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
6085 unsigned char *ip
= ji
->ip
.i
+ code
;
6087 if (ji
->type
== MONO_PATCH_INFO_SWITCH
) {
6091 case MONO_PATCH_INFO_SWITCH
: {
6092 gpointer
*jt
= (gpointer
*)(ip
+ 8);
6094 /* jt is the inlined jump table, 2 instructions after ip
6095 * In the normal case we store the absolute addresses,
6096 * otherwise the displacements.
6098 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++)
6099 jt
[i
] = code
+ (int)(gsize
)ji
->data
.table
->table
[i
];
6102 case MONO_PATCH_INFO_IP
:
6103 g_assert_not_reached ();
6104 patch_lis_ori (ip
, ip
);
6106 case MONO_PATCH_INFO_METHODCONST
:
6107 case MONO_PATCH_INFO_CLASS
:
6108 case MONO_PATCH_INFO_IMAGE
:
6109 case MONO_PATCH_INFO_FIELD
:
6110 case MONO_PATCH_INFO_VTABLE
:
6111 case MONO_PATCH_INFO_IID
:
6112 case MONO_PATCH_INFO_SFLDA
:
6113 case MONO_PATCH_INFO_LDSTR
:
6114 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
6115 case MONO_PATCH_INFO_LDTOKEN
:
6116 g_assert_not_reached ();
6117 /* from OP_AOTCONST : lis + ori */
6118 patch_lis_ori (ip
, target
);
6120 case MONO_PATCH_INFO_R4
:
6121 case MONO_PATCH_INFO_R8
:
6122 g_assert_not_reached ();
6123 *((gconstpointer
*)(ip
+ 2)) = target
;
6125 case MONO_PATCH_INFO_EXC_NAME
:
6126 g_assert_not_reached ();
6127 *((gconstpointer
*)(ip
+ 1)) = target
;
6129 case MONO_PATCH_INFO_NONE
:
6130 case MONO_PATCH_INFO_BB_OVF
:
6131 case MONO_PATCH_INFO_EXC_OVF
:
6132 /* everything is dealt with at epilog output time */
6135 arm_patch_general (cfg
, domain
, ip
, (const guchar
*)target
);
6141 mono_arm_unaligned_stack (MonoMethod
*method
)
6143 g_assert_not_reached ();
/*
 * Stack frame layout (section labels partially reconstructed — two were lost
 * in extraction):
 *
 *   ------------------- fp
 *   	MonoLMF structure or saved registers
 *   -------------------
 *   	locals
 *   -------------------
 *   	spilled regs
 *   -------------------
 *   	param area             size is cfg->param_area
 *   ------------------- sp
 */
6162 mono_arch_emit_prolog (MonoCompile
*cfg
)
6164 MonoMethod
*method
= cfg
->method
;
6166 MonoMethodSignature
*sig
;
6168 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6172 int prev_sp_offset
, reg_offset
;
6174 sig
= mono_method_signature_internal (method
);
6175 cfg
->code_size
= 256 + sig
->param_count
* 64;
6176 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6178 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6180 alloc_size
= cfg
->stack_offset
;
6186 * The iphone uses R7 as the frame pointer, and it points at the saved
6191 * We can't use r7 as a frame pointer since it points into the middle of
6192 * the frame, so we keep using our own frame pointer.
6193 * FIXME: Optimize this.
6195 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6196 prev_sp_offset
+= 8; /* r7 and lr */
6197 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6198 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6199 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6202 if (!method
->save_lmf
) {
6204 /* No need to push LR again */
6205 if (cfg
->used_int_regs
)
6206 ARM_PUSH (code
, cfg
->used_int_regs
);
6208 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6209 prev_sp_offset
+= 4;
6211 for (i
= 0; i
< 16; ++i
) {
6212 if (cfg
->used_int_regs
& (1 << i
))
6213 prev_sp_offset
+= 4;
6215 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6217 for (i
= 0; i
< 16; ++i
) {
6218 if ((cfg
->used_int_regs
& (1 << i
))) {
6219 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6220 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6224 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6225 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6227 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6228 ARM_PUSH (code
, 0x5ff0);
6229 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6230 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6232 for (i
= 0; i
< 16; ++i
) {
6233 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6234 /* The original r7 is saved at the start */
6235 if (!(iphone_abi
&& i
== ARMREG_R7
))
6236 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6240 g_assert (reg_offset
== 4 * 10);
6241 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (4 * 10);
6245 orig_alloc_size
= alloc_size
;
6246 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6247 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6248 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6249 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6252 /* the stack used in the pushed regs */
6253 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6254 cfg
->stack_usage
= alloc_size
;
6256 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6257 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6259 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6260 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6262 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6264 if (cfg
->frame_reg
!= ARMREG_SP
) {
6265 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6266 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6268 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6269 prev_sp_offset
+= alloc_size
;
6271 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6272 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6274 /* compute max_offset in order to use short forward jumps
6275 * we could skip do it on arm because the immediate displacement
6276 * for jumps is large enough, it may be useful later for constant pools
6279 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6280 MonoInst
*ins
= bb
->code
;
6281 bb
->max_offset
= max_offset
;
6283 MONO_BB_FOR_EACH_INS (bb
, ins
)
6284 max_offset
+= ins_get_size (ins
->opcode
);
6287 /* stack alignment check */
6291 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6292 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6293 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6294 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6296 ARM_B_COND (code, ARMCOND_EQ, 0);
6297 if (cfg->compile_aot)
6298 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6300 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6301 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6302 code = emit_call_seq (cfg, code);
6303 arm_patch (buf [0], code);
6307 /* store runtime generic context */
6308 if (cfg
->rgctx_var
) {
6309 MonoInst
*ins
= cfg
->rgctx_var
;
6311 g_assert (ins
->opcode
== OP_REGOFFSET
);
6313 if (arm_is_imm12 (ins
->inst_offset
)) {
6314 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6316 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6317 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6320 mono_add_var_location (cfg
, cfg
->rgctx_var
, TRUE
, MONO_ARCH_RGCTX_REG
, 0, 0, code
- cfg
->native_code
);
6321 mono_add_var_location (cfg
, cfg
->rgctx_var
, FALSE
, ins
->inst_basereg
, ins
->inst_offset
, code
- cfg
->native_code
, 0);
6324 /* load arguments allocated to register from the stack */
6325 cinfo
= get_call_info (NULL
, sig
);
6327 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6328 ArgInfo
*ainfo
= &cinfo
->ret
;
6329 inst
= cfg
->vret_addr
;
6330 g_assert (arm_is_imm12 (inst
->inst_offset
));
6331 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6334 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6335 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6337 /* Save the sig cookie address */
6338 g_assert (cookie
->storage
== RegTypeBase
);
6340 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6341 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6342 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6343 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6346 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6347 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6348 inst
= cfg
->args
[i
];
6350 if (cfg
->verbose_level
> 2)
6351 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6353 if (inst
->opcode
== OP_REGVAR
) {
6354 if (ainfo
->storage
== RegTypeGeneral
)
6355 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6356 else if (ainfo
->storage
== RegTypeFP
) {
6357 g_assert_not_reached ();
6358 } else if (ainfo
->storage
== RegTypeBase
) {
6359 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6360 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6362 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6363 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6366 g_assert_not_reached ();
6368 if (i
== 0 && sig
->hasthis
) {
6369 g_assert (ainfo
->storage
== RegTypeGeneral
);
6370 mono_add_var_location (cfg
, inst
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6371 mono_add_var_location (cfg
, inst
, TRUE
, inst
->dreg
, 0, code
- cfg
->native_code
, 0);
6374 if (cfg
->verbose_level
> 2)
6375 g_print ("Argument %d assigned to register %s\n", i
, mono_arch_regname (inst
->dreg
));
6377 switch (ainfo
->storage
) {
6379 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6380 if (ainfo
->esize
== 4)
6381 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6383 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6386 case RegTypeGeneral
:
6387 case RegTypeIRegPair
:
6388 case RegTypeGSharedVtInReg
:
6389 case RegTypeStructByAddr
:
6390 switch (ainfo
->size
) {
6392 if (arm_is_imm12 (inst
->inst_offset
))
6393 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6395 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6396 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6400 if (arm_is_imm8 (inst
->inst_offset
)) {
6401 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6403 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6404 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6408 if (arm_is_imm12 (inst
->inst_offset
)) {
6409 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6411 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6412 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6414 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6415 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6417 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6418 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6422 if (arm_is_imm12 (inst
->inst_offset
)) {
6423 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6425 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6426 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6430 if (i
== 0 && sig
->hasthis
) {
6431 g_assert (ainfo
->storage
== RegTypeGeneral
);
6432 mono_add_var_location (cfg
, inst
, TRUE
, ainfo
->reg
, 0, 0, code
- cfg
->native_code
);
6433 mono_add_var_location (cfg
, inst
, FALSE
, inst
->inst_basereg
, inst
->inst_offset
, code
- cfg
->native_code
, 0);
6436 case RegTypeBaseGen
:
6437 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6438 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6440 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6441 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6443 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6444 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6445 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6447 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6448 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6449 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6450 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6454 case RegTypeGSharedVtOnStack
:
6455 case RegTypeStructByAddrOnStack
:
6456 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6457 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6459 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6460 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6463 switch (ainfo
->size
) {
6465 if (arm_is_imm8 (inst
->inst_offset
)) {
6466 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6468 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6469 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6473 if (arm_is_imm8 (inst
->inst_offset
)) {
6474 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6476 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6477 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6481 if (arm_is_imm12 (inst
->inst_offset
)) {
6482 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6484 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6485 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6487 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6488 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6490 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6491 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6493 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6494 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6496 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6497 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6501 if (arm_is_imm12 (inst
->inst_offset
)) {
6502 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6504 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6505 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6511 int imm8
, rot_amount
;
6513 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6514 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6515 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6517 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6519 if (ainfo
->size
== 8)
6520 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6522 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6525 case RegTypeStructByVal
: {
6526 int doffset
= inst
->inst_offset
;
6530 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6531 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6532 if (arm_is_imm12 (doffset
)) {
6533 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6535 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6536 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6538 soffset
+= sizeof (target_mgreg_t
);
6539 doffset
+= sizeof (target_mgreg_t
);
6541 if (ainfo
->vtsize
) {
6542 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6543 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6544 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6549 g_assert_not_reached ();
6555 if (method
->save_lmf
)
6556 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6558 if (cfg
->arch
.seq_point_info_var
) {
6559 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6561 /* Initialize the variable from a GOT slot */
6562 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6563 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6565 *(gpointer
*)code
= NULL
;
6567 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6569 g_assert (ins
->opcode
== OP_REGOFFSET
);
6571 if (arm_is_imm12 (ins
->inst_offset
)) {
6572 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6574 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6575 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6579 /* Initialize ss_trigger_page_var */
6580 if (!cfg
->soft_breakpoints
) {
6581 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6582 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6583 int dreg
= ARMREG_LR
;
6586 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6588 code
= emit_ldr_imm (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6589 /* Load the trigger page addr */
6590 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6591 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6595 if (cfg
->arch
.seq_point_ss_method_var
) {
6596 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6597 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6599 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6600 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6602 if (cfg
->compile_aot
) {
6603 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6604 int dreg
= ARMREG_LR
;
6606 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6607 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6609 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6610 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
6611 ARM_STR_IMM (code
, dreg
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6613 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6614 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6616 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6618 *(gpointer
*)code
= &single_step_tramp
;
6620 *(gpointer
*)code
= breakpoint_tramp
;
6623 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6624 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6625 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6626 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6630 set_code_cursor (cfg
, code
);
6637 mono_arch_emit_epilog (MonoCompile
*cfg
)
6639 MonoMethod
*method
= cfg
->method
;
6640 int pos
, i
, rot_amount
;
6641 int max_epilog_size
= 16 + 20*4;
6645 if (cfg
->method
->save_lmf
)
6646 max_epilog_size
+= 128;
6648 code
= realloc_code (cfg
, max_epilog_size
);
6650 /* Save the uwind state which is needed by the out-of-line code */
6651 mono_emit_unwind_op_remember_state (cfg
, code
);
6655 /* Load returned vtypes into registers if needed */
6656 cinfo
= cfg
->arch
.cinfo
;
6657 switch (cinfo
->ret
.storage
) {
6658 case RegTypeStructByVal
: {
6659 MonoInst
*ins
= cfg
->ret
;
6661 if (cinfo
->ret
.nregs
== 1) {
6662 if (arm_is_imm12 (ins
->inst_offset
)) {
6663 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6665 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6666 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6669 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6670 int offset
= ins
->inst_offset
+ (i
* 4);
6671 if (arm_is_imm12 (offset
)) {
6672 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6674 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6675 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6682 MonoInst
*ins
= cfg
->ret
;
6684 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6685 if (cinfo
->ret
.esize
== 4)
6686 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6688 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6696 if (method
->save_lmf
) {
6697 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6698 /* all but r0-r3, sp and pc */
6699 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6702 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6704 /* This points to r4 inside MonoLMF->iregs */
6705 sp_adj
= (MONO_ABI_SIZEOF (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6707 regmask
= 0x9ff0; /* restore lr to pc */
6708 /* Skip caller saved registers not used by the method */
6709 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6710 regmask
&= ~(1 << reg
);
6715 /* Restored later */
6716 regmask
&= ~(1 << ARMREG_PC
);
6717 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6718 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6719 for (i
= 0; i
< 16; i
++) {
6720 if (regmask
& (1 << i
))
6723 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6725 ARM_POP (code
, regmask
);
6727 for (i
= 0; i
< 16; i
++) {
6728 if (regmask
& (1 << i
))
6729 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6731 /* Restore saved r7, restore LR to PC */
6732 /* Skip lr from the lmf */
6733 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6734 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (target_mgreg_t
), 0);
6735 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6736 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6739 int i
, nused_int_regs
= 0;
6741 for (i
= 0; i
< 16; i
++) {
6742 if (cfg
->used_int_regs
& (1 << i
))
6746 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6747 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6749 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6750 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6753 if (cfg
->frame_reg
!= ARMREG_SP
) {
6754 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6758 /* Restore saved gregs */
6759 if (cfg
->used_int_regs
) {
6760 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6761 ARM_POP (code
, cfg
->used_int_regs
);
6762 for (i
= 0; i
< 16; i
++) {
6763 if (cfg
->used_int_regs
& (1 << i
))
6764 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6767 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6768 /* Restore saved r7, restore LR to PC */
6769 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6771 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6772 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6776 /* Restore the unwind state to be the same as before the epilog */
6777 mono_emit_unwind_op_restore_state (cfg
, code
);
6779 set_code_cursor (cfg
, code
);
6784 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6786 MonoJumpInfo
*patch_info
;
6789 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6790 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6791 int max_epilog_size
= 50;
6793 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6794 exc_throw_pos
[i
] = NULL
;
6795 exc_throw_found
[i
] = 0;
6798 /* count the number of exception infos */
6801 * make sure we have enough space for exceptions
6803 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6804 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6805 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6806 if (!exc_throw_found
[i
]) {
6807 max_epilog_size
+= 32;
6808 exc_throw_found
[i
] = TRUE
;
6813 code
= realloc_code (cfg
, max_epilog_size
);
6815 /* add code to raise exceptions */
6816 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6817 switch (patch_info
->type
) {
6818 case MONO_PATCH_INFO_EXC
: {
6819 MonoClass
*exc_class
;
6820 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6822 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6823 if (exc_throw_pos
[i
]) {
6824 arm_patch (ip
, exc_throw_pos
[i
]);
6825 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6828 exc_throw_pos
[i
] = code
;
6830 arm_patch (ip
, code
);
6832 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6834 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6835 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6836 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
6837 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
6838 patch_info
->ip
.i
= code
- cfg
->native_code
;
6840 cfg
->thunk_area
+= THUNK_SIZE
;
6841 *(guint32
*)(gpointer
)code
= m_class_get_type_token (exc_class
) - MONO_TOKEN_TYPE_DEF
;
6851 set_code_cursor (cfg
, code
);
6854 #endif /* #ifndef DISABLE_JIT */
6857 mono_arch_finish_init (void)
6862 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
6873 mono_arch_get_patch_offset (guint8
*code
)
6880 mono_arch_flush_register_windows (void)
6885 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
6887 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6891 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
6893 return (MonoVTable
*)(gsize
)regs
[MONO_ARCH_RGCTX_REG
];
6897 mono_arch_get_cie_program (void)
6901 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
6906 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6907 #define BASE_SIZE (6 * 4)
6908 #define BSEARCH_ENTRY_SIZE (4 * 4)
6909 #define CMP_SIZE (3 * 4)
6910 #define BRANCH_SIZE (1 * 4)
6911 #define CALL_SIZE (2 * 4)
6912 #define WMC_SIZE (8 * 4)
6913 #define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6916 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6918 guint32 delta
= DISTANCE (target
, code
);
6920 g_assert (delta
>= 0 && delta
<= 0xFFF);
6921 *target
= *target
| delta
;
6926 #ifdef ENABLE_WRONG_METHOD_CHECK
6928 mini_dump_bad_imt (int input_imt
, int compared_imt
, int pc
)
6930 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt
, compared_imt
, pc
);
6936 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6937 gpointer fail_tramp
)
6940 arminstr_t
*code
, *start
;
6941 gboolean large_offsets
= FALSE
;
6942 guint32
**constant_pool_starts
;
6943 arminstr_t
*vtable_target
= NULL
;
6944 int extra_space
= 0;
6945 #ifdef ENABLE_WRONG_METHOD_CHECK
6951 constant_pool_starts
= g_new0 (guint32
*, count
);
6953 for (i
= 0; i
< count
; ++i
) {
6954 MonoIMTCheckItem
*item
= imt_entries
[i
];
6955 if (item
->is_equals
) {
6956 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6958 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6959 item
->chunk_size
+= 32;
6960 large_offsets
= TRUE
;
6963 if (item
->check_target_idx
|| fail_case
) {
6964 if (!item
->compare_done
|| fail_case
)
6965 item
->chunk_size
+= CMP_SIZE
;
6966 item
->chunk_size
+= BRANCH_SIZE
;
6968 #ifdef ENABLE_WRONG_METHOD_CHECK
6969 item
->chunk_size
+= WMC_SIZE
;
6973 item
->chunk_size
+= 16;
6974 large_offsets
= TRUE
;
6976 item
->chunk_size
+= CALL_SIZE
;
6978 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6979 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6981 size
+= item
->chunk_size
;
6985 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6988 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
6990 code
= mono_domain_code_reserve (domain
, size
);
6993 unwind_ops
= mono_arch_get_cie_program ();
6996 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6997 for (i
= 0; i
< count
; ++i
) {
6998 MonoIMTCheckItem
*item
= imt_entries
[i
];
6999 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
7003 if (large_offsets
) {
7004 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7005 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (target_mgreg_t
));
7007 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
7008 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7010 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
7011 vtable_target
= code
;
7012 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
7013 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
7015 for (i
= 0; i
< count
; ++i
) {
7016 MonoIMTCheckItem
*item
= imt_entries
[i
];
7017 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
7018 gint32 vtable_offset
;
7020 item
->code_target
= (guint8
*)code
;
7022 if (item
->is_equals
) {
7023 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
7025 if (item
->check_target_idx
|| fail_case
) {
7026 if (!item
->compare_done
|| fail_case
) {
7028 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7029 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7031 item
->jmp_code
= (guint8
*)code
;
7032 ARM_B_COND (code
, ARMCOND_NE
, 0);
7034 /*Enable the commented code to assert on wrong method*/
7035 #ifdef ENABLE_WRONG_METHOD_CHECK
7037 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7038 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7040 ARM_B_COND (code
, ARMCOND_EQ
, 0);
7042 /* Define this if your system is so bad that gdb is failing. */
7043 #ifdef BROKEN_DEV_ENV
7044 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
7046 arm_patch (code
- 1, mini_dump_bad_imt
);
7050 arm_patch (cond
, code
);
7054 if (item
->has_target_code
) {
7055 /* Load target address */
7056 target_code_ins
= code
;
7057 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7058 /* Save it to the fourth slot */
7059 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7060 /* Restore registers and branch */
7061 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7063 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
7065 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
7066 if (!arm_is_imm12 (vtable_offset
)) {
7068 * We need to branch to a computed address but we don't have
7069 * a free register to store it, since IP must contain the
7070 * vtable address. So we push the two values to the stack, and
7071 * load them both using LDM.
7073 /* Compute target address */
7074 vtable_offset_ins
= code
;
7075 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7076 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
7077 /* Save it to the fourth slot */
7078 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7079 /* Restore registers and branch */
7080 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7082 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
7084 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
7085 if (large_offsets
) {
7086 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7087 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (target_mgreg_t
));
7089 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
7090 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
7095 arm_patch (item
->jmp_code
, (guchar
*)code
);
7097 target_code_ins
= code
;
7098 /* Load target address */
7099 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7100 /* Save it to the fourth slot */
7101 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7102 /* Restore registers and branch */
7103 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7105 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
7106 item
->jmp_code
= NULL
;
7110 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)(gsize
)item
->key
);
7112 /*must emit after unconditional branch*/
7113 if (vtable_target
) {
7114 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)(gsize
)vtable
);
7115 item
->chunk_size
+= 4;
7116 vtable_target
= NULL
;
7119 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7120 constant_pool_starts
[i
] = code
;
7122 code
+= extra_space
;
7126 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7127 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7129 item
->jmp_code
= (guint8
*)code
;
7130 ARM_B_COND (code
, ARMCOND_HS
, 0);
7135 for (i
= 0; i
< count
; ++i
) {
7136 MonoIMTCheckItem
*item
= imt_entries
[i
];
7137 if (item
->jmp_code
) {
7138 if (item
->check_target_idx
)
7139 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7141 if (i
> 0 && item
->is_equals
) {
7143 arminstr_t
*space_start
= constant_pool_starts
[i
];
7144 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7145 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)(gsize
)imt_entries
[j
]->key
);
7152 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
);
7153 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7158 g_free (constant_pool_starts
);
7160 mono_arch_flush_icache ((guint8
*)start
, size
);
7161 MONO_PROFILER_RAISE (jit_code_buffer
, ((guint8
*)start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
7162 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
7164 g_assert (DISTANCE (start
, code
) <= size
);
7166 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
7172 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
7174 return ctx
->regs
[reg
];
7178 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, host_mgreg_t val
)
7180 ctx
->regs
[reg
] = val
;
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific trampolines
 * for AOT.
 */
GSList *
mono_arch_get_trampolines (gboolean aot)
{
	/* The only arch-specific trampolines on ARM are the exception handling ones. */
	return mono_arm_get_exception_trampolines (aot);
}
7195 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/*
		 * AOT code cannot be patched in place; instead, flip the per-offset
		 * slot in the SeqPointInfo table which the emitted seq point code
		 * consults at runtime.
		 */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		/* Seq points are emitted on 4-byte instruction boundaries. */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (mini_debug_options.soft_breakpoints) {
		/* Patch in a call through LR; the preceding instruction emitted by
		 * OP_SEQ_POINT loaded the breakpoint trampoline address into LR. */
		code += 4;
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		/* The read faults because the page is protected, signalling the bp. */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* Undo mono_arch_set_breakpoint (): zero out the SeqPointInfo slot. */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		/* The slot must currently hold the value set_breakpoint stored. */
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(mini_debug_options.soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (mini_debug_options.soft_breakpoints) {
		/* Replace the patched-in call with a nop. */
		code += 4;
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Overwrite the 4-instruction trigger-page read sequence with nops. */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	/* Revoke all access to the trigger page so the emitted reads fault
	 * (hardware single-step scheme). */
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);

	/* Used by the soft-breakpoint scheme instead of the trigger page. */
	single_step_tramp = mini_get_single_step_trampoline ();
}
/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	/* Make the trigger page readable again so the emitted reads no longer fault. */
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);

	single_step_tramp = NULL;
}
7306 #define DBG_SIGNAL SIGBUS
7308 #define DBG_SIGNAL SIGSEGV
7312 * mono_arch_is_single_step_event:
7314 * Return whenever the machine state in SIGCTX corresponds to a single
7318 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7320 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7322 if (!ss_trigger_page
)
7325 /* Sometimes the address is off by 4 */
7326 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7333 * mono_arch_is_breakpoint_event:
7335 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7338 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7340 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7342 if (!ss_trigger_page
)
7345 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7346 /* Sometimes the address is off by 4 */
7347 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	/* ARM instructions are 4 bytes: advance the saved IP past the bp instruction. */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	/* Advance the saved IP past the 4-byte faulting instruction. */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.
 * Returns the per-method SeqPointInfo for CODE, creating and caching it in the
 * domain's arch_seq_points table on first use.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = (SeqPointInfo *)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
								code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, code);
		g_assert (ji);

		/* Trailing space holds bp_addrs [], one slot per 4 bytes of code. */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		info->ss_tramp_addr = &single_step_tramp;

		/* NOTE(review): lookup and insert are separate critical sections, so two
		 * racing threads may both allocate and the second insert replaces the
		 * first entry — confirm this small leak is acceptable. */
		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
7415 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
void
mono_arch_set_target (char *mtriple)
{
	/* The GNU target triple format is not very well documented */
	/* These are substring tests: e.g. "armv7" below also matches "armv7s" and
	 * "armv7k" triples, so the later checks only need to add the extra flags. */
	if (strstr (mtriple, "armv7")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
	}
	if (strstr (mtriple, "armv6")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
	}
	if (strstr (mtriple, "armv7s")) {
		v7s_supported = TRUE;
	}
	if (strstr (mtriple, "armv7k")) {
		v7k_supported = TRUE;
	}
	if (strstr (mtriple, "thumbv7s")) {
		/* "thumbv7s" does not contain "armv7", so set the base flags too. */
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
		v7s_supported = TRUE;
		thumb_supported = TRUE;
		thumb2_supported = TRUE;
	}
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		thumb_supported = TRUE;
	}
	if (strstr (mtriple, "gnueabi"))
		eabi_supported = TRUE;
}
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether the atomic opcode OPCODE can be implemented by this
 * backend on the current target CPU.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		/* Integer atomics require ARMv7. */
		return v7_supported;
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		/* Floating point atomics additionally need VFP hardware. */
		return v7_supported && IS_VFP;
	default:
		return FALSE;
	}
}
/*
 * mono_arch_get_call_info:
 *
 *   Public wrapper over the backend's calling-convention analysis for SIG,
 * allocating out of MP.
 */
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}
/* No fast TLS getter trampoline is provided by this backend. */
gpointer
mono_arch_get_get_tls_tramp (void)
{
	return NULL;
}
/*
 * emit_aotconst:
 *
 *   Emit code to load the value of the GOT slot identified by
 * (PATCH_TYPE, DATA) into DREG, recording a patch entry on CFG so the
 * embedded slot offset gets filled in later. Returns the updated code ptr.
 */
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
	/* Load the (to-be-patched) word embedded after the branch into DREG. */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Same GOT-load sequence as emit_aotconst (), but for trampoline code:
 * the patch entry is prepended to the JI_LIST patch list (offsets relative
 * to BUF) instead of being attached to a MonoCompile. Returns the updated
 * code pointer.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
	MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;

	*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
	/* Load the (to-be-patched) word embedded after the branch into DREG. */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
7531 mono_arch_load_function (MonoJitICallId jit_icall_id
)
7533 gpointer target
= NULL
;
7534 switch (jit_icall_id
) {
7535 #undef MONO_AOT_ICALL
7536 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
7537 MONO_AOT_ICALL (mono_arm_resume_unwind
)
7538 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call
)
7539 MONO_AOT_ICALL (mono_arm_throw_exception
)
7540 MONO_AOT_ICALL (mono_arm_throw_exception_by_token
)
7541 MONO_AOT_ICALL (mono_arm_unaligned_stack
)