3 * ARM backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
32 #include "debugger-agent.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
38 /* Sanity check: This makes no sense */
39 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
40 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
44 * IS_SOFT_FLOAT: Is full software floating point used?
45 * IS_HARD_FLOAT: Is full hardware floating point used?
46 * IS_VFP: Is hardware floating point with software ABI used?
48 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
49 * IS_VFP may delegate to mono_arch_is_soft_float ().
52 #if defined(ARM_FPU_VFP_HARD)
53 #define IS_SOFT_FLOAT (FALSE)
54 #define IS_HARD_FLOAT (TRUE)
56 #elif defined(ARM_FPU_NONE)
57 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
58 #define IS_HARD_FLOAT (FALSE)
59 #define IS_VFP (!mono_arch_is_soft_float ())
61 #define IS_SOFT_FLOAT (FALSE)
62 #define IS_HARD_FLOAT (FALSE)
66 #define THUNK_SIZE (3 * 4)
70 void sys_icache_invalidate (void *start
, size_t len
);
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex
;
79 static gboolean v5_supported
= FALSE
;
80 static gboolean v6_supported
= FALSE
;
81 static gboolean v7_supported
= FALSE
;
82 static gboolean v7s_supported
= FALSE
;
83 static gboolean v7k_supported
= FALSE
;
84 static gboolean thumb_supported
= FALSE
;
85 static gboolean thumb2_supported
= FALSE
;
87 * Whether to use the ARM EABI
89 static gboolean eabi_supported
= FALSE
;
92 * Whether to use the iphone ABI extensions:
93 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
94 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
95 * This is required for debugging/profiling tools to work, but it has some overhead so it should
96 * only be turned on in debug builds.
98 static gboolean iphone_abi
= FALSE
;
101 * The FPU we are generating code for. This is NOT runtime configurable right now,
102 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
104 static MonoArmFPU arm_fpu
;
106 #if defined(ARM_FPU_VFP_HARD)
108 * On armhf, d0-d7 are used for argument passing and d8-d15
109 * must be preserved across calls, which leaves us no room
110 * for scratch registers. So we use d14-d15 but back up their
111 * previous contents to a stack slot before using them - see
112 * mono_arm_emit_vfp_scratch_save/_restore ().
114 static int vfp_scratch1
= ARM_VFP_D14
;
115 static int vfp_scratch2
= ARM_VFP_D15
;
118 * On armel, d0-d7 do not need to be preserved, so we can
119 * freely make use of them as scratch registers.
121 static int vfp_scratch1
= ARM_VFP_D0
;
122 static int vfp_scratch2
= ARM_VFP_D1
;
127 static gpointer single_step_tramp
, breakpoint_tramp
;
130 * The code generated for sequence points reads from this location, which is
131 * made read-only when single stepping is enabled.
133 static gpointer ss_trigger_page
;
135 /* Enabled breakpoints read from this trigger page */
136 static gpointer bp_trigger_page
;
140 * floating point support: on ARM it is a mess, there are at least 3
141 * different setups, each of which is binary-incompatible with the others.
142 * 1) FPA: old and ugly, but unfortunately what current distros use
143 * the double binary format has the two words swapped. 8 double registers.
144 * Implemented usually by kernel emulation.
145 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
146 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
147 * 3) VFP: the new and actually sensible and useful FP support. Implemented
148 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
150 * We do not care about FPA. We will support soft float and VFP.
152 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
153 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
154 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
156 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
157 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
158 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
160 //#define DEBUG_IMT 0
163 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
167 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
);
/*
 * mono_arch_regname:
 *
 *   Return a human readable name for the integer register REG,
 *   or "unknown" when REG is out of the 0..15 range.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human readable name for the floating point register REG,
 *   or "unknown" when REG is out of the 0..31 range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
203 emit_big_add_temp (guint8
*code
, int dreg
, int sreg
, int imm
, int temp
)
205 int imm8
, rot_amount
;
207 g_assert (temp
== ARMREG_IP
|| temp
== ARMREG_LR
);
211 ARM_MOV_REG_REG (code
, dreg
, sreg
);
212 } else if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
213 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
217 code
= mono_arm_emit_load_imm (code
, temp
, imm
);
218 ARM_ADD_REG_REG (code
, dreg
, sreg
, temp
);
220 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
221 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
227 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
229 return emit_big_add_temp (code
, dreg
, sreg
, imm
, ARMREG_IP
);
233 emit_ldr_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
235 if (!arm_is_imm12 (imm
)) {
236 g_assert (dreg
!= sreg
);
237 code
= emit_big_add (code
, dreg
, sreg
, imm
);
238 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
240 ARM_LDR_IMM (code
, dreg
, sreg
, imm
);
245 /* If dreg == sreg, this clobbers IP */
247 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
249 int imm8
, rot_amount
;
250 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
251 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
255 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
256 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
258 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
259 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
265 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
267 /* we can use r0-r3, since this is called only for incoming args on the stack */
268 if (size
> sizeof (target_mgreg_t
) * 4) {
270 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
271 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
272 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
273 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
274 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
275 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
276 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
277 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
278 ARM_B_COND (code
, ARMCOND_NE
, 0);
279 arm_patch (code
- 4, start_loop
);
282 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
283 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
285 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
286 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
292 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
293 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
294 doffset
= soffset
= 0;
296 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
297 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
303 g_assert (size
== 0);
308 emit_jmp_reg (guint8
*code
, int reg
)
313 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
318 emit_call_reg (guint8
*code
, int reg
)
321 ARM_BLX_REG (code
, reg
);
323 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
324 return emit_jmp_reg (code
, reg
);
330 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
332 if (cfg
->method
->dynamic
) {
333 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
335 *(gpointer
*)code
= NULL
;
337 code
= emit_call_reg (code
, ARMREG_IP
);
341 cfg
->thunk_area
+= THUNK_SIZE
;
346 mono_arm_patchable_b (guint8
*code
, int cond
)
348 ARM_B_COND (code
, cond
, 0);
353 mono_arm_patchable_bl (guint8
*code
, int cond
)
355 ARM_BL_COND (code
, cond
, 0);
359 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
360 #define HAVE_AEABI_READ_TP 1
363 #ifdef HAVE_AEABI_READ_TP
365 gpointer
__aeabi_read_tp (void);
370 mono_arch_have_fast_tls (void)
372 #ifdef HAVE_AEABI_READ_TP
373 static gboolean have_fast_tls
= FALSE
;
374 static gboolean inited
= FALSE
;
376 if (mini_get_debug_options ()->use_fallback_tls
)
380 return have_fast_tls
;
385 tp1
= __aeabi_read_tp ();
386 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2
));
388 have_fast_tls
= tp1
&& tp1
== tp2
;
391 return have_fast_tls
;
398 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
400 g_assert (v7_supported
);
401 ARM_MRC (code
, 15, 0, dreg
, 13, 0, 3);
402 ARM_LDR_IMM (code
, dreg
, dreg
, tls_offset
);
407 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
409 int tp_reg
= (sreg
!= ARMREG_R0
) ? ARMREG_R0
: ARMREG_R1
;
410 g_assert (v7_supported
);
411 ARM_MRC (code
, 15, 0, tp_reg
, 13, 0, 3);
412 ARM_STR_IMM (code
, sreg
, tp_reg
, tls_offset
);
419 * Emit code to push an LMF structure on the LMF stack.
420 * On arm, this is intermixed with the initialization of other fields of the structure.
423 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
427 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
) != -1) {
428 code
= emit_tls_get (code
, ARMREG_R0
, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
));
430 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
431 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr
));
432 code
= emit_call_seq (cfg
, code
);
434 /* we build the MonoLMF structure on the stack - see mini-arm.h */
435 /* lmf_offset is the offset from the previous stack pointer,
436 * alloc_size is the total stack space allocated, so the offset
437 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
438 * The pointer to the struct is put in r1 (new_lmf).
439 * ip is used as scratch
440 * The callee-saved registers are already in the MonoLMF structure
442 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
443 /* r0 is the result from mono_get_lmf_addr () */
444 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
445 /* new_lmf->previous_lmf = *lmf_addr */
446 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
447 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
448 /* *(lmf_addr) = r1 */
449 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
450 /* Skip method (only needed for trampoline LMF frames) */
451 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
452 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
453 /* save the current IP */
454 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
455 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
457 for (i
= 0; i
< MONO_ABI_SIZEOF (MonoLMF
); i
+= sizeof (target_mgreg_t
))
458 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
469 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
473 set_code_cursor (cfg
, code
);
475 for (list
= inst
->float_args
; list
; list
= list
->next
) {
476 FloatArgData
*fad
= (FloatArgData
*)list
->data
;
477 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
478 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
480 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
486 code
= realloc_code (cfg
, *max_len
);
489 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
490 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
492 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
494 set_code_cursor (cfg
, code
);
495 *offset
= code
- cfg
->native_code
;
502 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
506 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
508 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
511 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
512 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
513 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
515 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
522 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
526 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
528 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
531 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
532 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
533 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
535 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
544 * Emit code to pop an LMF structure from the LMF stack.
547 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
551 if (lmf_offset
< 32) {
552 basereg
= cfg
->frame_reg
;
557 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
560 /* ip = previous_lmf */
561 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
563 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
564 /* *(lmf_addr) = previous_lmf */
565 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
570 #endif /* #ifndef DISABLE_JIT */
573 * mono_arch_get_argument_info:
574 * @csig: a method signature
575 * @param_count: the number of parameters to consider
576 * @arg_info: an array to store the result infos
578 * Gathers information on parameters such as size, alignment and
579 * padding. arg_info should be large enough to hold param_count + 1 entries.
581 * Returns the size of the activation frame.
584 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
586 int k
, frame_size
= 0;
587 guint32 size
, align
, pad
;
591 t
= mini_get_underlying_type (csig
->ret
);
592 if (MONO_TYPE_ISSTRUCT (t
)) {
593 frame_size
+= sizeof (target_mgreg_t
);
597 arg_info
[0].offset
= offset
;
600 frame_size
+= sizeof (target_mgreg_t
);
604 arg_info
[0].size
= frame_size
;
606 for (k
= 0; k
< param_count
; k
++) {
607 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
609 /* ignore alignment for now */
612 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
613 arg_info
[k
].pad
= pad
;
615 arg_info
[k
+ 1].pad
= 0;
616 arg_info
[k
+ 1].size
= size
;
618 arg_info
[k
+ 1].offset
= offset
;
622 align
= MONO_ARCH_FRAME_ALIGNMENT
;
623 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
624 arg_info
[k
].pad
= pad
;
629 #define MAX_ARCH_DELEGATE_PARAMS 3
632 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
634 guint8
*code
, *start
;
635 GSList
*unwind_ops
= mono_arch_get_cie_program ();
638 start
= code
= mono_global_codeman_reserve (12);
640 /* Replace the this argument with the target */
641 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
642 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
643 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
645 g_assert ((code
- start
) <= 12);
647 mono_arch_flush_icache (start
, 12);
648 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
652 size
= 8 + param_count
* 4;
653 start
= code
= mono_global_codeman_reserve (size
);
655 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
656 /* slide down the arguments */
657 for (i
= 0; i
< param_count
; ++i
) {
658 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
660 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
662 g_assert ((code
- start
) <= size
);
664 mono_arch_flush_icache (start
, size
);
665 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
669 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
671 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
672 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
676 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
682 * mono_arch_get_delegate_invoke_impls:
684 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
688 mono_arch_get_delegate_invoke_impls (void)
694 get_delegate_invoke_impl (&info
, TRUE
, 0);
695 res
= g_slist_prepend (res
, info
);
697 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
698 get_delegate_invoke_impl (&info
, FALSE
, i
);
699 res
= g_slist_prepend (res
, info
);
706 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
708 guint8
*code
, *start
;
711 /* FIXME: Support more cases */
712 sig_ret
= mini_get_underlying_type (sig
->ret
);
713 if (MONO_TYPE_ISSTRUCT (sig_ret
))
717 static guint8
* cached
= NULL
;
718 mono_mini_arch_lock ();
720 mono_mini_arch_unlock ();
724 if (mono_ee_features
.use_aot_trampolines
) {
725 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
728 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
729 mono_tramp_info_register (info
, NULL
);
732 mono_mini_arch_unlock ();
735 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
738 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
740 for (i
= 0; i
< sig
->param_count
; ++i
)
741 if (!mono_is_regsize_var (sig
->params
[i
]))
744 mono_mini_arch_lock ();
745 code
= cache
[sig
->param_count
];
747 mono_mini_arch_unlock ();
751 if (mono_ee_features
.use_aot_trampolines
) {
752 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
753 start
= (guint8
*)mono_aot_get_trampoline (name
);
757 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
758 mono_tramp_info_register (info
, NULL
);
760 cache
[sig
->param_count
] = start
;
761 mono_mini_arch_unlock ();
769 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
775 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
777 return (gpointer
)regs
[ARMREG_R0
];
781 * Initialize the cpu to execute managed code.
784 mono_arch_cpu_init (void)
786 i8_align
= MONO_ABI_ALIGNOF (gint64
);
787 #ifdef MONO_CROSS_COMPILE
788 /* Need to set the alignment of i8 since it can different on the target */
789 #ifdef TARGET_ANDROID
791 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
797 * Initialize architecture specific code.
800 mono_arch_init (void)
804 #ifdef TARGET_WATCHOS
805 mini_get_debug_options ()->soft_breakpoints
= TRUE
;
808 mono_os_mutex_init_recursive (&mini_arch_mutex
);
809 if (mini_get_debug_options ()->soft_breakpoints
) {
811 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
813 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
814 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
815 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
818 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception
);
819 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token
);
820 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind
);
821 #if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
822 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call
);
824 mono_aot_register_jit_icall ("mono_arm_unaligned_stack", mono_arm_unaligned_stack
);
825 #if defined(__ARM_EABI__)
826 eabi_supported
= TRUE
;
829 #if defined(ARM_FPU_VFP_HARD)
830 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
832 arm_fpu
= MONO_ARM_FPU_VFP
;
834 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
836 * If we're compiling with a soft float fallback and it
837 * turns out that no VFP unit is available, we need to
838 * switch to soft float. We don't do this for iOS, since
839 * iOS devices always have a VFP unit.
841 if (!mono_hwcap_arm_has_vfp
)
842 arm_fpu
= MONO_ARM_FPU_NONE
;
845 * This environment variable can be useful in testing
846 * environments to make sure the soft float fallback
847 * works. Most ARM devices have VFP units these days, so
848 * normally soft float code would not be exercised much.
850 char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
852 if (soft
&& !strncmp (soft
, "1", 1))
853 arm_fpu
= MONO_ARM_FPU_NONE
;
858 v5_supported
= mono_hwcap_arm_is_v5
;
859 v6_supported
= mono_hwcap_arm_is_v6
;
860 v7_supported
= mono_hwcap_arm_is_v7
;
863 * On weird devices, the hwcap code may fail to detect
864 * the ARM version. In that case, we can at least safely
865 * assume the version the runtime was compiled for.
877 #if defined(TARGET_IOS)
878 /* iOS is special-cased here because we don't yet
879 have a way to properly detect CPU features on it. */
880 thumb_supported
= TRUE
;
883 thumb_supported
= mono_hwcap_arm_has_thumb
;
884 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
887 /* Format: armv(5|6|7[s])[-thumb[2]] */
888 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
890 /* Do this here so it overrides any detection. */
892 if (strncmp (cpu_arch
, "armv", 4) == 0) {
893 v5_supported
= cpu_arch
[4] >= '5';
894 v6_supported
= cpu_arch
[4] >= '6';
895 v7_supported
= cpu_arch
[4] >= '7';
896 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
897 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
900 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
901 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
907 * Cleanup architecture specific code.
910 mono_arch_cleanup (void)
915 * This function returns the optimizations supported on this cpu.
918 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
920 /* no arm-specific optimizations yet */
926 * This function tests which SIMD versions are supported.
928 * Returns a bitmask corresponding to all supported versions.
932 mono_arch_cpu_enumerate_simd_versions (void)
934 /* SIMD is currently unimplemented */
939 mono_arm_is_hard_float (void)
941 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
947 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
949 if (v7s_supported
|| v7k_supported
) {
963 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
965 mono_arch_is_soft_float (void)
967 return arm_fpu
== MONO_ARM_FPU_NONE
;
972 is_regsize_var (MonoType
*t
)
976 t
= mini_get_underlying_type (t
);
983 case MONO_TYPE_FNPTR
:
985 case MONO_TYPE_OBJECT
:
987 case MONO_TYPE_GENERICINST
:
988 if (!mono_type_generic_inst_is_valuetype (t
))
991 case MONO_TYPE_VALUETYPE
:
998 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
1003 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
1004 MonoInst
*ins
= cfg
->varinfo
[i
];
1005 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
1008 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
1011 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
1014 /* we can only allocate 32 bit values */
1015 if (is_regsize_var (ins
->inst_vtype
)) {
1016 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
1017 g_assert (i
== vmv
->idx
);
1018 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
1026 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
1030 mono_arch_compute_omit_fp (cfg
);
1033 * FIXME: Interface calls might go through a static rgctx trampoline which
1034 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1037 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1038 cfg
->uses_rgctx_reg
= TRUE
;
1040 if (cfg
->arch
.omit_fp
)
1041 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1042 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1043 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1044 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1046 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1047 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1049 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1050 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1051 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1052 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1053 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1054 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1060 * mono_arch_regalloc_cost:
1062 * Return the cost, in number of memory references, of the action of
1063 * allocating the variable VMV into a register during global register
1067 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1073 #endif /* #ifndef DISABLE_JIT */
1076 mono_arch_flush_icache (guint8
*code
, gint size
)
1078 #if defined(MONO_CROSS_COMPILE)
1080 sys_icache_invalidate (code
, size
);
1082 __builtin___clear_cache ((char*)code
, (char*)code
+ size
);
1089 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1092 if (*gr
> ARMREG_R3
) {
1094 ainfo
->offset
= *stack_size
;
1095 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1096 ainfo
->storage
= RegTypeBase
;
1099 ainfo
->storage
= RegTypeGeneral
;
1106 split
= i8_align
== 4;
1111 if (*gr
== ARMREG_R3
&& split
) {
1112 /* first word in r3 and the second on the stack */
1113 ainfo
->offset
= *stack_size
;
1114 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1115 ainfo
->storage
= RegTypeBaseGen
;
1117 } else if (*gr
>= ARMREG_R3
) {
1118 if (eabi_supported
) {
1119 /* darwin aligns longs to 4 byte only */
1120 if (i8_align
== 8) {
1125 ainfo
->offset
= *stack_size
;
1126 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1127 ainfo
->storage
= RegTypeBase
;
1130 if (eabi_supported
) {
1131 if (i8_align
== 8 && ((*gr
) & 1))
1134 ainfo
->storage
= RegTypeIRegPair
;
1143 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1146 * If we're calling a function like this:
1148 * void foo(float a, double b, float c)
1150 * We pass a in s0 and b in d1. That leaves us
1151 * with s1 being unused. The armhf ABI recognizes
1152 * this and requires register assignment to then
1153 * use that for the next single-precision arg,
1154 * i.e. c in this example. So float_spare either
1155 * tells us which reg to use for the next single-
1156 * precision arg, or it's -1, meaning use *fpr.
1158 * Note that even though most of the JIT speaks
1159 * double-precision, fpr represents single-
1160 * precision registers.
1162 * See parts 5.5 and 6.1.2 of the AAPCS for how
1166 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1167 ainfo
->storage
= RegTypeFP
;
1171 * If we're passing a double-precision value
1172 * and *fpr is odd (e.g. it's s1, s3, ...)
1173 * we need to use the next even register. So
1174 * we mark the current *fpr as a spare that
1175 * can be used for the next single-precision
1179 *float_spare
= *fpr
;
1184 * At this point, we have an even register
1185 * so we assign that and move along.
1189 } else if (*float_spare
>= 0) {
1191 * We're passing a single-precision value
1192 * and it looks like a spare single-
1193 * precision register is available. Let's
1197 ainfo
->reg
= *float_spare
;
1201 * If we hit this branch, we're passing a
1202 * single-precision value and we can simply
1203 * use the next available register.
1211 * We've exhausted available floating point
1212 * regs, so pass the rest on the stack.
1220 ainfo
->offset
= *stack_size
;
1221 ainfo
->reg
= ARMREG_SP
;
1222 ainfo
->storage
= RegTypeBase
;
1229 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1233 MonoClassField
*field
;
1234 MonoType
*ftype
, *prev_ftype
= NULL
;
1237 klass
= mono_class_from_mono_type_internal (t
);
1239 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1240 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1242 ftype
= mono_field_get_type_internal (field
);
1243 ftype
= mini_get_underlying_type (ftype
);
1245 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1246 int nested_nfields
, nested_esize
;
1248 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1250 if (nested_esize
== 4)
1251 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1253 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1254 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1257 nfields
+= nested_nfields
;
1259 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1261 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1267 if (nfields
== 0 || nfields
> 4)
1269 *out_nfields
= nfields
;
1270 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1275 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1277 guint i
, gr
, fpr
, pstart
;
1279 int n
= sig
->hasthis
+ sig
->param_count
;
1283 guint32 stack_size
= 0;
1285 gboolean is_pinvoke
= sig
->pinvoke
;
1286 gboolean vtype_retaddr
= FALSE
;
1289 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1291 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1298 t
= mini_get_underlying_type (sig
->ret
);
1309 case MONO_TYPE_FNPTR
:
1310 case MONO_TYPE_OBJECT
:
1311 cinfo
->ret
.storage
= RegTypeGeneral
;
1312 cinfo
->ret
.reg
= ARMREG_R0
;
1316 cinfo
->ret
.storage
= RegTypeIRegPair
;
1317 cinfo
->ret
.reg
= ARMREG_R0
;
1321 cinfo
->ret
.storage
= RegTypeFP
;
1323 if (t
->type
== MONO_TYPE_R4
)
1324 cinfo
->ret
.size
= 4;
1326 cinfo
->ret
.size
= 8;
1328 if (IS_HARD_FLOAT
) {
1329 cinfo
->ret
.reg
= ARM_VFP_F0
;
1331 cinfo
->ret
.reg
= ARMREG_R0
;
1334 case MONO_TYPE_GENERICINST
:
1335 if (!mono_type_generic_inst_is_valuetype (t
)) {
1336 cinfo
->ret
.storage
= RegTypeGeneral
;
1337 cinfo
->ret
.reg
= ARMREG_R0
;
1340 if (mini_is_gsharedvt_variable_type (t
)) {
1341 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1345 case MONO_TYPE_VALUETYPE
:
1346 case MONO_TYPE_TYPEDBYREF
:
1347 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1348 cinfo
->ret
.storage
= RegTypeHFA
;
1350 cinfo
->ret
.nregs
= nfields
;
1351 cinfo
->ret
.esize
= esize
;
1354 int native_size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1357 #ifdef TARGET_WATCHOS
1362 if (native_size
<= max_size
) {
1363 cinfo
->ret
.storage
= RegTypeStructByVal
;
1364 cinfo
->ret
.struct_size
= native_size
;
1365 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1367 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1370 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1375 case MONO_TYPE_MVAR
:
1376 g_assert (mini_is_gsharedvt_type (t
));
1377 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1379 case MONO_TYPE_VOID
:
1382 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1385 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1390 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1391 * the first argument, allowing 'this' to be always passed in the first arg reg.
1392 * Also do this if the first argument is a reference type, since virtual calls
1393 * are sometimes made using calli without sig->hasthis set, like in the delegate
1396 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1398 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1400 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1404 cinfo
->ret
.reg
= gr
;
1406 cinfo
->vret_arg_index
= 1;
1410 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1413 if (vtype_retaddr
) {
1414 cinfo
->ret
.reg
= gr
;
1419 DEBUG(g_print("params: %d\n", sig
->param_count
));
1420 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1421 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1423 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1424 /* Prevent implicit arguments and sig_cookie from
1425 being passed in registers */
1428 /* Emit the signature cookie just before the implicit arguments */
1429 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1431 DEBUG(g_print("param %d: ", i
));
1432 if (sig
->params
[i
]->byref
) {
1433 DEBUG(g_print("byref\n"));
1434 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1438 t
= mini_get_underlying_type (sig
->params
[i
]);
1442 cinfo
->args
[n
].size
= 1;
1443 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1447 cinfo
->args
[n
].size
= 2;
1448 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1452 cinfo
->args
[n
].size
= 4;
1453 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1458 case MONO_TYPE_FNPTR
:
1459 case MONO_TYPE_OBJECT
:
1460 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1461 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1463 case MONO_TYPE_GENERICINST
:
1464 if (!mono_type_generic_inst_is_valuetype (t
)) {
1465 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1466 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1469 if (mini_is_gsharedvt_variable_type (t
)) {
1470 /* gsharedvt arguments are passed by ref */
1471 g_assert (mini_is_gsharedvt_type (t
));
1472 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1473 switch (ainfo
->storage
) {
1474 case RegTypeGeneral
:
1475 ainfo
->storage
= RegTypeGSharedVtInReg
;
1478 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1481 g_assert_not_reached ();
1486 case MONO_TYPE_TYPEDBYREF
:
1487 case MONO_TYPE_VALUETYPE
: {
1490 int nwords
, nfields
, esize
;
1493 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1494 if (fpr
+ nfields
< ARM_VFP_F16
) {
1495 ainfo
->storage
= RegTypeHFA
;
1497 ainfo
->nregs
= nfields
;
1498 ainfo
->esize
= esize
;
1509 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1510 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1511 align
= sizeof (target_mgreg_t
);
1513 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1515 size
= mono_class_native_size (klass
, &align
);
1517 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1519 DEBUG(g_print ("load %d bytes struct\n", size
));
1521 #ifdef TARGET_WATCHOS
1522 /* Watchos pass large structures by ref */
1523 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1524 if (sig
->pinvoke
&& size
> 16) {
1525 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1526 switch (ainfo
->storage
) {
1527 case RegTypeGeneral
:
1528 ainfo
->storage
= RegTypeStructByAddr
;
1531 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1534 g_assert_not_reached ();
1543 align_size
+= (sizeof (target_mgreg_t
) - 1);
1544 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1545 nwords
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1546 ainfo
->storage
= RegTypeStructByVal
;
1547 ainfo
->struct_size
= size
;
1548 ainfo
->align
= align
;
1550 if (eabi_supported
) {
1551 if (align
>= 8 && (gr
& 1))
1554 if (gr
> ARMREG_R3
) {
1556 ainfo
->vtsize
= nwords
;
1558 int rest
= ARMREG_R3
- gr
+ 1;
1559 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1561 ainfo
->size
= n_in_regs
;
1562 ainfo
->vtsize
= nwords
- n_in_regs
;
1565 nwords
-= n_in_regs
;
1567 stack_size
= ALIGN_TO (stack_size
, align
);
1569 ainfo
->offset
= stack_size
;
1570 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1571 stack_size
+= nwords
* sizeof (target_mgreg_t
);
1577 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1583 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1585 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1591 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1593 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1596 case MONO_TYPE_MVAR
:
1597 /* gsharedvt arguments are passed by ref */
1598 g_assert (mini_is_gsharedvt_type (t
));
1599 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1600 switch (ainfo
->storage
) {
1601 case RegTypeGeneral
:
1602 ainfo
->storage
= RegTypeGSharedVtInReg
;
1605 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1608 g_assert_not_reached ();
1612 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1617 /* Handle the case where there are no implicit arguments */
1618 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1619 /* Prevent implicit arguments and sig_cookie from
1620 being passed in registers */
1623 /* Emit the signature cookie just before the implicit arguments */
1624 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1627 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1628 stack_size
= ALIGN_TO (stack_size
, MONO_ARCH_FRAME_ALIGNMENT
);
1630 cinfo
->stack_usage
= stack_size
;
1635 * We need to create a temporary value if the argument is not stored in
1636 * a linear memory range in the ccontext (this normally happens for
1637 * value types if they are passed both by stack and regs).
1640 arg_need_temp (ArgInfo
*ainfo
)
1642 if (ainfo
->storage
== RegTypeStructByVal
&& ainfo
->vtsize
)
1643 return ainfo
->struct_size
;
1648 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1650 switch (ainfo
->storage
) {
1651 case RegTypeIRegPair
:
1652 case RegTypeGeneral
:
1653 case RegTypeStructByVal
:
1654 return &ccontext
->gregs
[ainfo
->reg
];
1657 return &ccontext
->fregs
[ainfo
->reg
];
1659 return ccontext
->stack
+ ainfo
->offset
;
1661 g_error ("Arg storage type not yet supported");
1666 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1668 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1669 g_assert (arg_need_temp (ainfo
));
1670 memcpy (dest
, &ccontext
->gregs
[ainfo
->reg
], reg_size
);
1671 memcpy ((host_mgreg_t
*)dest
+ ainfo
->size
, ccontext
->stack
+ ainfo
->offset
, ainfo
->struct_size
- reg_size
);
1675 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1677 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1678 g_assert (arg_need_temp (ainfo
));
1679 memcpy (&ccontext
->gregs
[ainfo
->reg
], src
, reg_size
);
1680 memcpy (ccontext
->stack
+ ainfo
->offset
, (host_mgreg_t
*)src
+ ainfo
->size
, ainfo
->struct_size
- reg_size
);
1683 /* Set arguments in the ccontext (for i2n entry) */
/*
 * Marshals the arguments of an interpreter frame into a native CallContext
 * before an interp-to-native transition.
 * NOTE(review): extraction artifact -- statements are split across lines and
 * several original lines (return type, declarations of `ainfo`/`storage`,
 * braces, `else` branches, trailing cleanup) were elided from this view;
 * comments below describe only the visible code.
 */
1685 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
/* Callbacks into the interpreter, used to read argument data out of `frame`. */
1687 MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
/* Compute the ARM calling-convention layout for this signature. */
1688 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
/* Start from a zeroed context. */
1692 memset (ccontext
, 0, sizeof (CallContext
));
/* Allocate the outgoing stack area, rounded up to the frame alignment. */
1694 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1695 if (ccontext
->stack_size
)
1696 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
/* For a struct returned via hidden pointer, place the interp frame's
 * return-buffer address in the designated register. */
1698 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1699 ainfo
= &cinfo
->ret
;
1700 if (ainfo
->storage
== RegTypeStructByAddr
) {
1701 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1702 ccontext
->gregs
[cinfo
->ret
.reg
] = (host_mgreg_t
)(gsize
)storage
;
/* Only static signatures are expected on this path. */
1706 g_assert (!sig
->hasthis
);
/* Copy each managed argument into its native location. */
1708 for (int i
= 0; i
< sig
->param_count
; i
++) {
1709 ainfo
= &cinfo
->args
[i
];
1710 int temp_size
= arg_need_temp (ainfo
);
/* Split (reg+stack) arguments are staged in a temporary first ... */
1713 storage
= alloca (temp_size
); // FIXME? alloca in a loop
/* ... others are written directly into their final slot. */
1715 storage
= arg_get_storage (ccontext
, ainfo
);
1717 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
/* Scatter the staged temporary into registers + stack (split case). */
1719 arg_set_val (ccontext
, ainfo
, storage
);
1725 /* Set return value in the ccontext (for n2i return) */
/*
 * Stores the managed return value of an interpreter frame into the native
 * CallContext when returning from a native-to-interp transition.
 * NOTE(review): extraction artifact -- several original lines (return type,
 * declarations of `cinfo`/`ainfo`/`storage`, the body of the early-out, and
 * cleanup) were elided from this view.
 */
1727 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1729 MonoEECallbacks
*interp_cb
;
/* Void returns have nothing to copy (the elided line presumably returns
 * early -- TODO confirm against the full file). */
1734 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1737 interp_cb
= mini_get_interp_callbacks ();
/* Layout for this signature; the return location is cinfo->ret. */
1738 cinfo
= get_call_info (NULL
, sig
);
1739 ainfo
= &cinfo
->ret
;
/* By-address returns were already written through the hidden pointer; only
 * register/stack returns need copying here. */
1741 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1742 g_assert (!arg_need_temp (ainfo
));
1743 storage
= arg_get_storage (ccontext
, ainfo
);
1744 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1745 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1751 /* Gets the arguments from ccontext (for n2i entry) */
/*
 * Reads the native arguments out of CCONTEXT and stores them into the
 * interpreter frame, on entry to a native-to-interp wrapper.
 * NOTE(review): extraction artifact -- declarations of `ainfo`/`storage`,
 * braces, `else` branches and cleanup lines were elided from this view.
 */
1753 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1755 MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1756 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
/* For by-address returns the register holds the return-buffer pointer;
 * hand it to the interpreter frame. */
1760 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1761 ainfo
= &cinfo
->ret
;
1762 if (ainfo
->storage
== RegTypeStructByAddr
) {
1763 storage
= (gpointer
)(gsize
)ccontext
->gregs
[cinfo
->ret
.reg
];
1764 interp_cb
->frame_arg_set_storage ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
/* Copy each incoming argument (including `this`) into the frame. */
1768 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1769 ainfo
= &cinfo
->args
[i
];
1770 int temp_size
= arg_need_temp (ainfo
);
/* Split (reg+stack) arguments are gathered into a temporary first ... */
1773 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1774 arg_get_val (ccontext
, ainfo
, storage
);
/* ... others are read straight from their single native location. */
1776 storage
= arg_get_storage (ccontext
, ainfo
);
1778 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1784 /* Gets the return value from ccontext (for i2n exit) */
/*
 * Reads the native return value out of CCONTEXT and stores it into the
 * interpreter frame after an interp-to-native call returns.
 * NOTE(review): extraction artifact -- the return type, declarations of
 * `cinfo`/`ainfo`/`storage`, the early-out body and cleanup were elided.
 */
1786 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1788 MonoEECallbacks
*interp_cb
;
/* Void returns have nothing to copy (elided early return -- TODO confirm). */
1793 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1796 interp_cb
= mini_get_interp_callbacks ();
1797 cinfo
= get_call_info (NULL
, sig
);
1798 ainfo
= &cinfo
->ret
;
/* By-address returns were written through the hidden pointer already. */
1800 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1801 g_assert (!arg_need_temp (ainfo
));
1802 storage
= arg_get_storage (ccontext
, ainfo
);
1803 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1812 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1814 g_assert (caller_sig
);
1815 g_assert (callee_sig
);
1817 CallInfo
*caller_info
= get_call_info (NULL
, caller_sig
);
1818 CallInfo
*callee_info
= get_call_info (NULL
, callee_sig
);
1821 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1822 * the extra stack space would be left on the stack after the tailcall.
1824 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1825 && IS_SUPPORTED_TAILCALL (caller_info
->ret
.storage
== callee_info
->ret
.storage
);
1827 // FIXME The limit here is that moving the parameters requires addressing the parameters
1828 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1829 res
&= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
< (4096 - 4));
1830 res
&= IS_SUPPORTED_TAILCALL (caller_info
->stack_usage
< (4096 - 4));
1832 g_free (caller_info
);
1833 g_free (callee_info
);
/*
 * debug_omit_fp:
 *   Debug gate for frame-pointer omission; callers keep the frame pointer
 * when this returns FALSE. mono_debug_count () is presumably the
 * MONO_DEBUG bisection counter -- verify.
 * NOTE(review): extraction artifact -- the return-type line and a possible
 * surrounding preprocessor guard are missing from this view; confirm
 * against the full file before relying on this.
 */
1839 debug_omit_fp (void)
1842 return mono_debug_count ();
1849 * mono_arch_compute_omit_fp:
1850 * Determine whether the frame pointer can be eliminated.
1853 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1855 MonoMethodSignature
*sig
;
1856 MonoMethodHeader
*header
;
1860 if (cfg
->arch
.omit_fp_computed
)
1863 header
= cfg
->header
;
1865 sig
= mono_method_signature_internal (cfg
->method
);
1867 if (!cfg
->arch
.cinfo
)
1868 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1869 cinfo
= cfg
->arch
.cinfo
;
1872 * FIXME: Remove some of the restrictions.
1874 cfg
->arch
.omit_fp
= TRUE
;
1875 cfg
->arch
.omit_fp_computed
= TRUE
;
1877 if (cfg
->disable_omit_fp
)
1878 cfg
->arch
.omit_fp
= FALSE
;
1879 if (!debug_omit_fp ())
1880 cfg
->arch
.omit_fp
= FALSE
;
1882 if (cfg->method->save_lmf)
1883 cfg->arch.omit_fp = FALSE;
1885 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1886 cfg
->arch
.omit_fp
= FALSE
;
1887 if (header
->num_clauses
)
1888 cfg
->arch
.omit_fp
= FALSE
;
1889 if (cfg
->param_area
)
1890 cfg
->arch
.omit_fp
= FALSE
;
1891 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1892 cfg
->arch
.omit_fp
= FALSE
;
1893 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)))
1894 cfg
->arch
.omit_fp
= FALSE
;
1895 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1896 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1898 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1900 * The stack offset can only be determined when the frame
1903 cfg
->arch
.omit_fp
= FALSE
;
1908 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1909 MonoInst
*ins
= cfg
->varinfo
[i
];
1912 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
1917 * Set var information according to the calling convention. arm version.
1918 * The locals var stuff should most likely be split in another method.
1921 mono_arch_allocate_vars (MonoCompile
*cfg
)
1923 MonoMethodSignature
*sig
;
1924 MonoMethodHeader
*header
;
1927 int i
, offset
, size
, align
, curinst
;
1932 sig
= mono_method_signature_internal (cfg
->method
);
1934 if (!cfg
->arch
.cinfo
)
1935 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1936 cinfo
= cfg
->arch
.cinfo
;
1937 sig_ret
= mini_get_underlying_type (sig
->ret
);
1939 mono_arch_compute_omit_fp (cfg
);
1941 if (cfg
->arch
.omit_fp
)
1942 cfg
->frame_reg
= ARMREG_SP
;
1944 cfg
->frame_reg
= ARMREG_FP
;
1946 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1948 /* allow room for the vararg method args: void* and long/double */
1949 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1950 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (target_mgreg_t
)*8);
1952 header
= cfg
->header
;
1954 /* See mono_arch_get_global_int_regs () */
1955 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1956 cfg
->uses_rgctx_reg
= TRUE
;
1958 if (cfg
->frame_reg
!= ARMREG_SP
)
1959 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1961 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1962 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1963 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1967 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1968 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1969 cfg
->ret
->opcode
= OP_REGVAR
;
1970 cfg
->ret
->inst_c0
= ARMREG_R0
;
1973 /* local vars are at a positive offset from the stack pointer */
1975 * also note that if the function uses alloca, we use FP
1976 * to point at the local variables.
1978 offset
= 0; /* linkage area */
1979 /* align the offset to 16 bytes: not sure this is needed here */
1981 //offset &= ~(8 - 1);
1983 /* add parameter area size for called functions */
1984 offset
+= cfg
->param_area
;
1987 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1990 /* allow room to save the return value */
1991 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1994 switch (cinfo
->ret
.storage
) {
1995 case RegTypeStructByVal
:
1997 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1998 offset
= ALIGN_TO (offset
, 8);
1999 cfg
->ret
->opcode
= OP_REGOFFSET
;
2000 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
2001 cfg
->ret
->inst_offset
= offset
;
2002 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2003 offset
+= cinfo
->ret
.nregs
* sizeof (target_mgreg_t
);
2007 case RegTypeStructByAddr
:
2008 ins
= cfg
->vret_addr
;
2009 offset
+= sizeof (target_mgreg_t
) - 1;
2010 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2011 ins
->inst_offset
= offset
;
2012 ins
->opcode
= OP_REGOFFSET
;
2013 ins
->inst_basereg
= cfg
->frame_reg
;
2014 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2015 g_print ("vret_addr =");
2016 mono_print_ins (cfg
->vret_addr
);
2018 offset
+= sizeof (target_mgreg_t
);
2024 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2025 if (cfg
->arch
.seq_point_info_var
) {
2028 ins
= cfg
->arch
.seq_point_info_var
;
2032 offset
+= align
- 1;
2033 offset
&= ~(align
- 1);
2034 ins
->opcode
= OP_REGOFFSET
;
2035 ins
->inst_basereg
= cfg
->frame_reg
;
2036 ins
->inst_offset
= offset
;
2039 if (cfg
->arch
.ss_trigger_page_var
) {
2042 ins
= cfg
->arch
.ss_trigger_page_var
;
2045 offset
+= align
- 1;
2046 offset
&= ~(align
- 1);
2047 ins
->opcode
= OP_REGOFFSET
;
2048 ins
->inst_basereg
= cfg
->frame_reg
;
2049 ins
->inst_offset
= offset
;
2053 if (cfg
->arch
.seq_point_ss_method_var
) {
2056 ins
= cfg
->arch
.seq_point_ss_method_var
;
2059 offset
+= align
- 1;
2060 offset
&= ~(align
- 1);
2061 ins
->opcode
= OP_REGOFFSET
;
2062 ins
->inst_basereg
= cfg
->frame_reg
;
2063 ins
->inst_offset
= offset
;
2066 if (cfg
->arch
.seq_point_bp_method_var
) {
2069 ins
= cfg
->arch
.seq_point_bp_method_var
;
2072 offset
+= align
- 1;
2073 offset
&= ~(align
- 1);
2074 ins
->opcode
= OP_REGOFFSET
;
2075 ins
->inst_basereg
= cfg
->frame_reg
;
2076 ins
->inst_offset
= offset
;
2080 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
2081 /* Allocate a temporary used by the atomic ops */
2085 /* Allocate a local slot to hold the sig cookie address */
2086 offset
+= align
- 1;
2087 offset
&= ~(align
- 1);
2088 cfg
->arch
.atomic_tmp_offset
= offset
;
2091 cfg
->arch
.atomic_tmp_offset
= -1;
2094 cfg
->locals_min_stack_offset
= offset
;
2096 curinst
= cfg
->locals_start
;
2097 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
2100 ins
= cfg
->varinfo
[i
];
2101 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
2104 t
= ins
->inst_vtype
;
2105 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
2108 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2109 * pinvoke wrappers when they call functions returning structure */
2110 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
2111 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &ualign
);
2115 size
= mono_type_size (t
, &align
);
2117 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2118 * since it loads/stores misaligned words, which don't do the right thing.
2120 if (align
< 4 && size
>= 4)
2122 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2123 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2124 offset
+= align
- 1;
2125 offset
&= ~(align
- 1);
2126 ins
->opcode
= OP_REGOFFSET
;
2127 ins
->inst_offset
= offset
;
2128 ins
->inst_basereg
= cfg
->frame_reg
;
2130 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2133 cfg
->locals_max_stack_offset
= offset
;
2137 ins
= cfg
->args
[curinst
];
2138 if (ins
->opcode
!= OP_REGVAR
) {
2139 ins
->opcode
= OP_REGOFFSET
;
2140 ins
->inst_basereg
= cfg
->frame_reg
;
2141 offset
+= sizeof (target_mgreg_t
) - 1;
2142 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2143 ins
->inst_offset
= offset
;
2144 offset
+= sizeof (target_mgreg_t
);
2149 if (sig
->call_convention
== MONO_CALL_VARARG
) {
2153 /* Allocate a local slot to hold the sig cookie address */
2154 offset
+= align
- 1;
2155 offset
&= ~(align
- 1);
2156 cfg
->sig_cookie
= offset
;
2160 for (i
= 0; i
< sig
->param_count
; ++i
) {
2161 ainfo
= cinfo
->args
+ i
;
2163 ins
= cfg
->args
[curinst
];
2165 switch (ainfo
->storage
) {
2167 offset
= ALIGN_TO (offset
, 8);
2168 ins
->opcode
= OP_REGOFFSET
;
2169 ins
->inst_basereg
= cfg
->frame_reg
;
2170 /* These arguments are saved to the stack in the prolog */
2171 ins
->inst_offset
= offset
;
2172 if (cfg
->verbose_level
>= 2)
2173 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2181 if (ins
->opcode
!= OP_REGVAR
) {
2182 ins
->opcode
= OP_REGOFFSET
;
2183 ins
->inst_basereg
= cfg
->frame_reg
;
2184 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
2186 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2187 * since it loads/stores misaligned words, which don't do the right thing.
2189 if (align
< 4 && size
>= 4)
2191 /* The code in the prolog () stores words when storing vtypes received in a register */
2192 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
2194 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2195 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2196 offset
+= align
- 1;
2197 offset
&= ~(align
- 1);
2198 ins
->inst_offset
= offset
;
2204 /* align the offset to 8 bytes */
2205 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2206 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2211 cfg
->stack_offset
= offset
;
2215 mono_arch_create_vars (MonoCompile
*cfg
)
2217 MonoMethodSignature
*sig
;
2221 sig
= mono_method_signature_internal (cfg
->method
);
2223 if (!cfg
->arch
.cinfo
)
2224 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2225 cinfo
= cfg
->arch
.cinfo
;
2227 if (IS_HARD_FLOAT
) {
2228 for (i
= 0; i
< 2; i
++) {
2229 MonoInst
*inst
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_LOCAL
);
2230 inst
->flags
|= MONO_INST_VOLATILE
;
2232 cfg
->arch
.vfp_scratch_slots
[i
] = inst
;
2236 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2237 cfg
->ret_var_is_local
= TRUE
;
2239 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2240 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
2241 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2242 g_print ("vret_addr = ");
2243 mono_print_ins (cfg
->vret_addr
);
2247 if (cfg
->gen_sdb_seq_points
) {
2248 if (cfg
->compile_aot
) {
2249 MonoInst
*ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2250 ins
->flags
|= MONO_INST_VOLATILE
;
2251 cfg
->arch
.seq_point_info_var
= ins
;
2253 if (!cfg
->soft_breakpoints
) {
2254 /* Allocate a separate variable for this to save 1 load per seq point */
2255 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2256 ins
->flags
|= MONO_INST_VOLATILE
;
2257 cfg
->arch
.ss_trigger_page_var
= ins
;
2260 if (cfg
->soft_breakpoints
) {
2263 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2264 ins
->flags
|= MONO_INST_VOLATILE
;
2265 cfg
->arch
.seq_point_ss_method_var
= ins
;
2267 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2268 ins
->flags
|= MONO_INST_VOLATILE
;
2269 cfg
->arch
.seq_point_bp_method_var
= ins
;
/*
 * emit_sig_cookie:
 *   Emits IR that stores the "signature cookie" for a MONO_CALL_VARARG
 * call: a MonoMethodSignature* describing only the arguments after the
 * sentinel, stored at the cookie's stack offset so mono_ArgIterator_Setup
 * can locate the varargs at runtime.
 * NOTE(review): extraction artifact -- the return-type line, the
 * declaration of `sig_reg` and the body of the tailcall early-out were
 * elided from this view.
 */
2275 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2277 MonoMethodSignature
*tmp_sig
;
/* Tailcalls take a separate path (body elided here -- TODO confirm). */
2280 if (MONO_IS_TAILCALL_OPCODE (call
))
/* The cookie is always passed on the stack on this target. */
2283 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2286 * mono_ArgIterator_Setup assumes the signature cookie is
2287 * passed first and all the arguments which were before it are
2288 * passed on the stack after the signature. So compensate by
2289 * passing a different signature.
/* Build a trimmed copy of the signature containing only the varargs. */
2291 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2292 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2293 tmp_sig
->sentinelpos
= 0;
2294 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
/* Materialize the signature constant and store it at the cookie slot. */
2296 sig_reg
= mono_alloc_ireg (cfg
);
2297 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2299 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2304 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2309 LLVMCallInfo
*linfo
;
2311 n
= sig
->param_count
+ sig
->hasthis
;
2313 cinfo
= get_call_info (cfg
->mempool
, sig
);
2315 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2318 * LLVM always uses the native ABI while we use our own ABI, the
2319 * only difference is the handling of vtypes:
2320 * - we only pass/receive them in registers in some cases, and only
2321 * in 1 or 2 integer registers.
2323 switch (cinfo
->ret
.storage
) {
2324 case RegTypeGeneral
:
2327 case RegTypeIRegPair
:
2329 case RegTypeStructByAddr
:
2331 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2333 /* Vtype returned using a hidden argument */
2334 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2335 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2339 case RegTypeStructByVal
:
2340 /* LLVM models this by returning an int array */
2341 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2342 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2346 linfo
->ret
.storage
= LLVMArgFpStruct
;
2347 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2348 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2351 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2352 cfg
->disable_llvm
= TRUE
;
2356 for (i
= 0; i
< n
; ++i
) {
2357 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2358 ainfo
= cinfo
->args
+ i
;
2360 lainfo
->storage
= LLVMArgNone
;
2362 switch (ainfo
->storage
) {
2363 case RegTypeGeneral
:
2364 case RegTypeIRegPair
:
2366 case RegTypeBaseGen
:
2368 lainfo
->storage
= LLVMArgNormal
;
2370 case RegTypeStructByVal
: {
2371 lainfo
->storage
= LLVMArgAsIArgs
;
2372 int slotsize
= eabi_supported
&& ainfo
->align
== 8 ? 8 : 4;
2373 lainfo
->nslots
= ALIGN_TO (ainfo
->struct_size
, slotsize
) / slotsize
;
2374 lainfo
->esize
= slotsize
;
2377 case RegTypeStructByAddr
:
2378 case RegTypeStructByAddrOnStack
:
2379 lainfo
->storage
= LLVMArgVtypeByRef
;
2384 lainfo
->storage
= LLVMArgAsFpArgs
;
2385 lainfo
->nslots
= ainfo
->nregs
;
2386 lainfo
->esize
= ainfo
->esize
;
2387 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2388 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2392 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2393 cfg
->disable_llvm
= TRUE
;
2403 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2406 MonoMethodSignature
*sig
;
2410 sig
= call
->signature
;
2411 n
= sig
->param_count
+ sig
->hasthis
;
2413 cinfo
= get_call_info (cfg
->mempool
, sig
);
2415 switch (cinfo
->ret
.storage
) {
2416 case RegTypeStructByVal
:
2418 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2419 /* The JIT will transform this into a normal call */
2420 call
->vret_in_reg
= TRUE
;
2423 if (MONO_IS_TAILCALL_OPCODE (call
))
2426 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2427 * the location pointed to by it after call in emit_move_return_value ().
2429 if (!cfg
->arch
.vret_addr_loc
) {
2430 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2431 /* Prevent it from being register allocated or optimized away */
2432 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2435 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2437 case RegTypeStructByAddr
: {
2439 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2440 vtarg
->sreg1
= call
->vret_var
->dreg
;
2441 vtarg
->dreg
= mono_alloc_preg (cfg
);
2442 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2444 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2451 for (i
= 0; i
< n
; ++i
) {
2452 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2455 if (i
>= sig
->hasthis
)
2456 t
= sig
->params
[i
- sig
->hasthis
];
2458 t
= mono_get_int_type ();
2459 t
= mini_get_underlying_type (t
);
2461 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2462 /* Emit the signature cookie just before the implicit arguments */
2463 emit_sig_cookie (cfg
, call
, cinfo
);
2466 in
= call
->args
[i
];
2468 switch (ainfo
->storage
) {
2469 case RegTypeGeneral
:
2470 case RegTypeIRegPair
:
2471 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2472 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2473 ins
->dreg
= mono_alloc_ireg (cfg
);
2474 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2475 MONO_ADD_INS (cfg
->cbb
, ins
);
2476 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2478 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2479 ins
->dreg
= mono_alloc_ireg (cfg
);
2480 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2481 MONO_ADD_INS (cfg
->cbb
, ins
);
2482 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2483 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2484 if (ainfo
->size
== 4) {
2485 if (IS_SOFT_FLOAT
) {
2486 /* mono_emit_call_args () have already done the r8->r4 conversion */
2487 /* The converted value is in an int vreg */
2488 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2489 ins
->dreg
= mono_alloc_ireg (cfg
);
2490 ins
->sreg1
= in
->dreg
;
2491 MONO_ADD_INS (cfg
->cbb
, ins
);
2492 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2496 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2497 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2498 creg
= mono_alloc_ireg (cfg
);
2499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2500 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2503 if (IS_SOFT_FLOAT
) {
2504 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2505 ins
->dreg
= mono_alloc_ireg (cfg
);
2506 ins
->sreg1
= in
->dreg
;
2507 MONO_ADD_INS (cfg
->cbb
, ins
);
2508 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2510 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2511 ins
->dreg
= mono_alloc_ireg (cfg
);
2512 ins
->sreg1
= in
->dreg
;
2513 MONO_ADD_INS (cfg
->cbb
, ins
);
2514 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2518 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2519 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2520 creg
= mono_alloc_ireg (cfg
);
2521 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2522 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2523 creg
= mono_alloc_ireg (cfg
);
2524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2525 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2528 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2530 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2531 ins
->dreg
= mono_alloc_ireg (cfg
);
2532 ins
->sreg1
= in
->dreg
;
2533 MONO_ADD_INS (cfg
->cbb
, ins
);
2535 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2538 case RegTypeStructByVal
:
2539 case RegTypeGSharedVtInReg
:
2540 case RegTypeGSharedVtOnStack
:
2542 case RegTypeStructByAddr
:
2543 case RegTypeStructByAddrOnStack
:
2544 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2545 ins
->opcode
= OP_OUTARG_VT
;
2546 ins
->sreg1
= in
->dreg
;
2547 ins
->klass
= in
->klass
;
2548 ins
->inst_p0
= call
;
2549 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2550 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2551 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2552 MONO_ADD_INS (cfg
->cbb
, ins
);
2555 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2556 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2557 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2558 if (t
->type
== MONO_TYPE_R8
) {
2559 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2562 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2564 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2567 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2570 case RegTypeBaseGen
:
2571 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2572 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2573 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2574 ins
->dreg
= mono_alloc_ireg (cfg
);
2575 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2576 MONO_ADD_INS (cfg
->cbb
, ins
);
2577 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2578 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2581 /* This should work for soft-float as well */
2583 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2584 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2585 creg
= mono_alloc_ireg (cfg
);
2586 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2587 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2588 creg
= mono_alloc_ireg (cfg
);
2589 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2590 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2591 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2593 g_assert_not_reached ();
2597 int fdreg
= mono_alloc_freg (cfg
);
2599 if (ainfo
->size
== 8) {
2600 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2601 ins
->sreg1
= in
->dreg
;
2603 MONO_ADD_INS (cfg
->cbb
, ins
);
2605 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2610 * Mono's register allocator doesn't speak single-precision registers that
2611 * overlap double-precision registers (i.e. armhf). So we have to work around
2612 * the register allocator and load the value from memory manually.
2614 * So we create a variable for the float argument and an instruction to store
2615 * the argument into the variable. We then store the list of these arguments
2616 * in call->float_args. This list is then used by emit_float_args later to
2617 * pass the arguments in the various call opcodes.
2619 * This is not very nice, and we should really try to fix the allocator.
2622 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2624 /* Make sure the instruction isn't seen as pointless and removed.
2626 float_arg
->flags
|= MONO_INST_VOLATILE
;
2628 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2630 /* We use the dreg to look up the instruction later. The hreg is used to
2631 * emit the instruction that loads the value into the FP reg.
2633 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2634 fad
->vreg
= float_arg
->dreg
;
2635 fad
->hreg
= ainfo
->reg
;
2637 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2640 call
->used_iregs
|= 1 << ainfo
->reg
;
2641 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2645 g_assert_not_reached ();
2649 /* Handle the case where there are no implicit arguments */
2650 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2651 emit_sig_cookie (cfg
, call
, cinfo
);
2653 call
->call_info
= cinfo
;
2654 call
->stack_usage
= cinfo
->stack_usage
;
2658 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2664 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2665 ins
->dreg
= mono_alloc_freg (cfg
);
2666 ins
->sreg1
= arg
->dreg
;
2667 MONO_ADD_INS (cfg
->cbb
, ins
);
2668 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2671 g_assert_not_reached ();
2677 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2679 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2681 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
2682 int ovf_size
= ainfo
->vtsize
;
2683 int doffset
= ainfo
->offset
;
2684 int struct_size
= ainfo
->struct_size
;
2685 int i
, soffset
, dreg
, tmpreg
;
2687 switch (ainfo
->storage
) {
2688 case RegTypeGSharedVtInReg
:
2689 case RegTypeStructByAddr
:
2691 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2693 case RegTypeGSharedVtOnStack
:
2694 case RegTypeStructByAddrOnStack
:
2695 /* Pass by addr on stack */
2696 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2699 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2700 if (ainfo
->esize
== 4)
2701 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2703 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2704 load
->dreg
= mono_alloc_freg (cfg
);
2705 load
->inst_basereg
= src
->dreg
;
2706 load
->inst_offset
= i
* ainfo
->esize
;
2707 MONO_ADD_INS (cfg
->cbb
, load
);
2709 if (ainfo
->esize
== 4) {
2712 /* See RegTypeFP in mono_arch_emit_call () */
2713 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2714 float_arg
->flags
|= MONO_INST_VOLATILE
;
2715 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2717 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2718 fad
->vreg
= float_arg
->dreg
;
2719 fad
->hreg
= ainfo
->reg
+ i
;
2721 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2723 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2729 for (i
= 0; i
< ainfo
->size
; ++i
) {
2730 dreg
= mono_alloc_ireg (cfg
);
2731 switch (struct_size
) {
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2736 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2739 tmpreg
= mono_alloc_ireg (cfg
);
2740 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2741 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2742 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2743 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2744 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2745 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2746 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2749 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2752 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2753 soffset
+= sizeof (target_mgreg_t
);
2754 struct_size
-= sizeof (target_mgreg_t
);
2756 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2758 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (target_mgreg_t
), struct_size
), struct_size
< 4 ? 1 : 4);
2764 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2766 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
2769 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2772 if (COMPILE_LLVM (cfg
)) {
2773 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2775 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2776 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2777 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2778 MONO_ADD_INS (cfg
->cbb
, ins
);
2783 case MONO_ARM_FPU_NONE
:
2784 if (ret
->type
== MONO_TYPE_R8
) {
2787 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2788 ins
->dreg
= cfg
->ret
->dreg
;
2789 ins
->sreg1
= val
->dreg
;
2790 MONO_ADD_INS (cfg
->cbb
, ins
);
2793 if (ret
->type
== MONO_TYPE_R4
) {
2794 /* Already converted to an int in method_to_ir () */
2795 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2799 case MONO_ARM_FPU_VFP
:
2800 case MONO_ARM_FPU_VFP_HARD
:
2801 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2804 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2805 ins
->dreg
= cfg
->ret
->dreg
;
2806 ins
->sreg1
= val
->dreg
;
2807 MONO_ADD_INS (cfg
->cbb
, ins
);
2812 g_assert_not_reached ();
2816 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2819 #endif /* #ifndef DISABLE_JIT */
2822 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2828 MonoMethodSignature
*sig
;
2831 MonoType
**param_types
;
2835 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2839 switch (cinfo
->ret
.storage
) {
2841 case RegTypeGeneral
:
2842 case RegTypeIRegPair
:
2843 case RegTypeStructByAddr
:
2854 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2855 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2858 switch (ainfo
->storage
) {
2859 case RegTypeGeneral
:
2860 case RegTypeIRegPair
:
2861 case RegTypeBaseGen
:
2866 case RegTypeStructByVal
:
2867 if (ainfo
->size
== 0)
2868 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2870 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2877 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2878 for (i
= 0; i
< sig
->param_count
; ++i
) {
2879 MonoType
*t
= sig
->params
[i
];
2884 t
= mini_get_underlying_type (t
);
/*
 * mono_arch_dyn_call_prepare:
 * Build the arch-specific descriptor used by mono_arch_start_dyn_call () /
 * mono_arch_finish_dyn_call () to invoke a method with signature SIG
 * dynamically. Computes the call info, caches the underlying return and
 * parameter types, and returns the descriptor (freed by
 * mono_arch_dyn_call_free ()).
 * NOTE(review): this chunk is line-mangled and elided; the body of the
 * !dyn_call_supported () branch is missing here — presumably it frees cinfo
 * and returns NULL. Verify against the full file.
 */
2907 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2909 ArchDynCallInfo
*info
;
/* cinfo is heap-allocated (mempool is NULL) and owned by the returned info */
2913 cinfo
= get_call_info (NULL
, sig
);
2915 if (!dyn_call_supported (cinfo
, sig
)) {
2920 info
= g_new0 (ArchDynCallInfo
, 1);
2921 // FIXME: Preprocess the info to speed up start_dyn_call ()
2923 info
->cinfo
= cinfo
;
/* Cache underlying types so start/finish_dyn_call need not re-resolve them */
2924 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2925 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2926 for (i
= 0; i
< sig
->param_count
; ++i
)
2927 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2929 return (MonoDynCallInfo
*)info
;
/*
 * mono_arch_dyn_call_free:
 * Release the ArchDynCallInfo allocated by mono_arch_dyn_call_prepare ().
 * NOTE(review): lines are elided in this chunk; only the g_free of
 * ainfo->cinfo is visible — presumably param_types and ainfo itself are
 * freed on the missing lines. Verify against the full file.
 */
2933 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2935 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
/* cinfo came from get_call_info (NULL, sig) in dyn_call_prepare */
2937 g_free (ainfo
->cinfo
);
/*
 * mono_arch_dyn_call_get_buf_size:
 * Return the size of the buffer the caller must supply to
 * mono_arch_start_dyn_call (): the fixed DynCallArgs header plus the
 * stack argument area computed by get_call_info ().
 */
2942 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
2944 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
/* The stack area must stay frame-aligned; catch a bad cinfo early */
2946 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
2947 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
;
2951 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
2953 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2954 CallInfo
*cinfo
= dinfo
->cinfo
;
2955 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2956 int arg_index
, greg
, i
, j
, pindex
;
2957 MonoMethodSignature
*sig
= dinfo
->sig
;
2962 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
2968 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2969 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)*(args
[arg_index
++]);
2974 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2975 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)ret
;
2977 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2978 MonoType
*t
= dinfo
->param_types
[i
];
2979 gpointer
*arg
= args
[arg_index
++];
2980 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2983 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2985 } else if (ainfo
->storage
== RegTypeFP
) {
2986 } else if (ainfo
->storage
== RegTypeBase
) {
2987 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2988 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2989 /* slot + 1 is the first stack slot, so the code below will work */
2992 g_assert_not_reached ();
2996 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3001 case MONO_TYPE_OBJECT
:
3005 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3008 p
->regs
[slot
] = *(guint8
*)arg
;
3011 p
->regs
[slot
] = *(gint8
*)arg
;
3014 p
->regs
[slot
] = *(gint16
*)arg
;
3017 p
->regs
[slot
] = *(guint16
*)arg
;
3020 p
->regs
[slot
] = *(gint32
*)arg
;
3023 p
->regs
[slot
] = *(guint32
*)arg
;
3027 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3028 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3031 if (ainfo
->storage
== RegTypeFP
) {
3032 float f
= *(float*)arg
;
3033 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
3036 p
->regs
[slot
] = *(host_mgreg_t
*)arg
;
3040 if (ainfo
->storage
== RegTypeFP
) {
3041 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
3044 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3045 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3048 case MONO_TYPE_GENERICINST
:
3049 if (MONO_TYPE_IS_REFERENCE (t
)) {
3050 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3053 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
3054 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
3055 guint8
*nullable_buf
;
3058 size
= mono_class_value_size (klass
, NULL
);
3059 nullable_buf
= g_alloca (size
);
3060 g_assert (nullable_buf
);
3062 /* The argument pointed to by arg is either a boxed vtype or null */
3063 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
3065 arg
= (gpointer
*)nullable_buf
;
3071 case MONO_TYPE_VALUETYPE
:
3072 g_assert (ainfo
->storage
== RegTypeStructByVal
);
3074 if (ainfo
->size
== 0)
3075 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
3079 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
3080 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [j
];
3083 g_assert_not_reached ();
3089 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
3091 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
3092 DynCallArgs
*p
= (DynCallArgs
*)buf
;
3093 MonoType
*ptype
= ainfo
->rtype
;
3094 guint8
*ret
= p
->ret
;
3095 host_mgreg_t res
= p
->res
;
3096 host_mgreg_t res2
= p
->res2
;
3098 switch (ptype
->type
) {
3099 case MONO_TYPE_VOID
:
3100 *(gpointer
*)ret
= NULL
;
3102 case MONO_TYPE_OBJECT
:
3106 *(gpointer
*)ret
= (gpointer
)(gsize
)res
;
3112 *(guint8
*)ret
= res
;
3115 *(gint16
*)ret
= res
;
3118 *(guint16
*)ret
= res
;
3121 *(gint32
*)ret
= res
;
3124 *(guint32
*)ret
= res
;
3128 /* This handles endianness as well */
3129 ((gint32
*)ret
) [0] = res
;
3130 ((gint32
*)ret
) [1] = res2
;
3132 case MONO_TYPE_GENERICINST
:
3133 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
3134 *(gpointer
*)ret
= (gpointer
)res
;
3139 case MONO_TYPE_VALUETYPE
:
3140 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
3146 *(float*)ret
= *(float*)&p
->fpregs
[0];
3148 *(float*)ret
= *(float*)&res
;
3150 case MONO_TYPE_R8
: {
3151 host_mgreg_t regs
[2];
3154 if (IS_HARD_FLOAT
) {
3155 *(double*)ret
= p
->fpregs
[0];
3160 *(double*)ret
= *(double*)®s
;
3165 g_assert_not_reached ();
3172 * The immediate field for cond branches is big enough for all reasonable methods
3174 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3175 if (0 && ins->inst_true_bb->native_offset) { \
3176 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3178 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3179 ARM_B_COND (code, (condcode), 0); \
3182 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3184 /* emit an exception if condition is fail
3186 * We assign the extra code used to throw the implicit exceptions
3187 * to cfg->bb_exit as far as the big branch handling is concerned
3189 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3191 mono_add_patch_info (cfg, code - cfg->native_code, \
3192 MONO_PATCH_INFO_EXC, exc_name); \
3193 ARM_BL_COND (code, (condcode), 0); \
3196 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3199 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3204 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3208 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3209 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3211 switch (ins
->opcode
) {
3214 /* Already done by an arch-independent pass */
3216 case OP_LOAD_MEMBASE
:
3217 case OP_LOADI4_MEMBASE
:
3219 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3220 * OP_LOAD_MEMBASE offset(basereg), reg
3222 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3223 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3224 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3225 ins
->inst_offset
== last_ins
->inst_offset
) {
3226 if (ins
->dreg
== last_ins
->sreg1
) {
3227 MONO_DELETE_INS (bb
, ins
);
3230 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3231 ins
->opcode
= OP_MOVE
;
3232 ins
->sreg1
= last_ins
->sreg1
;
3236 * Note: reg1 must be different from the basereg in the second load
3237 * OP_LOAD_MEMBASE offset(basereg), reg1
3238 * OP_LOAD_MEMBASE offset(basereg), reg2
3240 * OP_LOAD_MEMBASE offset(basereg), reg1
3241 * OP_MOVE reg1, reg2
3243 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3244 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3245 ins
->inst_basereg
!= last_ins
->dreg
&&
3246 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3247 ins
->inst_offset
== last_ins
->inst_offset
) {
3249 if (ins
->dreg
== last_ins
->dreg
) {
3250 MONO_DELETE_INS (bb
, ins
);
3253 ins
->opcode
= OP_MOVE
;
3254 ins
->sreg1
= last_ins
->dreg
;
3257 //g_assert_not_reached ();
3261 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3262 * OP_LOAD_MEMBASE offset(basereg), reg
3264 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3265 * OP_ICONST reg, imm
3267 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3268 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3269 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3270 ins
->inst_offset
== last_ins
->inst_offset
) {
3271 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3272 ins
->opcode
= OP_ICONST
;
3273 ins
->inst_c0
= last_ins
->inst_imm
;
3274 g_assert_not_reached (); // check this rule
3278 case OP_LOADU1_MEMBASE
:
3279 case OP_LOADI1_MEMBASE
:
3280 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3281 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3282 ins
->inst_offset
== last_ins
->inst_offset
) {
3283 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3284 ins
->sreg1
= last_ins
->sreg1
;
3287 case OP_LOADU2_MEMBASE
:
3288 case OP_LOADI2_MEMBASE
:
3289 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3290 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3291 ins
->inst_offset
== last_ins
->inst_offset
) {
3292 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3293 ins
->sreg1
= last_ins
->sreg1
;
3297 ins
->opcode
= OP_MOVE
;
3301 if (ins
->dreg
== ins
->sreg1
) {
3302 MONO_DELETE_INS (bb
, ins
);
3306 * OP_MOVE sreg, dreg
3307 * OP_MOVE dreg, sreg
3309 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3310 ins
->sreg1
== last_ins
->dreg
&&
3311 ins
->dreg
== last_ins
->sreg1
) {
3312 MONO_DELETE_INS (bb
, ins
);
3321 * the branch_cc_table should maintain the order of these
3335 branch_cc_table
[] = {
3349 #define ADD_NEW_INS(cfg,dest,op) do { \
3350 MONO_INST_NEW ((cfg), (dest), (op)); \
3351 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map a reg+immediate opcode to its reg+reg equivalent. Used by
 * mono_arch_lowering_pass () after it has materialized the immediate
 * (offset or stored value) into a register with OP_ICONST:
 *  - memory loads/stores with an unencodable offset become *_MEMINDEX;
 *  - immediate stores become the corresponding *_MEMBASE_REG (their
 *    offset may still need a second lowering pass, see the
 *    "goto loop_start" in the caller).
 * Asserts on opcodes with no mapping.
 * NOTE(review): several case bodies are elided in this chunk (e.g. the
 * returns for OP_COMPARE_IMM/OP_ICOMPARE_IMM); verify against the full file.
 */
3355 map_to_reg_reg_op (int op
)
3364 case OP_COMPARE_IMM
:
3366 case OP_ICOMPARE_IMM
:
3380 case OP_LOAD_MEMBASE
:
3381 return OP_LOAD_MEMINDEX
;
3382 case OP_LOADI4_MEMBASE
:
3383 return OP_LOADI4_MEMINDEX
;
3384 case OP_LOADU4_MEMBASE
:
3385 return OP_LOADU4_MEMINDEX
;
3386 case OP_LOADU1_MEMBASE
:
3387 return OP_LOADU1_MEMINDEX
;
3388 case OP_LOADI2_MEMBASE
:
3389 return OP_LOADI2_MEMINDEX
;
3390 case OP_LOADU2_MEMBASE
:
3391 return OP_LOADU2_MEMINDEX
;
3392 case OP_LOADI1_MEMBASE
:
3393 return OP_LOADI1_MEMINDEX
;
3394 case OP_STOREI1_MEMBASE_REG
:
3395 return OP_STOREI1_MEMINDEX
;
3396 case OP_STOREI2_MEMBASE_REG
:
3397 return OP_STOREI2_MEMINDEX
;
3398 case OP_STOREI4_MEMBASE_REG
:
3399 return OP_STOREI4_MEMINDEX
;
3400 case OP_STORE_MEMBASE_REG
:
3401 return OP_STORE_MEMINDEX
;
3402 case OP_STORER4_MEMBASE_REG
:
3403 return OP_STORER4_MEMINDEX
;
3404 case OP_STORER8_MEMBASE_REG
:
3405 return OP_STORER8_MEMINDEX
;
/* Immediate stores: the value is now in a register, keep membase addressing */
3406 case OP_STORE_MEMBASE_IMM
:
3407 return OP_STORE_MEMBASE_REG
;
3408 case OP_STOREI1_MEMBASE_IMM
:
3409 return OP_STOREI1_MEMBASE_REG
;
3410 case OP_STOREI2_MEMBASE_IMM
:
3411 return OP_STOREI2_MEMBASE_REG
;
3412 case OP_STOREI4_MEMBASE_IMM
:
3413 return OP_STOREI4_MEMBASE_REG
;
3415 g_assert_not_reached ();
3419 * Remove from the instruction list the instructions that can't be
3420 * represented with very simple instructions with no register
3424 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3426 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3427 int rot_amount
, imm8
, low_imm
;
3429 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3431 switch (ins
->opcode
) {
3435 case OP_COMPARE_IMM
:
3436 case OP_ICOMPARE_IMM
:
3450 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3451 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3452 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3453 temp
->inst_c0
= ins
->inst_imm
;
3454 temp
->dreg
= mono_alloc_ireg (cfg
);
3455 ins
->sreg2
= temp
->dreg
;
3457 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3458 ins
->opcode
= opcode2
;
3460 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3466 if (ins
->inst_imm
== 1) {
3467 ins
->opcode
= OP_MOVE
;
3470 if (ins
->inst_imm
== 0) {
3471 ins
->opcode
= OP_ICONST
;
3475 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3477 ins
->opcode
= OP_SHL_IMM
;
3478 ins
->inst_imm
= imm8
;
3481 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3482 temp
->inst_c0
= ins
->inst_imm
;
3483 temp
->dreg
= mono_alloc_ireg (cfg
);
3484 ins
->sreg2
= temp
->dreg
;
3485 ins
->opcode
= OP_IMUL
;
3492 MonoInst
*current
= ins
;
3494 /* may require a look-ahead of a couple instructions due to spilling */
3495 while (try_count
-- && current
->next
) {
3496 if (current
->next
->opcode
== OP_COND_EXC_C
|| current
->next
->opcode
== OP_COND_EXC_IC
) {
3497 /* ARM sets the C flag to 1 if there was _no_ overflow */
3498 current
->next
->opcode
= OP_COND_EXC_NC
;
3501 current
= current
->next
;
3506 case OP_IDIV_UN_IMM
:
3508 case OP_IREM_UN_IMM
: {
3509 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3510 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3511 temp
->inst_c0
= ins
->inst_imm
;
3512 temp
->dreg
= mono_alloc_ireg (cfg
);
3513 ins
->sreg2
= temp
->dreg
;
3515 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3516 ins
->opcode
= opcode2
;
3519 case OP_LOCALLOC_IMM
:
3520 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3521 temp
->inst_c0
= ins
->inst_imm
;
3522 temp
->dreg
= mono_alloc_ireg (cfg
);
3523 ins
->sreg1
= temp
->dreg
;
3524 ins
->opcode
= OP_LOCALLOC
;
3526 case OP_LOAD_MEMBASE
:
3527 case OP_LOADI4_MEMBASE
:
3528 case OP_LOADU4_MEMBASE
:
3529 case OP_LOADU1_MEMBASE
:
3530 /* we can do two things: load the immed in a register
3531 * and use an indexed load, or see if the immed can be
3532 * represented as an ad_imm + a load with a smaller offset
3533 * that fits. We just do the first for now, optimize later.
3535 if (arm_is_imm12 (ins
->inst_offset
))
3537 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3538 temp
->inst_c0
= ins
->inst_offset
;
3539 temp
->dreg
= mono_alloc_ireg (cfg
);
3540 ins
->sreg2
= temp
->dreg
;
3541 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3543 case OP_LOADI2_MEMBASE
:
3544 case OP_LOADU2_MEMBASE
:
3545 case OP_LOADI1_MEMBASE
:
3546 if (arm_is_imm8 (ins
->inst_offset
))
3548 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3549 temp
->inst_c0
= ins
->inst_offset
;
3550 temp
->dreg
= mono_alloc_ireg (cfg
);
3551 ins
->sreg2
= temp
->dreg
;
3552 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3554 case OP_LOADR4_MEMBASE
:
3555 case OP_LOADR8_MEMBASE
:
3556 if (arm_is_fpimm8 (ins
->inst_offset
))
3558 low_imm
= ins
->inst_offset
& 0x1ff;
3559 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3560 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3561 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3562 temp
->sreg1
= ins
->inst_basereg
;
3563 temp
->dreg
= mono_alloc_ireg (cfg
);
3564 ins
->inst_basereg
= temp
->dreg
;
3565 ins
->inst_offset
= low_imm
;
3569 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3570 temp
->inst_c0
= ins
->inst_offset
;
3571 temp
->dreg
= mono_alloc_ireg (cfg
);
3573 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3574 add_ins
->sreg1
= ins
->inst_basereg
;
3575 add_ins
->sreg2
= temp
->dreg
;
3576 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3578 ins
->inst_basereg
= add_ins
->dreg
;
3579 ins
->inst_offset
= 0;
3582 case OP_STORE_MEMBASE_REG
:
3583 case OP_STOREI4_MEMBASE_REG
:
3584 case OP_STOREI1_MEMBASE_REG
:
3585 if (arm_is_imm12 (ins
->inst_offset
))
3587 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3588 temp
->inst_c0
= ins
->inst_offset
;
3589 temp
->dreg
= mono_alloc_ireg (cfg
);
3590 ins
->sreg2
= temp
->dreg
;
3591 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3593 case OP_STOREI2_MEMBASE_REG
:
3594 if (arm_is_imm8 (ins
->inst_offset
))
3596 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3597 temp
->inst_c0
= ins
->inst_offset
;
3598 temp
->dreg
= mono_alloc_ireg (cfg
);
3599 ins
->sreg2
= temp
->dreg
;
3600 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3602 case OP_STORER4_MEMBASE_REG
:
3603 case OP_STORER8_MEMBASE_REG
:
3604 if (arm_is_fpimm8 (ins
->inst_offset
))
3606 low_imm
= ins
->inst_offset
& 0x1ff;
3607 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3608 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3609 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3610 temp
->sreg1
= ins
->inst_destbasereg
;
3611 temp
->dreg
= mono_alloc_ireg (cfg
);
3612 ins
->inst_destbasereg
= temp
->dreg
;
3613 ins
->inst_offset
= low_imm
;
3617 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3618 temp
->inst_c0
= ins
->inst_offset
;
3619 temp
->dreg
= mono_alloc_ireg (cfg
);
3621 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3622 add_ins
->sreg1
= ins
->inst_destbasereg
;
3623 add_ins
->sreg2
= temp
->dreg
;
3624 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3626 ins
->inst_destbasereg
= add_ins
->dreg
;
3627 ins
->inst_offset
= 0;
3630 case OP_STORE_MEMBASE_IMM
:
3631 case OP_STOREI1_MEMBASE_IMM
:
3632 case OP_STOREI2_MEMBASE_IMM
:
3633 case OP_STOREI4_MEMBASE_IMM
:
3634 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3635 temp
->inst_c0
= ins
->inst_imm
;
3636 temp
->dreg
= mono_alloc_ireg (cfg
);
3637 ins
->sreg1
= temp
->dreg
;
3638 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3640 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3643 gboolean swap
= FALSE
;
3647 /* Optimized away */
3652 /* Some fp compares require swapped operands */
3653 switch (ins
->next
->opcode
) {
3655 ins
->next
->opcode
= OP_FBLT
;
3659 ins
->next
->opcode
= OP_FBLT_UN
;
3663 ins
->next
->opcode
= OP_FBGE
;
3667 ins
->next
->opcode
= OP_FBGE_UN
;
3675 ins
->sreg1
= ins
->sreg2
;
3684 bb
->last_ins
= last_ins
;
3685 bb
->max_vreg
= cfg
->next_vreg
;
3689 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3693 if (long_ins
->opcode
== OP_LNEG
) {
3695 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3696 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
/*
 * emit_float_to_int:
 * Emit VFP code converting the double in SREG to an integer of SIZE bytes
 * (1, 2 or 4) in DREG, signed or unsigned per IS_SIGNED. The conversion
 * goes through a VFP scratch register (TOSIZD/TOUIZD truncate toward zero),
 * then FMRS moves the result to the core register, and the narrower sizes
 * are truncated: unsigned via mask/logical shifts, signed via shl+sar
 * sign extension.
 * NOTE(review): the if/else skeleton lines (is_signed test, braces) are
 * elided in this chunk; verify branch structure against the full file.
 */
3702 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3704 /* sreg is a float, dreg is an integer reg */
3706 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
/* signed: truncating double -> s32 */
3708 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
/* unsigned: truncating double -> u32 */
3710 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3711 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3712 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
/* unsigned narrowing: mask to 8 bits, or shl/shr pair for 16 bits */
3716 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3717 else if (size
== 2) {
3718 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3719 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
/* signed narrowing: shl + arithmetic shr sign-extends from 8/16 bits */
3723 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3724 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3725 } else if (size
== 2) {
3726 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3727 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
/*
 * emit_r4_to_int:
 * Single-precision counterpart of emit_float_to_int (): convert the float
 * in SREG to an integer of SIZE bytes (1, 2 or 4) in DREG, signed or
 * unsigned per IS_SIGNED, using TOSIZS/TOUIZS (truncate toward zero)
 * through a VFP scratch register, then narrowing in the core register.
 * NOTE(review): the if/else skeleton lines (is_signed test, braces) are
 * elided in this chunk; verify branch structure against the full file.
 */
3734 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3736 /* sreg is a float, dreg is an integer reg */
3738 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
/* signed: truncating float -> s32 */
3740 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
/* unsigned: truncating float -> u32 */
3742 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3743 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3744 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
/* unsigned narrowing: mask to 8 bits, or shl/shr pair for 16 bits */
3748 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3749 else if (size
== 2) {
3750 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3751 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
/* signed narrowing: shl + arithmetic shr sign-extends from 8/16 bits */
3755 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3756 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3757 } else if (size
== 2) {
3758 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3759 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3765 #endif /* #ifndef DISABLE_JIT */
3767 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * emit_thunk:
 * Emit a THUNK_SIZE (3 words, see the #define in the file header) jump
 * trampoline at CODE targeting TARGET:
 *   ldr ip, [pc, #0]    ; load TARGET from the literal word below
 *   bx ip / mov pc, ip  ; bx only when thumb interworking is supported
 *   .word TARGET
 * handle_thunk () identifies free slots by a zero first word and matches
 * existing thunks by comparing word [2] against the target.
 * NOTE(review): the declaration of 'p' and the 'else' line are elided in
 * this chunk; 'p' presumably records the start of the thunk for the
 * icache flush below. Verify against the full file.
 */
3770 emit_thunk (guint8
*code
, gconstpointer target
)
/* pc reads as this insn + 8, so offset 0 addresses the literal word */
3774 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3775 if (thumb_supported
)
3776 ARM_BX (code
, ARMREG_IP
);
3778 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
/* Inline literal holding the branch target */
3779 *(guint32
*)code
= (guint32
)(gsize
)target
;
/* Make the freshly written instructions visible to the icache */
3781 mono_arch_flush_icache (p
, code
- p
);
3785 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3787 MonoJitInfo
*ji
= NULL
;
3788 MonoThunkJitInfo
*info
;
3791 guint8
*orig_target
;
3792 guint8
*target_thunk
;
3795 domain
= mono_domain_get ();
3799 * This can be called multiple times during JITting,
3800 * save the current position in cfg->arch to avoid
3801 * doing a O(n^2) search.
3803 if (!cfg
->arch
.thunks
) {
3804 cfg
->arch
.thunks
= cfg
->thunks
;
3805 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3807 thunks
= cfg
->arch
.thunks
;
3808 thunks_size
= cfg
->arch
.thunks_size
;
3810 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3811 g_assert_not_reached ();
3814 g_assert (*(guint32
*)thunks
== 0);
3815 emit_thunk (thunks
, target
);
3816 arm_patch (code
, thunks
);
3818 cfg
->arch
.thunks
+= THUNK_SIZE
;
3819 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
3821 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3823 info
= mono_jit_info_get_thunk_info (ji
);
3826 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3827 thunks_size
= info
->thunks_size
;
3829 orig_target
= mono_arch_get_call_target (code
+ 4);
3831 mono_mini_arch_lock ();
3833 target_thunk
= NULL
;
3834 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3835 /* The call already points to a thunk, because of trampolines etc. */
3836 target_thunk
= orig_target
;
3838 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3839 if (((guint32
*)p
) [0] == 0) {
3843 } else if (((guint32
*)p
) [2] == (guint32
)(gsize
)target
) {
3844 /* Thunk already points to target */
3851 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3853 if (!target_thunk
) {
3854 mono_mini_arch_unlock ();
3855 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3856 g_assert_not_reached ();
3859 emit_thunk (target_thunk
, target
);
3860 arm_patch (code
, target_thunk
);
3861 mono_arch_flush_icache (code
, 4);
3863 mono_mini_arch_unlock ();
3868 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3870 guint32
*code32
= (guint32
*)code
;
3871 guint32 ins
= *code32
;
3872 guint32 prim
= (ins
>> 25) & 7;
3873 guint32 tval
= GPOINTER_TO_UINT (target
);
3875 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3876 if (prim
== 5) { /* 101b */
3877 /* the diff starts 8 bytes from the branch opcode */
3878 gint diff
= target
- code
- 8;
3880 gint tmask
= 0xffffffff;
3881 if (tval
& 1) { /* entering thumb mode */
3882 diff
= target
- 1 - code
- 8;
3883 g_assert (thumb_supported
);
3884 tbits
= 0xf << 28; /* bl->blx bit pattern */
3885 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3886 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3890 tmask
= ~(1 << 24); /* clear the link bit */
3891 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3896 if (diff
<= 33554431) {
3898 ins
= (ins
& 0xff000000) | diff
;
3900 *code32
= ins
| tbits
;
3904 /* diff between 0 and -33554432 */
3905 if (diff
>= -33554432) {
3907 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3909 *code32
= ins
| tbits
;
3914 handle_thunk (cfg
, domain
, code
, target
);
3919 * The alternative call sequences looks like this:
3921 * ldr ip, [pc] // loads the address constant
3922 * b 1f // jumps around the constant
3923 * address constant embedded in the code
3928 * There are two cases for patching:
3929 * a) at the end of method emission: in this case code points to the start
3930 * of the call sequence
3931 * b) during runtime patching of the call site: in this case code points
3932 * to the mov pc, ip instruction
3934 * We have to handle also the thunk jump code sequence:
3938 * address constant // execution never reaches here
3940 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3941 /* Branch and exchange: the address is constructed in a reg
3942 * We can patch BX when the code sequence is the following:
3943 * ldr ip, [pc, #0] ; 0x8
3950 guint8
*emit
= (guint8
*)ccode
;
3951 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3953 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3954 ARM_BX (emit
, ARMREG_IP
);
3956 /*patching from magic trampoline*/
3957 if (ins
== ccode
[3]) {
3958 g_assert (code32
[-4] == ccode
[0]);
3959 g_assert (code32
[-3] == ccode
[1]);
3960 g_assert (code32
[-1] == ccode
[2]);
3961 code32
[-2] = (guint32
)(gsize
)target
;
3964 /*patching from JIT*/
3965 if (ins
== ccode
[0]) {
3966 g_assert (code32
[1] == ccode
[1]);
3967 g_assert (code32
[3] == ccode
[2]);
3968 g_assert (code32
[4] == ccode
[3]);
3969 code32
[2] = (guint32
)(gsize
)target
;
3972 g_assert_not_reached ();
3973 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
3981 guint8
*emit
= (guint8
*)ccode
;
3982 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3984 ARM_BLX_REG (emit
, ARMREG_IP
);
3986 g_assert (code32
[-3] == ccode
[0]);
3987 g_assert (code32
[-2] == ccode
[1]);
3988 g_assert (code32
[0] == ccode
[2]);
3990 code32
[-1] = (guint32
)(gsize
)target
;
3993 guint32
*tmp
= ccode
;
3994 guint8
*emit
= (guint8
*)tmp
;
3995 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3996 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3997 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3998 ARM_BX (emit
, ARMREG_IP
);
3999 if (ins
== ccode
[2]) {
4000 g_assert_not_reached (); // should be -2 ...
4001 code32
[-1] = (guint32
)(gsize
)target
;
4004 if (ins
== ccode
[0]) {
4005 /* handles both thunk jump code and the far call sequence */
4006 code32
[2] = (guint32
)(gsize
)target
;
4009 g_assert_not_reached ();
4011 // g_print ("patched with 0x%08x\n", ins);
4015 arm_patch (guchar
*code
, const guchar
*target
)
4017 arm_patch_general (NULL
, NULL
, code
, target
);
4021 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
4022 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
4023 * to be used with the emit macros.
4024 * Return -1 otherwise.
4027 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
4030 for (i
= 0; i
< 31; i
+= 2) {
4034 res
= (val
<< (32 - i
)) | (val
>> i
);
4037 *rot_amount
= i
? 32 - i
: 0;
4044 * Emits in code a sequence of instructions that load the value 'val'
4045 * into the dreg register. Uses at most 4 instructions.
4048 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
4050 int imm8
, rot_amount
;
4052 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4053 /* skip the constant pool */
4059 if (mini_get_debug_options()->single_imm_size
&& v7_supported
) {
4060 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4061 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4065 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
4066 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4067 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
4068 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4071 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4073 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
4077 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
4079 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4081 if (val
& 0xFF0000) {
4082 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4084 if (val
& 0xFF000000) {
4085 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4087 } else if (val
& 0xFF00) {
4088 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4089 if (val
& 0xFF0000) {
4090 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4092 if (val
& 0xFF000000) {
4093 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4095 } else if (val
& 0xFF0000) {
4096 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4097 if (val
& 0xFF000000) {
4098 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4101 //g_assert_not_reached ();
4107 mono_arm_thumb_supported (void)
4109 return thumb_supported
;
4113 mono_arm_eabi_supported (void)
4115 return eabi_supported
;
4119 mono_arm_i8_align (void)
4127 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4132 call
= (MonoCallInst
*)ins
;
4133 cinfo
= call
->call_info
;
4135 switch (cinfo
->ret
.storage
) {
4136 case RegTypeStructByVal
:
4138 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4141 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4142 /* The JIT treats this as a normal call */
4146 /* Load the destination address */
4147 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
4149 if (arm_is_imm12 (loc
->inst_offset
)) {
4150 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
4152 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4153 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4156 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4157 int rsize
= cinfo
->ret
.struct_size
;
4159 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4160 g_assert (rsize
>= 0);
4165 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4168 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4171 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
4177 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4178 if (cinfo
->ret
.esize
== 4)
4179 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4181 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
4190 switch (ins
->opcode
) {
4193 case OP_FCALL_MEMBASE
:
4195 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4196 if (sig_ret
->type
== MONO_TYPE_R4
) {
4197 if (IS_HARD_FLOAT
) {
4198 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4200 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4201 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4204 if (IS_HARD_FLOAT
) {
4205 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
4207 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4214 case OP_RCALL_MEMBASE
: {
4219 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4220 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4221 if (IS_HARD_FLOAT
) {
4222 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4224 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4225 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4237 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4241 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4242 MonoInst
*last_ins
= NULL
;
4244 int imm8
, rot_amount
;
4246 /* we don't align basic blocks of loops on arm */
4248 if (cfg
->verbose_level
> 2)
4249 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4251 cpos
= bb
->max_offset
;
4253 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4254 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4255 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4256 code
= emit_call_seq (cfg
, code
);
4259 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4260 guint offset
= code
- cfg
->native_code
;
4261 set_code_cursor (cfg
, code
);
4262 max_len
= ins_get_size (ins
->opcode
);
4263 code
= realloc_code (cfg
, max_len
);
4264 // if (ins->cil_code)
4265 // g_print ("cil code\n");
4266 mono_debug_record_line_number (cfg
, ins
, offset
);
4268 switch (ins
->opcode
) {
4269 case OP_MEMORY_BARRIER
:
4271 ARM_DMB (code
, ARM_DMB_ISH
);
4272 } else if (v6_supported
) {
4273 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4274 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4278 code
= emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4281 code
= emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
4283 case OP_ATOMIC_EXCHANGE_I4
:
4284 case OP_ATOMIC_CAS_I4
:
4285 case OP_ATOMIC_ADD_I4
: {
4289 g_assert (v7_supported
);
4292 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4294 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4296 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4300 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4301 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4303 switch (ins
->opcode
) {
4304 case OP_ATOMIC_EXCHANGE_I4
:
4306 ARM_DMB (code
, ARM_DMB_ISH
);
4307 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4308 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4309 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4311 ARM_B_COND (code
, ARMCOND_NE
, 0);
4312 arm_patch (buf
[1], buf
[0]);
4314 case OP_ATOMIC_CAS_I4
:
4315 ARM_DMB (code
, ARM_DMB_ISH
);
4317 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4318 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4320 ARM_B_COND (code
, ARMCOND_NE
, 0);
4321 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4322 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4324 ARM_B_COND (code
, ARMCOND_NE
, 0);
4325 arm_patch (buf
[2], buf
[0]);
4326 arm_patch (buf
[1], code
);
4328 case OP_ATOMIC_ADD_I4
:
4330 ARM_DMB (code
, ARM_DMB_ISH
);
4331 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4332 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4333 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4334 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4336 ARM_B_COND (code
, ARMCOND_NE
, 0);
4337 arm_patch (buf
[1], buf
[0]);
4340 g_assert_not_reached ();
4343 ARM_DMB (code
, ARM_DMB_ISH
);
4344 if (tmpreg
!= ins
->dreg
)
4345 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4346 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4349 case OP_ATOMIC_LOAD_I1
:
4350 case OP_ATOMIC_LOAD_U1
:
4351 case OP_ATOMIC_LOAD_I2
:
4352 case OP_ATOMIC_LOAD_U2
:
4353 case OP_ATOMIC_LOAD_I4
:
4354 case OP_ATOMIC_LOAD_U4
:
4355 case OP_ATOMIC_LOAD_R4
:
4356 case OP_ATOMIC_LOAD_R8
: {
4357 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4358 ARM_DMB (code
, ARM_DMB_ISH
);
4360 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4362 switch (ins
->opcode
) {
4363 case OP_ATOMIC_LOAD_I1
:
4364 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4366 case OP_ATOMIC_LOAD_U1
:
4367 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4369 case OP_ATOMIC_LOAD_I2
:
4370 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4372 case OP_ATOMIC_LOAD_U2
:
4373 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4375 case OP_ATOMIC_LOAD_I4
:
4376 case OP_ATOMIC_LOAD_U4
:
4377 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4379 case OP_ATOMIC_LOAD_R4
:
4381 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4382 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4384 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4385 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4386 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4387 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4388 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4391 case OP_ATOMIC_LOAD_R8
:
4392 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4393 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4397 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4398 ARM_DMB (code
, ARM_DMB_ISH
);
4401 case OP_ATOMIC_STORE_I1
:
4402 case OP_ATOMIC_STORE_U1
:
4403 case OP_ATOMIC_STORE_I2
:
4404 case OP_ATOMIC_STORE_U2
:
4405 case OP_ATOMIC_STORE_I4
:
4406 case OP_ATOMIC_STORE_U4
:
4407 case OP_ATOMIC_STORE_R4
:
4408 case OP_ATOMIC_STORE_R8
: {
4409 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4410 ARM_DMB (code
, ARM_DMB_ISH
);
4412 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4414 switch (ins
->opcode
) {
4415 case OP_ATOMIC_STORE_I1
:
4416 case OP_ATOMIC_STORE_U1
:
4417 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4419 case OP_ATOMIC_STORE_I2
:
4420 case OP_ATOMIC_STORE_U2
:
4421 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4423 case OP_ATOMIC_STORE_I4
:
4424 case OP_ATOMIC_STORE_U4
:
4425 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4427 case OP_ATOMIC_STORE_R4
:
4429 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4430 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4432 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4433 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4434 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4435 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4436 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4439 case OP_ATOMIC_STORE_R8
:
4440 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4441 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4445 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4446 ARM_DMB (code
, ARM_DMB_ISH
);
4450 ARM_SMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4453 ARM_UMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4455 case OP_STOREI1_MEMBASE_IMM
:
4456 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4457 g_assert (arm_is_imm12 (ins
->inst_offset
));
4458 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4460 case OP_STOREI2_MEMBASE_IMM
:
4461 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4462 g_assert (arm_is_imm8 (ins
->inst_offset
));
4463 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4465 case OP_STORE_MEMBASE_IMM
:
4466 case OP_STOREI4_MEMBASE_IMM
:
4467 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4468 g_assert (arm_is_imm12 (ins
->inst_offset
));
4469 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4471 case OP_STOREI1_MEMBASE_REG
:
4472 g_assert (arm_is_imm12 (ins
->inst_offset
));
4473 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4475 case OP_STOREI2_MEMBASE_REG
:
4476 g_assert (arm_is_imm8 (ins
->inst_offset
));
4477 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4479 case OP_STORE_MEMBASE_REG
:
4480 case OP_STOREI4_MEMBASE_REG
:
4481 /* this case is special, since it happens for spill code after lowering has been called */
4482 if (arm_is_imm12 (ins
->inst_offset
)) {
4483 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4485 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4486 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4489 case OP_STOREI1_MEMINDEX
:
4490 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4492 case OP_STOREI2_MEMINDEX
:
4493 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4495 case OP_STORE_MEMINDEX
:
4496 case OP_STOREI4_MEMINDEX
:
4497 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4500 g_assert_not_reached ();
4502 case OP_LOAD_MEMINDEX
:
4503 case OP_LOADI4_MEMINDEX
:
4504 case OP_LOADU4_MEMINDEX
:
4505 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4507 case OP_LOADI1_MEMINDEX
:
4508 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4510 case OP_LOADU1_MEMINDEX
:
4511 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4513 case OP_LOADI2_MEMINDEX
:
4514 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4516 case OP_LOADU2_MEMINDEX
:
4517 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4519 case OP_LOAD_MEMBASE
:
4520 case OP_LOADI4_MEMBASE
:
4521 case OP_LOADU4_MEMBASE
:
4522 /* this case is special, since it happens for spill code after lowering has been called */
4523 if (arm_is_imm12 (ins
->inst_offset
)) {
4524 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4526 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4527 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4530 case OP_LOADI1_MEMBASE
:
4531 g_assert (arm_is_imm8 (ins
->inst_offset
));
4532 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4534 case OP_LOADU1_MEMBASE
:
4535 g_assert (arm_is_imm12 (ins
->inst_offset
));
4536 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4538 case OP_LOADU2_MEMBASE
:
4539 g_assert (arm_is_imm8 (ins
->inst_offset
));
4540 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4542 case OP_LOADI2_MEMBASE
:
4543 g_assert (arm_is_imm8 (ins
->inst_offset
));
4544 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4546 case OP_ICONV_TO_I1
:
4547 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4548 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4550 case OP_ICONV_TO_I2
:
4551 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4552 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4554 case OP_ICONV_TO_U1
:
4555 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4557 case OP_ICONV_TO_U2
:
4558 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4559 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4563 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4565 case OP_COMPARE_IMM
:
4566 case OP_ICOMPARE_IMM
:
4567 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4568 g_assert (imm8
>= 0);
4569 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4573 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4574 * So instead of emitting a trap, we emit a call a C function and place a
4577 //*(int*)code = 0xef9f0001;
4580 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4581 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4582 code
= emit_call_seq (cfg
, code
);
4584 case OP_RELAXED_NOP
:
4589 case OP_DUMMY_ICONST
:
4590 case OP_DUMMY_R8CONST
:
4591 case OP_DUMMY_R4CONST
:
4592 case OP_NOT_REACHED
:
4595 case OP_IL_SEQ_POINT
:
4596 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4598 case OP_SEQ_POINT
: {
4600 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4601 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4602 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4603 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4605 int dreg
= ARMREG_LR
;
4608 if (cfg
->soft_breakpoints
) {
4609 g_assert (!cfg
->compile_aot
);
4614 * For AOT, we use one got slot per method, which will point to a
4615 * SeqPointInfo structure, containing all the information required
4616 * by the code below.
4618 if (cfg
->compile_aot
) {
4619 g_assert (info_var
);
4620 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4623 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4625 * Read from the single stepping trigger page. This will cause a
4626 * SIGSEGV when single stepping is enabled.
4627 * We do this _before_ the breakpoint, so single stepping after
4628 * a breakpoint is hit will step to the next IL offset.
4630 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4633 /* Single step check */
4634 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4635 if (cfg
->soft_breakpoints
) {
4636 /* Load the address of the sequence point method variable. */
4637 var
= ss_method_var
;
4639 g_assert (var
->opcode
== OP_REGOFFSET
);
4640 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4641 /* Read the value and check whether it is non-zero. */
4642 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4643 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4644 /* Call it conditionally. */
4645 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4647 if (cfg
->compile_aot
) {
4648 /* Load the trigger page addr from the variable initialized in the prolog */
4649 var
= ss_trigger_page_var
;
4651 g_assert (var
->opcode
== OP_REGOFFSET
);
4652 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4654 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4656 *(int*)code
= (int)(gsize
)ss_trigger_page
;
4659 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4663 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4665 /* Breakpoint check */
4666 if (cfg
->compile_aot
) {
4667 const guint32 offset
= code
- cfg
->native_code
;
4671 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4672 /* Add the offset */
4673 val
= ((offset
/ 4) * sizeof (guint8
*)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4674 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4675 if (arm_is_imm12 ((int)val
)) {
4676 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4678 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4680 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4682 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4683 g_assert (!(val
& 0xFF000000));
4685 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4687 /* What is faster, a branch or a load ? */
4688 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4689 /* The breakpoint instruction */
4690 if (cfg
->soft_breakpoints
)
4691 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4693 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4694 } else if (cfg
->soft_breakpoints
) {
4695 /* Load the address of the breakpoint method into ip. */
4696 var
= bp_method_var
;
4698 g_assert (var
->opcode
== OP_REGOFFSET
);
4699 g_assert (arm_is_imm12 (var
->inst_offset
));
4700 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4703 * A placeholder for a possible breakpoint inserted by
4704 * mono_arch_set_breakpoint ().
4709 * A placeholder for a possible breakpoint inserted by
4710 * mono_arch_set_breakpoint ().
4712 for (i
= 0; i
< 4; ++i
)
4717 * Add an additional nop so skipping the bp doesn't cause the ip to point
4718 * to another IL offset.
4726 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4729 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4733 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4736 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4737 g_assert (imm8
>= 0);
4738 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4742 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4743 g_assert (imm8
>= 0);
4744 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4748 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4749 g_assert (imm8
>= 0);
4750 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4753 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4754 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4756 case OP_IADD_OVF_UN
:
4757 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4758 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4761 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4762 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4764 case OP_ISUB_OVF_UN
:
4765 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4766 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4768 case OP_ADD_OVF_CARRY
:
4769 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4770 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4772 case OP_ADD_OVF_UN_CARRY
:
4773 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4774 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4776 case OP_SUB_OVF_CARRY
:
4777 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4778 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4780 case OP_SUB_OVF_UN_CARRY
:
4781 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4782 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4786 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4789 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4790 g_assert (imm8
>= 0);
4791 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4794 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4798 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4802 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4803 g_assert (imm8
>= 0);
4804 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4808 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4809 g_assert (imm8
>= 0);
4810 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4812 case OP_ARM_RSBS_IMM
:
4813 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4814 g_assert (imm8
>= 0);
4815 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4817 case OP_ARM_RSC_IMM
:
4818 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4819 g_assert (imm8
>= 0);
4820 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4823 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4827 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4828 g_assert (imm8
>= 0);
4829 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4832 g_assert (v7s_supported
|| v7k_supported
);
4833 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4836 g_assert (v7s_supported
|| v7k_supported
);
4837 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4840 g_assert (v7s_supported
|| v7k_supported
);
4841 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4842 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4845 g_assert (v7s_supported
|| v7k_supported
);
4846 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4847 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4851 g_assert_not_reached ();
4853 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4857 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4858 g_assert (imm8
>= 0);
4859 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4862 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4866 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4867 g_assert (imm8
>= 0);
4868 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4871 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4876 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4877 else if (ins
->dreg
!= ins
->sreg1
)
4878 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4881 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4886 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4887 else if (ins
->dreg
!= ins
->sreg1
)
4888 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4891 case OP_ISHR_UN_IMM
:
4893 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4894 else if (ins
->dreg
!= ins
->sreg1
)
4895 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4898 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4901 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4904 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4907 if (ins
->dreg
== ins
->sreg2
)
4908 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4910 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4913 g_assert_not_reached ();
4916 /* FIXME: handle ovf/ sreg2 != dreg */
4917 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4918 /* FIXME: MUL doesn't set the C/O flags on ARM */
4920 case OP_IMUL_OVF_UN
:
4921 /* FIXME: handle ovf/ sreg2 != dreg */
4922 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4923 /* FIXME: MUL doesn't set the C/O flags on ARM */
4926 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4929 /* Load the GOT offset */
4930 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
4931 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4933 *(gpointer
*)code
= NULL
;
4935 /* Load the value from the GOT */
4936 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4938 case OP_OBJC_GET_SELECTOR
:
4939 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4940 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4942 *(gpointer
*)code
= NULL
;
4944 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4946 case OP_ICONV_TO_I4
:
4947 case OP_ICONV_TO_U4
:
4949 if (ins
->dreg
!= ins
->sreg1
)
4950 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4953 int saved
= ins
->sreg2
;
4954 if (ins
->sreg2
== ARM_LSW_REG
) {
4955 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4958 if (ins
->sreg1
!= ARM_LSW_REG
)
4959 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4960 if (saved
!= ARM_MSW_REG
)
4961 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4965 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4966 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4969 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4970 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4972 case OP_MOVE_F_TO_I4
:
4974 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4976 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4977 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4978 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4979 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4982 case OP_MOVE_I4_TO_F
:
4984 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4986 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4987 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4990 case OP_FCONV_TO_R4
:
4993 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4995 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4996 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5001 case OP_TAILCALL_PARAMETER
:
5002 // This opcode helps compute sizes, i.e.
5003 // of the subsequent OP_TAILCALL, but contributes no code.
5004 g_assert (ins
->next
);
5008 case OP_TAILCALL_MEMBASE
:
5009 case OP_TAILCALL_REG
: {
5010 gboolean
const tailcall_membase
= ins
->opcode
== OP_TAILCALL_MEMBASE
;
5011 gboolean
const tailcall_reg
= ins
->opcode
== OP_TAILCALL_REG
;
5012 MonoCallInst
*call
= (MonoCallInst
*)ins
;
5014 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
5017 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5019 code
= realloc_code (cfg
, max_len
);
5021 // For reg and membase, get destination in IP.
5024 g_assert (ins
->sreg1
> -1);
5025 if (ins
->sreg1
!= ARMREG_IP
)
5026 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg1
);
5027 } else if (tailcall_membase
) {
5028 g_assert (ins
->sreg1
> -1);
5029 if (!arm_is_imm12 (ins
->inst_offset
)) {
5030 g_assert (ins
->sreg1
!= ARMREG_IP
); // temp in emit_big_add
5031 code
= emit_big_add (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5032 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_IP
, 0);
5034 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5039 * The stack looks like the following:
5040 * <caller argument area>
5043 * <callee argument area>
5044 * <optionally saved IP> (about to be)
5045 * Need to copy the arguments from the callee argument area to
5046 * the caller argument area, and pop the frame.
5048 if (call
->stack_usage
) {
5049 int i
, prev_sp_offset
= 0;
5051 // When we get here, the parameters to the tailcall are already formed,
5052 // in registers and at the bottom of the grow-down stack.
5054 // Our goal is generally preserve parameters, and trim the stack,
5055 // and, before trimming stack, move parameters from the bottom of the
5056 // frame to the bottom of the trimmed frame.
5058 // For the case of large frames, and presently therefore always,
5059 // IP is used as an adjusted frame_reg.
5060 // Be conservative and save IP around the movement
5061 // of parameters from the bottom of frame to top of the frame.
5062 const gboolean save_ip
= tailcall_membase
|| tailcall_reg
;
5064 ARM_PUSH (code
, 1 << ARMREG_IP
);
5066 // When moving stacked parameters from the bottom
5067 // of the frame (sp) to the top of the frame (ip),
5068 // account, 0 or 4, for the conditional save of IP.
5069 const int offset_sp
= save_ip
? 4 : 0;
5070 const int offset_ip
= (save_ip
&& (cfg
->frame_reg
== ARMREG_SP
)) ? 4 : 0;
5072 /* Compute size of saved registers restored below */
5074 prev_sp_offset
= 2 * 4;
5076 prev_sp_offset
= 1 * 4;
5077 for (i
= 0; i
< 16; ++i
) {
5078 if (cfg
->used_int_regs
& (1 << i
))
5079 prev_sp_offset
+= 4;
5082 // Point IP at the start of where the parameters will go after trimming stack.
5083 // After locals and saved registers.
5084 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
5086 /* Copy arguments on the stack to our argument area */
5087 // FIXME a fixed size memcpy is desirable here,
5088 // at least for larger values of stack_usage.
5090 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5091 // See https://github.com/mono/mono/pull/12079
5092 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5093 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
5094 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
+ offset_sp
);
5095 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
+ offset_ip
);
5099 ARM_POP (code
, 1 << ARMREG_IP
);
5103 * Keep in sync with mono_arch_emit_epilog
5105 g_assert (!cfg
->method
->save_lmf
);
5106 code
= emit_big_add_temp (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
, ARMREG_LR
);
5108 if (cfg
->used_int_regs
)
5109 ARM_POP (code
, cfg
->used_int_regs
);
5110 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
5112 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
5115 if (tailcall_reg
|| tailcall_membase
) {
5116 code
= emit_jmp_reg (code
, ARMREG_IP
);
5118 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
5120 if (cfg
->compile_aot
) {
5121 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5123 *(gpointer
*)code
= NULL
;
5125 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
5127 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5128 cfg
->thunk_area
+= THUNK_SIZE
;
5134 /* ensure ins->sreg1 is not NULL */
5135 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
5138 g_assert (cfg
->sig_cookie
< 128);
5139 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
5140 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5150 call
= (MonoCallInst
*)ins
;
5153 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5155 mono_call_add_patch_info (cfg
, call
, code
- cfg
->native_code
);
5157 code
= emit_call_seq (cfg
, code
);
5158 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5159 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5160 code
= emit_move_return_value (cfg
, ins
, code
);
5167 case OP_VOIDCALL_REG
:
5170 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5172 code
= emit_call_reg (code
, ins
->sreg1
);
5173 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5174 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5175 code
= emit_move_return_value (cfg
, ins
, code
);
5177 case OP_FCALL_MEMBASE
:
5178 case OP_RCALL_MEMBASE
:
5179 case OP_LCALL_MEMBASE
:
5180 case OP_VCALL_MEMBASE
:
5181 case OP_VCALL2_MEMBASE
:
5182 case OP_VOIDCALL_MEMBASE
:
5183 case OP_CALL_MEMBASE
: {
5184 g_assert (ins
->sreg1
!= ARMREG_LR
);
5185 call
= (MonoCallInst
*)ins
;
5188 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5189 if (!arm_is_imm12 (ins
->inst_offset
)) {
5190 /* sreg1 might be IP */
5191 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5192 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5193 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5194 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5195 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5197 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5198 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5200 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5201 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5202 code
= emit_move_return_value (cfg
, ins
, code
);
5205 case OP_GENERIC_CLASS_INIT
: {
5209 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
5211 g_assert (arm_is_imm8 (byte_offset
));
5212 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5213 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5215 ARM_B_COND (code
, ARMCOND_NE
, 0);
5217 /* Uninitialized case */
5218 g_assert (ins
->sreg1
== ARMREG_R0
);
5220 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5221 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
5222 code
= emit_call_seq (cfg
, code
);
5224 /* Initialized case */
5225 arm_patch (jump
, code
);
5229 /* round the size to 8 bytes */
5230 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5231 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5232 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5233 /* memzero the area: dreg holds the size, sp is the pointer */
5234 if (ins
->flags
& MONO_INST_INIT
) {
5235 guint8
*start_loop
, *branch_to_cond
;
5236 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5237 branch_to_cond
= code
;
5240 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5241 arm_patch (branch_to_cond
, code
);
5242 /* decrement by 4 and set flags */
5243 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (target_mgreg_t
));
5244 ARM_B_COND (code
, ARMCOND_GE
, 0);
5245 arm_patch (code
- 4, start_loop
);
5247 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5248 if (cfg
->param_area
)
5249 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5254 MonoInst
*var
= cfg
->dyn_call_var
;
5255 guint8
*labels
[16];
5257 g_assert (var
->opcode
== OP_REGOFFSET
);
5258 g_assert (arm_is_imm12 (var
->inst_offset
));
5260 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5261 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5263 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5265 /* Save args buffer */
5266 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5268 /* Set fp argument registers */
5269 if (IS_HARD_FLOAT
) {
5270 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5271 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5273 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5274 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5275 const int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5276 g_assert (arm_is_fpimm8 (offset
));
5277 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5279 arm_patch (labels
[0], code
);
5282 /* Allocate callee area */
5283 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5284 ARM_SHL_IMM (code
, ARMREG_R1
, ARMREG_R1
, 2);
5285 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_R1
);
5287 /* Set stack args */
5289 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5290 /* R2 = pointer into regs */
5291 code
= emit_big_add (code
, ARMREG_R2
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (PARAM_REGS
* sizeof (target_mgreg_t
)));
5292 /* R3 = pointer to stack */
5293 ARM_MOV_REG_REG (code
, ARMREG_R3
, ARMREG_SP
);
5296 ARM_B_COND (code
, ARMCOND_AL
, 0);
5298 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R2
, 0);
5299 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R3
, 0);
5300 ARM_ADD_REG_IMM (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
), 0);
5301 ARM_ADD_REG_IMM (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
), 0);
5302 ARM_SUB_REG_IMM (code
, ARMREG_R1
, ARMREG_R1
, 1, 0);
5303 arm_patch (labels
[0], code
);
5304 ARM_CMP_REG_IMM (code
, ARMREG_R1
, 0, 0);
5306 ARM_B_COND (code
, ARMCOND_GT
, 0);
5307 arm_patch (labels
[2], labels
[1]);
5309 /* Set argument registers */
5310 for (i
= 0; i
< PARAM_REGS
; ++i
)
5311 ARM_LDR_IMM (code
, i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (i
* sizeof (target_mgreg_t
)));
5314 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5315 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5318 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5319 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5320 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5322 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5326 if (ins
->sreg1
!= ARMREG_R0
)
5327 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5328 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5329 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
5330 code
= emit_call_seq (cfg
, code
);
5334 if (ins
->sreg1
!= ARMREG_R0
)
5335 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5336 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5337 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
5338 code
= emit_call_seq (cfg
, code
);
5341 case OP_START_HANDLER
: {
5342 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5343 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5346 /* Reserve a param area, see filter-stack.exe */
5348 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5349 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5351 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5352 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5356 if (arm_is_imm12 (spvar
->inst_offset
)) {
5357 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5359 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5360 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5364 case OP_ENDFILTER
: {
5365 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5366 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5369 /* Free the param area */
5371 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5372 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5374 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5375 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5379 if (ins
->sreg1
!= ARMREG_R0
)
5380 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5381 if (arm_is_imm12 (spvar
->inst_offset
)) {
5382 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5384 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5385 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5386 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5388 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5391 case OP_ENDFINALLY
: {
5392 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5393 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5396 /* Free the param area */
5398 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5399 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5401 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5402 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5406 if (arm_is_imm12 (spvar
->inst_offset
)) {
5407 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5409 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5410 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5411 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5413 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5416 case OP_CALL_HANDLER
:
5417 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5418 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5419 cfg
->thunk_area
+= THUNK_SIZE
;
5420 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
5421 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
5424 if (ins
->dreg
!= ARMREG_R0
)
5425 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5429 ins
->inst_c0
= code
- cfg
->native_code
;
5432 /*if (ins->inst_target_bb->native_offset) {
5434 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5436 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5437 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5441 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5445 * In the normal case we have:
5446 * ldr pc, [pc, ins->sreg1 << 2]
5449 * ldr lr, [pc, ins->sreg1 << 2]
5451 * After follows the data.
5452 * FIXME: add aot support.
5454 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5455 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5456 code
= realloc_code (cfg
, max_len
);
5457 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5459 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5463 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5464 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5468 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5469 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5473 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5474 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5478 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5479 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5483 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5484 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5487 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5488 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5491 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5492 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5495 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5496 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5499 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5500 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5503 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5504 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5506 case OP_COND_EXC_EQ
:
5507 case OP_COND_EXC_NE_UN
:
5508 case OP_COND_EXC_LT
:
5509 case OP_COND_EXC_LT_UN
:
5510 case OP_COND_EXC_GT
:
5511 case OP_COND_EXC_GT_UN
:
5512 case OP_COND_EXC_GE
:
5513 case OP_COND_EXC_GE_UN
:
5514 case OP_COND_EXC_LE
:
5515 case OP_COND_EXC_LE_UN
:
5516 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5518 case OP_COND_EXC_IEQ
:
5519 case OP_COND_EXC_INE_UN
:
5520 case OP_COND_EXC_ILT
:
5521 case OP_COND_EXC_ILT_UN
:
5522 case OP_COND_EXC_IGT
:
5523 case OP_COND_EXC_IGT_UN
:
5524 case OP_COND_EXC_IGE
:
5525 case OP_COND_EXC_IGE_UN
:
5526 case OP_COND_EXC_ILE
:
5527 case OP_COND_EXC_ILE_UN
:
5528 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5531 case OP_COND_EXC_IC
:
5532 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5534 case OP_COND_EXC_OV
:
5535 case OP_COND_EXC_IOV
:
5536 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5538 case OP_COND_EXC_NC
:
5539 case OP_COND_EXC_INC
:
5540 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5542 case OP_COND_EXC_NO
:
5543 case OP_COND_EXC_INO
:
5544 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5556 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5559 /* floating point opcodes */
5561 if (cfg
->compile_aot
) {
5562 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5564 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5566 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5569 /* FIXME: we can optimize the imm load by dealing with part of
5570 * the displacement in LDFD (aligning to 512).
5572 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5573 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5577 if (cfg
->compile_aot
) {
5578 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5580 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5583 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5585 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5586 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5588 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5591 case OP_STORER8_MEMBASE_REG
:
5592 /* This is generated by the local regalloc pass which runs after the lowering pass */
5593 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5594 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5595 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5596 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5598 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5601 case OP_LOADR8_MEMBASE
:
5602 /* This is generated by the local regalloc pass which runs after the lowering pass */
5603 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5604 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5605 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5606 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5608 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5611 case OP_STORER4_MEMBASE_REG
:
5612 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5614 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5616 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5617 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5618 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5619 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5622 case OP_LOADR4_MEMBASE
:
5624 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5626 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5627 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5628 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5629 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5630 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5633 case OP_ICONV_TO_R_UN
: {
5634 g_assert_not_reached ();
5637 case OP_ICONV_TO_R4
:
5639 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5640 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5642 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5643 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5644 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5645 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5646 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5649 case OP_ICONV_TO_R8
:
5650 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5651 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5652 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5653 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5657 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature_internal (cfg
->method
)->ret
);
5658 if (sig_ret
->type
== MONO_TYPE_R4
) {
5660 if (IS_HARD_FLOAT
) {
5661 if (ins
->sreg1
!= ARM_VFP_D0
)
5662 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5664 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5667 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5670 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5674 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5676 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5680 case OP_FCONV_TO_I1
:
5681 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5683 case OP_FCONV_TO_U1
:
5684 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5686 case OP_FCONV_TO_I2
:
5687 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5689 case OP_FCONV_TO_U2
:
5690 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5692 case OP_FCONV_TO_I4
:
5694 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5696 case OP_FCONV_TO_U4
:
5698 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5700 case OP_FCONV_TO_I8
:
5701 case OP_FCONV_TO_U8
:
5702 g_assert_not_reached ();
5703 /* Implemented as helper calls */
5705 case OP_LCONV_TO_R_UN
:
5706 g_assert_not_reached ();
5707 /* Implemented as helper calls */
5709 case OP_LCONV_TO_OVF_I4_2
: {
5710 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5712 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
5715 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5716 high_bit_not_set
= code
;
5717 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5719 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
5720 valid_negative
= code
;
5721 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5722 invalid_negative
= code
;
5723 ARM_B_COND (code
, ARMCOND_AL
, 0);
5725 arm_patch (high_bit_not_set
, code
);
5727 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5728 valid_positive
= code
;
5729 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5731 arm_patch (invalid_negative
, code
);
5732 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5734 arm_patch (valid_negative
, code
);
5735 arm_patch (valid_positive
, code
);
5737 if (ins
->dreg
!= ins
->sreg1
)
5738 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5742 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5745 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5748 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5751 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5754 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5758 g_assert_not_reached ();
5762 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5768 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5773 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5776 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5777 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5781 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5784 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5785 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5789 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5792 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5793 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5794 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5798 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5801 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5802 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5806 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5809 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5810 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5811 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5815 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5818 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5819 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5823 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5826 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5827 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5831 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5834 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5835 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5838 /* ARM FPA flags table:
5839 * N Less than ARMCOND_MI
5840 * Z Equal ARMCOND_EQ
5841 * C Greater Than or Equal ARMCOND_CS
5842 * V Unordered ARMCOND_VS
5845 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5848 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5851 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5854 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5855 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5861 g_assert_not_reached ();
5865 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5867 /* FPA requires EQ even though the docs suggest that just CS is enough */
5868 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5869 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5873 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5874 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5879 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5880 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5882 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5883 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5885 *(guint32
*)code
= 0xffffffff;
5887 *(guint32
*)code
= 0x7fefffff;
5889 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5891 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5892 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5894 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5895 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5897 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5898 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5903 case OP_RCONV_TO_I1
:
5904 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5906 case OP_RCONV_TO_U1
:
5907 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5909 case OP_RCONV_TO_I2
:
5910 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5912 case OP_RCONV_TO_U2
:
5913 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5915 case OP_RCONV_TO_I4
:
5916 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5918 case OP_RCONV_TO_U4
:
5919 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5921 case OP_RCONV_TO_R4
:
5923 if (ins
->dreg
!= ins
->sreg1
)
5924 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5926 case OP_RCONV_TO_R8
:
5928 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5931 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5934 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5937 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5940 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5943 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5947 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5950 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5951 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5955 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5958 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5959 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5963 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5966 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5967 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5968 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5972 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5975 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5976 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5980 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5983 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5984 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5985 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5989 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5992 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5993 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5997 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
6000 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
6001 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
6005 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
6008 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
6009 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
6012 case OP_GC_LIVENESS_DEF
:
6013 case OP_GC_LIVENESS_USE
:
6014 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
6015 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6017 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
6018 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6019 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
6021 case OP_LIVERANGE_START
: {
6022 if (cfg
->verbose_level
> 1)
6023 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6024 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
6027 case OP_LIVERANGE_END
: {
6028 if (cfg
->verbose_level
> 1)
6029 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6030 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
6033 case OP_GC_SAFE_POINT
: {
6036 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
6037 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
6039 ARM_B_COND (code
, ARMCOND_EQ
, 0);
6040 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
6041 code
= emit_call_seq (cfg
, code
);
6042 arm_patch (buf
[0], code
);
6045 case OP_FILL_PROF_CALL_CTX
:
6046 for (int i
= 0; i
< ARMREG_MAX
; i
++)
6047 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
6048 ARM_STR_IMM (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
6051 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
6052 g_assert_not_reached ();
6055 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
6056 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6057 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
6058 g_assert_not_reached ();
6066 set_code_cursor (cfg
, code
);
6069 #endif /* DISABLE_JIT */
6072 mono_arch_register_lowlevel_calls (void)
6074 /* The signature doesn't matter */
6075 mono_register_jit_icall (mono_arm_throw_exception
, mono_icall_sig_void
, TRUE
);
6076 mono_register_jit_icall (mono_arm_throw_exception_by_token
, mono_icall_sig_void
, TRUE
);
6077 mono_register_jit_icall (mono_arm_unaligned_stack
, mono_icall_sig_void
, TRUE
);
/*
 * patch_lis_ori:
 *
 * Patch a lis/ori style 32-bit constant load at IP with VAL, writing the high
 * halfword into the second 16-bit slot and the low halfword into the fourth.
 * (Legacy helper; the g_assert_not_reached () paths below are its only users.)
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff;	\
	} while (0)
6087 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
6089 unsigned char *ip
= ji
->ip
.i
+ code
;
6091 if (ji
->type
== MONO_PATCH_INFO_SWITCH
) {
6095 case MONO_PATCH_INFO_SWITCH
: {
6096 gpointer
*jt
= (gpointer
*)(ip
+ 8);
6098 /* jt is the inlined jump table, 2 instructions after ip
6099 * In the normal case we store the absolute addresses,
6100 * otherwise the displacements.
6102 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++)
6103 jt
[i
] = code
+ (int)(gsize
)ji
->data
.table
->table
[i
];
6106 case MONO_PATCH_INFO_IP
:
6107 g_assert_not_reached ();
6108 patch_lis_ori (ip
, ip
);
6110 case MONO_PATCH_INFO_METHODCONST
:
6111 case MONO_PATCH_INFO_CLASS
:
6112 case MONO_PATCH_INFO_IMAGE
:
6113 case MONO_PATCH_INFO_FIELD
:
6114 case MONO_PATCH_INFO_VTABLE
:
6115 case MONO_PATCH_INFO_IID
:
6116 case MONO_PATCH_INFO_SFLDA
:
6117 case MONO_PATCH_INFO_LDSTR
:
6118 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
6119 case MONO_PATCH_INFO_LDTOKEN
:
6120 g_assert_not_reached ();
6121 /* from OP_AOTCONST : lis + ori */
6122 patch_lis_ori (ip
, target
);
6124 case MONO_PATCH_INFO_R4
:
6125 case MONO_PATCH_INFO_R8
:
6126 g_assert_not_reached ();
6127 *((gconstpointer
*)(ip
+ 2)) = target
;
6129 case MONO_PATCH_INFO_EXC_NAME
:
6130 g_assert_not_reached ();
6131 *((gconstpointer
*)(ip
+ 1)) = target
;
6133 case MONO_PATCH_INFO_NONE
:
6134 case MONO_PATCH_INFO_BB_OVF
:
6135 case MONO_PATCH_INFO_EXC_OVF
:
6136 /* everything is dealt with at epilog output time */
6139 arm_patch_general (cfg
, domain
, ip
, (const guchar
*)target
);
6145 mono_arm_unaligned_stack (MonoMethod
*method
)
6147 g_assert_not_reached ();
6153 * Stack frame layout:
6155 * ------------------- fp
6156 * MonoLMF structure or saved registers
6157 * -------------------
6159 * -------------------
6161 * -------------------
6162 * param area size is cfg->param_area
6163 * ------------------- sp
6166 mono_arch_emit_prolog (MonoCompile
*cfg
)
6168 MonoMethod
*method
= cfg
->method
;
6170 MonoMethodSignature
*sig
;
6172 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6176 int prev_sp_offset
, reg_offset
;
6178 sig
= mono_method_signature_internal (method
);
6179 cfg
->code_size
= 256 + sig
->param_count
* 64;
6180 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6182 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6184 alloc_size
= cfg
->stack_offset
;
6190 * The iphone uses R7 as the frame pointer, and it points at the saved
6195 * We can't use r7 as a frame pointer since it points into the middle of
6196 * the frame, so we keep using our own frame pointer.
6197 * FIXME: Optimize this.
6199 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6200 prev_sp_offset
+= 8; /* r7 and lr */
6201 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6202 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6203 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6206 if (!method
->save_lmf
) {
6208 /* No need to push LR again */
6209 if (cfg
->used_int_regs
)
6210 ARM_PUSH (code
, cfg
->used_int_regs
);
6212 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6213 prev_sp_offset
+= 4;
6215 for (i
= 0; i
< 16; ++i
) {
6216 if (cfg
->used_int_regs
& (1 << i
))
6217 prev_sp_offset
+= 4;
6219 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6221 for (i
= 0; i
< 16; ++i
) {
6222 if ((cfg
->used_int_regs
& (1 << i
))) {
6223 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6224 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6228 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6229 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6231 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6232 ARM_PUSH (code
, 0x5ff0);
6233 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6234 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6236 for (i
= 0; i
< 16; ++i
) {
6237 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6238 /* The original r7 is saved at the start */
6239 if (!(iphone_abi
&& i
== ARMREG_R7
))
6240 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6244 g_assert (reg_offset
== 4 * 10);
6245 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (4 * 10);
6249 orig_alloc_size
= alloc_size
;
6250 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6251 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6252 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6253 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6256 /* the stack used in the pushed regs */
6257 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6258 cfg
->stack_usage
= alloc_size
;
6260 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6261 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6263 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6264 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6266 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6268 if (cfg
->frame_reg
!= ARMREG_SP
) {
6269 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6270 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6272 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6273 prev_sp_offset
+= alloc_size
;
6275 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6276 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6278 /* compute max_offset in order to use short forward jumps
6279 * we could skip do it on arm because the immediate displacement
6280 * for jumps is large enough, it may be useful later for constant pools
6283 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6284 MonoInst
*ins
= bb
->code
;
6285 bb
->max_offset
= max_offset
;
6287 MONO_BB_FOR_EACH_INS (bb
, ins
)
6288 max_offset
+= ins_get_size (ins
->opcode
);
6291 /* stack alignment check */
6295 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6296 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6297 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6298 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6300 ARM_B_COND (code, ARMCOND_EQ, 0);
6301 if (cfg->compile_aot)
6302 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6304 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6305 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6306 code = emit_call_seq (cfg, code);
6307 arm_patch (buf [0], code);
6311 /* store runtime generic context */
6312 if (cfg
->rgctx_var
) {
6313 MonoInst
*ins
= cfg
->rgctx_var
;
6315 g_assert (ins
->opcode
== OP_REGOFFSET
);
6317 if (arm_is_imm12 (ins
->inst_offset
)) {
6318 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6320 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6321 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6325 /* load arguments allocated to register from the stack */
6328 cinfo
= get_call_info (NULL
, sig
);
6330 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6331 ArgInfo
*ainfo
= &cinfo
->ret
;
6332 inst
= cfg
->vret_addr
;
6333 g_assert (arm_is_imm12 (inst
->inst_offset
));
6334 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6337 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6338 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6340 /* Save the sig cookie address */
6341 g_assert (cookie
->storage
== RegTypeBase
);
6343 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6344 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6345 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6346 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6349 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6350 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6351 inst
= cfg
->args
[pos
];
6353 if (cfg
->verbose_level
> 2)
6354 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6356 if (inst
->opcode
== OP_REGVAR
) {
6357 if (ainfo
->storage
== RegTypeGeneral
)
6358 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6359 else if (ainfo
->storage
== RegTypeFP
) {
6360 g_assert_not_reached ();
6361 } else if (ainfo
->storage
== RegTypeBase
) {
6362 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6363 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6365 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6366 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6369 g_assert_not_reached ();
6371 if (cfg
->verbose_level
> 2)
6372 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
6374 switch (ainfo
->storage
) {
6376 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6377 if (ainfo
->esize
== 4)
6378 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6380 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6383 case RegTypeGeneral
:
6384 case RegTypeIRegPair
:
6385 case RegTypeGSharedVtInReg
:
6386 case RegTypeStructByAddr
:
6387 switch (ainfo
->size
) {
6389 if (arm_is_imm12 (inst
->inst_offset
))
6390 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6392 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6393 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6397 if (arm_is_imm8 (inst
->inst_offset
)) {
6398 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6400 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6401 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6405 if (arm_is_imm12 (inst
->inst_offset
)) {
6406 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6408 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6409 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6411 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6412 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6414 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6415 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6419 if (arm_is_imm12 (inst
->inst_offset
)) {
6420 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6422 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6423 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6428 case RegTypeBaseGen
:
6429 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6430 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6432 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6433 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6435 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6436 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6437 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6439 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6440 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6441 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6442 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6446 case RegTypeGSharedVtOnStack
:
6447 case RegTypeStructByAddrOnStack
:
6448 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6449 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6451 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6452 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6455 switch (ainfo
->size
) {
6457 if (arm_is_imm8 (inst
->inst_offset
)) {
6458 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6460 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6461 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6465 if (arm_is_imm8 (inst
->inst_offset
)) {
6466 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6468 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6469 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6473 if (arm_is_imm12 (inst
->inst_offset
)) {
6474 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6476 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6477 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6479 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6480 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6482 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6483 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6485 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6486 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6488 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6489 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6493 if (arm_is_imm12 (inst
->inst_offset
)) {
6494 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6496 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6497 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6503 int imm8
, rot_amount
;
6505 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6506 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6507 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6509 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6511 if (ainfo
->size
== 8)
6512 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6514 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6517 case RegTypeStructByVal
: {
6518 int doffset
= inst
->inst_offset
;
6522 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6523 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6524 if (arm_is_imm12 (doffset
)) {
6525 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6527 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6528 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6530 soffset
+= sizeof (target_mgreg_t
);
6531 doffset
+= sizeof (target_mgreg_t
);
6533 if (ainfo
->vtsize
) {
6534 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6535 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6536 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6541 g_assert_not_reached ();
6548 if (method
->save_lmf
)
6549 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6551 if (cfg
->arch
.seq_point_info_var
) {
6552 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6554 /* Initialize the variable from a GOT slot */
6555 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6556 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6558 *(gpointer
*)code
= NULL
;
6560 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6562 g_assert (ins
->opcode
== OP_REGOFFSET
);
6564 if (arm_is_imm12 (ins
->inst_offset
)) {
6565 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6567 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6568 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6572 /* Initialize ss_trigger_page_var */
6573 if (!cfg
->soft_breakpoints
) {
6574 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6575 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6576 int dreg
= ARMREG_LR
;
6579 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6581 code
= emit_ldr_imm (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6582 /* Load the trigger page addr */
6583 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6584 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6588 if (cfg
->arch
.seq_point_ss_method_var
) {
6589 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6590 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6592 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6593 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6595 if (cfg
->compile_aot
) {
6596 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6597 int dreg
= ARMREG_LR
;
6599 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6600 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6602 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6603 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
6604 ARM_STR_IMM (code
, dreg
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6606 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6607 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6609 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6611 *(gpointer
*)code
= &single_step_tramp
;
6613 *(gpointer
*)code
= breakpoint_tramp
;
6616 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6617 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6618 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6619 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6623 set_code_cursor (cfg
, code
);
6630 mono_arch_emit_epilog (MonoCompile
*cfg
)
6632 MonoMethod
*method
= cfg
->method
;
6633 int pos
, i
, rot_amount
;
6634 int max_epilog_size
= 16 + 20*4;
6638 if (cfg
->method
->save_lmf
)
6639 max_epilog_size
+= 128;
6641 code
= realloc_code (cfg
, max_epilog_size
);
6643 /* Save the uwind state which is needed by the out-of-line code */
6644 mono_emit_unwind_op_remember_state (cfg
, code
);
6648 /* Load returned vtypes into registers if needed */
6649 cinfo
= cfg
->arch
.cinfo
;
6650 switch (cinfo
->ret
.storage
) {
6651 case RegTypeStructByVal
: {
6652 MonoInst
*ins
= cfg
->ret
;
6654 if (cinfo
->ret
.nregs
== 1) {
6655 if (arm_is_imm12 (ins
->inst_offset
)) {
6656 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6658 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6659 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6662 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6663 int offset
= ins
->inst_offset
+ (i
* 4);
6664 if (arm_is_imm12 (offset
)) {
6665 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6667 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6668 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6675 MonoInst
*ins
= cfg
->ret
;
6677 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6678 if (cinfo
->ret
.esize
== 4)
6679 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6681 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6689 if (method
->save_lmf
) {
6690 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6691 /* all but r0-r3, sp and pc */
6692 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6695 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6697 /* This points to r4 inside MonoLMF->iregs */
6698 sp_adj
= (MONO_ABI_SIZEOF (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6700 regmask
= 0x9ff0; /* restore lr to pc */
6701 /* Skip caller saved registers not used by the method */
6702 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6703 regmask
&= ~(1 << reg
);
6708 /* Restored later */
6709 regmask
&= ~(1 << ARMREG_PC
);
6710 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6711 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6712 for (i
= 0; i
< 16; i
++) {
6713 if (regmask
& (1 << i
))
6716 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6718 ARM_POP (code
, regmask
);
6720 for (i
= 0; i
< 16; i
++) {
6721 if (regmask
& (1 << i
))
6722 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6724 /* Restore saved r7, restore LR to PC */
6725 /* Skip lr from the lmf */
6726 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6727 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (target_mgreg_t
), 0);
6728 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6729 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6732 int i
, nused_int_regs
= 0;
6734 for (i
= 0; i
< 16; i
++) {
6735 if (cfg
->used_int_regs
& (1 << i
))
6739 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6740 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6742 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6743 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6746 if (cfg
->frame_reg
!= ARMREG_SP
) {
6747 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6751 /* Restore saved gregs */
6752 if (cfg
->used_int_regs
) {
6753 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6754 ARM_POP (code
, cfg
->used_int_regs
);
6755 for (i
= 0; i
< 16; i
++) {
6756 if (cfg
->used_int_regs
& (1 << i
))
6757 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6760 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6761 /* Restore saved r7, restore LR to PC */
6762 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6764 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6765 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6769 /* Restore the unwind state to be the same as before the epilog */
6770 mono_emit_unwind_op_restore_state (cfg
, code
);
6772 set_code_cursor (cfg
, code
);
6777 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6779 MonoJumpInfo
*patch_info
;
6782 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6783 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6784 int max_epilog_size
= 50;
6786 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6787 exc_throw_pos
[i
] = NULL
;
6788 exc_throw_found
[i
] = 0;
6791 /* count the number of exception infos */
6794 * make sure we have enough space for exceptions
6796 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6797 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6798 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6799 if (!exc_throw_found
[i
]) {
6800 max_epilog_size
+= 32;
6801 exc_throw_found
[i
] = TRUE
;
6806 code
= realloc_code (cfg
, max_epilog_size
);
6808 /* add code to raise exceptions */
6809 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6810 switch (patch_info
->type
) {
6811 case MONO_PATCH_INFO_EXC
: {
6812 MonoClass
*exc_class
;
6813 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6815 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6816 if (exc_throw_pos
[i
]) {
6817 arm_patch (ip
, exc_throw_pos
[i
]);
6818 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6821 exc_throw_pos
[i
] = code
;
6823 arm_patch (ip
, code
);
6825 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6827 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6828 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6829 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
6830 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
6831 patch_info
->ip
.i
= code
- cfg
->native_code
;
6833 cfg
->thunk_area
+= THUNK_SIZE
;
6834 *(guint32
*)(gpointer
)code
= m_class_get_type_token (exc_class
) - MONO_TOKEN_TYPE_DEF
;
6844 set_code_cursor (cfg
, code
);
6847 #endif /* #ifndef DISABLE_JIT */
/* Nothing to do on ARM after the runtime finishes initializing. */
void
mono_arch_finish_init (void)
{
}
6855 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
6860 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
6871 mono_arch_get_patch_offset (guint8
*code
)
/* ARM has no register windows (SPARC-ism); this is a no-op. */
void
mono_arch_flush_register_windows (void)
{
}
6883 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
6885 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6889 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
6891 return (MonoVTable
*)(gsize
)regs
[MONO_ARCH_RGCTX_REG
];
6895 mono_arch_get_cie_program (void)
6899 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
/* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Per-fragment code-size budgets (bytes) for the IMT trampoline builder below. */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (8 * 4)
/* Signed byte distance from A to B; used for PC-relative offsets and vtable slot offsets. */
#define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6914 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6916 guint32 delta
= DISTANCE (target
, code
);
6918 g_assert (delta
>= 0 && delta
<= 0xFFF);
6919 *target
= *target
| delta
;
#ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper called from IMT trampoline check code when the looked-up
 * IMT slot does not match the expected method; prints and aborts. */
static void
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
{
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
	g_assert (0);
}
#endif
6934 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6935 gpointer fail_tramp
)
6938 arminstr_t
*code
, *start
;
6939 gboolean large_offsets
= FALSE
;
6940 guint32
**constant_pool_starts
;
6941 arminstr_t
*vtable_target
= NULL
;
6942 int extra_space
= 0;
6943 #ifdef ENABLE_WRONG_METHOD_CHECK
6949 constant_pool_starts
= g_new0 (guint32
*, count
);
6951 for (i
= 0; i
< count
; ++i
) {
6952 MonoIMTCheckItem
*item
= imt_entries
[i
];
6953 if (item
->is_equals
) {
6954 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6956 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6957 item
->chunk_size
+= 32;
6958 large_offsets
= TRUE
;
6961 if (item
->check_target_idx
|| fail_case
) {
6962 if (!item
->compare_done
|| fail_case
)
6963 item
->chunk_size
+= CMP_SIZE
;
6964 item
->chunk_size
+= BRANCH_SIZE
;
6966 #ifdef ENABLE_WRONG_METHOD_CHECK
6967 item
->chunk_size
+= WMC_SIZE
;
6971 item
->chunk_size
+= 16;
6972 large_offsets
= TRUE
;
6974 item
->chunk_size
+= CALL_SIZE
;
6976 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6977 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6979 size
+= item
->chunk_size
;
6983 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6986 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
6988 code
= mono_domain_code_reserve (domain
, size
);
6991 unwind_ops
= mono_arch_get_cie_program ();
6994 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6995 for (i
= 0; i
< count
; ++i
) {
6996 MonoIMTCheckItem
*item
= imt_entries
[i
];
6997 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
7001 if (large_offsets
) {
7002 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7003 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (host_mgreg_t
));
7005 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
7006 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (host_mgreg_t
));
7008 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
7009 vtable_target
= code
;
7010 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
7011 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
7013 for (i
= 0; i
< count
; ++i
) {
7014 MonoIMTCheckItem
*item
= imt_entries
[i
];
7015 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
7016 gint32 vtable_offset
;
7018 item
->code_target
= (guint8
*)code
;
7020 if (item
->is_equals
) {
7021 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
7023 if (item
->check_target_idx
|| fail_case
) {
7024 if (!item
->compare_done
|| fail_case
) {
7026 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7027 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7029 item
->jmp_code
= (guint8
*)code
;
7030 ARM_B_COND (code
, ARMCOND_NE
, 0);
7032 /*Enable the commented code to assert on wrong method*/
7033 #ifdef ENABLE_WRONG_METHOD_CHECK
7035 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7036 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7038 ARM_B_COND (code
, ARMCOND_EQ
, 0);
7040 /* Define this if your system is so bad that gdb is failing. */
7041 #ifdef BROKEN_DEV_ENV
7042 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
7044 arm_patch (code
- 1, mini_dump_bad_imt
);
7048 arm_patch (cond
, code
);
7052 if (item
->has_target_code
) {
7053 /* Load target address */
7054 target_code_ins
= code
;
7055 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7056 /* Save it to the fourth slot */
7057 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7058 /* Restore registers and branch */
7059 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7061 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
7063 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
7064 if (!arm_is_imm12 (vtable_offset
)) {
7066 * We need to branch to a computed address but we don't have
7067 * a free register to store it, since IP must contain the
7068 * vtable address. So we push the two values to the stack, and
7069 * load them both using LDM.
7071 /* Compute target address */
7072 vtable_offset_ins
= code
;
7073 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7074 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
7075 /* Save it to the fourth slot */
7076 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7077 /* Restore registers and branch */
7078 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7080 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
7082 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
7083 if (large_offsets
) {
7084 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (host_mgreg_t
));
7085 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (host_mgreg_t
));
7087 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
7088 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
7093 arm_patch (item
->jmp_code
, (guchar
*)code
);
7095 target_code_ins
= code
;
7096 /* Load target address */
7097 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7098 /* Save it to the fourth slot */
7099 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (host_mgreg_t
));
7100 /* Restore registers and branch */
7101 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7103 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
7104 item
->jmp_code
= NULL
;
7108 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)(gsize
)item
->key
);
7110 /*must emit after unconditional branch*/
7111 if (vtable_target
) {
7112 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)(gsize
)vtable
);
7113 item
->chunk_size
+= 4;
7114 vtable_target
= NULL
;
7117 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7118 constant_pool_starts
[i
] = code
;
7120 code
+= extra_space
;
7124 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7125 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7127 item
->jmp_code
= (guint8
*)code
;
7128 ARM_B_COND (code
, ARMCOND_HS
, 0);
7133 for (i
= 0; i
< count
; ++i
) {
7134 MonoIMTCheckItem
*item
= imt_entries
[i
];
7135 if (item
->jmp_code
) {
7136 if (item
->check_target_idx
)
7137 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7139 if (i
> 0 && item
->is_equals
) {
7141 arminstr_t
*space_start
= constant_pool_starts
[i
];
7142 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7143 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)(gsize
)imt_entries
[j
]->key
);
7150 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
);
7151 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7156 g_free (constant_pool_starts
);
7158 mono_arch_flush_icache ((guint8
*)start
, size
);
7159 MONO_PROFILER_RAISE (jit_code_buffer
, ((guint8
*)start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
7160 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
7162 g_assert (DISTANCE (start
, code
) <= size
);
7164 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
7170 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
7172 return ctx
->regs
[reg
];
7176 mono_arch_context_set_int_reg (MonoContext
*ctx
, int reg
, host_mgreg_t val
)
7178 ctx
->regs
[reg
] = val
;
7182 * mono_arch_get_trampolines:
7184 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7188 mono_arch_get_trampolines (gboolean aot
)
7190 return mono_arm_get_exception_trampolines (aot
);
7193 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
7195 * mono_arch_set_breakpoint:
7197 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7198 * The location should contain code emitted by OP_SEQ_POINT.
7201 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
7204 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7205 MonoDebugOptions
*opt
= mini_get_debug_options ();
7208 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), (guint8
*)ji
->code_start
);
7210 if (!breakpoint_tramp
)
7211 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
7213 g_assert (native_offset
% 4 == 0);
7214 g_assert (info
->bp_addrs
[native_offset
/ 4] == 0);
7215 info
->bp_addrs
[native_offset
/ 4] = (guint8
*)(opt
->soft_breakpoints
? breakpoint_tramp
: bp_trigger_page
);
7216 } else if (opt
->soft_breakpoints
) {
7218 ARM_BLX_REG (code
, ARMREG_LR
);
7219 mono_arch_flush_icache (code
- 4, 4);
7221 int dreg
= ARMREG_LR
;
7223 /* Read from another trigger page */
7224 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
7226 *(int*)code
= (int)(gssize
)bp_trigger_page
;
7228 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
7230 mono_arch_flush_icache (code
- 16, 16);
7233 /* This is currently implemented by emitting an SWI instruction, which
7234 * qemu/linux seems to convert to a SIGILL.
7236 *(int*)code
= (0xef << 24) | 8;
7238 mono_arch_flush_icache (code
- 4, 4);
7244 * mono_arch_clear_breakpoint:
7246 * Clear the breakpoint at IP.
7249 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
7251 MonoDebugOptions
*opt
= mini_get_debug_options ();
7256 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
7257 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), (guint8
*)ji
->code_start
);
7259 if (!breakpoint_tramp
)
7260 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
7262 g_assert (native_offset
% 4 == 0);
7263 g_assert (info
->bp_addrs
[native_offset
/ 4] == (guint8
*)(opt
->soft_breakpoints
? breakpoint_tramp
: bp_trigger_page
));
7264 info
->bp_addrs
[native_offset
/ 4] = 0;
7265 } else if (opt
->soft_breakpoints
) {
7268 mono_arch_flush_icache (code
- 4, 4);
7270 for (i
= 0; i
< 4; ++i
)
7273 mono_arch_flush_icache (ip
, code
- ip
);
7278 * mono_arch_start_single_stepping:
7280 * Start single stepping.
7283 mono_arch_start_single_stepping (void)
7285 if (ss_trigger_page
)
7286 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
7288 single_step_tramp
= mini_get_single_step_trampoline ();
7292 * mono_arch_stop_single_stepping:
7294 * Stop single stepping.
7297 mono_arch_stop_single_stepping (void)
7299 if (ss_trigger_page
)
7300 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
7302 single_step_tramp
= NULL
;
7306 #define DBG_SIGNAL SIGBUS
7308 #define DBG_SIGNAL SIGSEGV
7312 * mono_arch_is_single_step_event:
7314 * Return whenever the machine state in SIGCTX corresponds to a single
7318 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7320 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7322 if (!ss_trigger_page
)
7325 /* Sometimes the address is off by 4 */
7326 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7333 * mono_arch_is_breakpoint_event:
7335 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7338 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7340 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7342 if (!ss_trigger_page
)
7345 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7346 /* Sometimes the address is off by 4 */
7347 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
7357 * mono_arch_skip_breakpoint:
7359 * See mini-amd64.c for docs.
7362 mono_arch_skip_breakpoint (MonoContext
*ctx
, MonoJitInfo
*ji
)
7364 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7368 * mono_arch_skip_single_step:
7370 * See mini-amd64.c for docs.
7373 mono_arch_skip_single_step (MonoContext
*ctx
)
7375 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
7379 * mono_arch_get_seq_point_info:
7381 * See mini-amd64.c for docs.
7384 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
7389 // FIXME: Add a free function
7391 mono_domain_lock (domain
);
7392 info
= (SeqPointInfo
*)g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
7394 mono_domain_unlock (domain
);
7397 ji
= mono_jit_info_table_find (domain
, code
);
7400 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
7402 info
->ss_trigger_page
= ss_trigger_page
;
7403 info
->bp_trigger_page
= bp_trigger_page
;
7404 info
->ss_tramp_addr
= &single_step_tramp
;
7406 mono_domain_lock (domain
);
7407 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
7409 mono_domain_unlock (domain
);
7415 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7418 * mono_arch_set_target:
7420 * Set the target architecture the JIT backend should generate code for, in the form
7421 * of a GNU target triplet. Only used in AOT mode.
7424 mono_arch_set_target (char *mtriple
)
7426 /* The GNU target triple format is not very well documented */
7427 if (strstr (mtriple
, "armv7")) {
7428 v5_supported
= TRUE
;
7429 v6_supported
= TRUE
;
7430 v7_supported
= TRUE
;
7432 if (strstr (mtriple
, "armv6")) {
7433 v5_supported
= TRUE
;
7434 v6_supported
= TRUE
;
7436 if (strstr (mtriple
, "armv7s")) {
7437 v7s_supported
= TRUE
;
7439 if (strstr (mtriple
, "armv7k")) {
7440 v7k_supported
= TRUE
;
7442 if (strstr (mtriple
, "thumbv7s")) {
7443 v5_supported
= TRUE
;
7444 v6_supported
= TRUE
;
7445 v7_supported
= TRUE
;
7446 v7s_supported
= TRUE
;
7447 thumb_supported
= TRUE
;
7448 thumb2_supported
= TRUE
;
7450 if (strstr (mtriple
, "darwin") || strstr (mtriple
, "ios")) {
7451 v5_supported
= TRUE
;
7452 v6_supported
= TRUE
;
7453 thumb_supported
= TRUE
;
7456 if (strstr (mtriple
, "gnueabi"))
7457 eabi_supported
= TRUE
;
7461 mono_arch_opcode_supported (int opcode
)
7464 case OP_ATOMIC_ADD_I4
:
7465 case OP_ATOMIC_EXCHANGE_I4
:
7466 case OP_ATOMIC_CAS_I4
:
7467 case OP_ATOMIC_LOAD_I1
:
7468 case OP_ATOMIC_LOAD_I2
:
7469 case OP_ATOMIC_LOAD_I4
:
7470 case OP_ATOMIC_LOAD_U1
:
7471 case OP_ATOMIC_LOAD_U2
:
7472 case OP_ATOMIC_LOAD_U4
:
7473 case OP_ATOMIC_STORE_I1
:
7474 case OP_ATOMIC_STORE_I2
:
7475 case OP_ATOMIC_STORE_I4
:
7476 case OP_ATOMIC_STORE_U1
:
7477 case OP_ATOMIC_STORE_U2
:
7478 case OP_ATOMIC_STORE_U4
:
7479 return v7_supported
;
7480 case OP_ATOMIC_LOAD_R4
:
7481 case OP_ATOMIC_LOAD_R8
:
7482 case OP_ATOMIC_STORE_R4
:
7483 case OP_ATOMIC_STORE_R8
:
7484 return v7_supported
&& IS_VFP
;
7491 mono_arch_get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
7493 return get_call_info (mp
, sig
);
7497 mono_arch_get_get_tls_tramp (void)
7502 static G_GNUC_UNUSED guint8
*
7503 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
)
7506 mono_add_patch_info (cfg
, code
- cfg
->native_code
, (MonoJumpInfoType
)patch_type
, data
);
7507 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
7509 *(gpointer
*)code
= NULL
;
7511 /* Load the value from the GOT */
7512 ARM_LDR_REG_REG (code
, dreg
, ARMREG_PC
, dreg
);
7517 mono_arm_emit_aotconst (gpointer ji_list
, guint8
*code
, guint8
*buf
, int dreg
, int patch_type
, gconstpointer data
)
7519 MonoJumpInfo
**ji
= (MonoJumpInfo
**)ji_list
;
7521 *ji
= mono_patch_info_list_prepend (*ji
, code
- buf
, (MonoJumpInfoType
)patch_type
, data
);
7522 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
7524 *(gpointer
*)code
= NULL
;
7526 ARM_LDR_REG_REG (code
, dreg
, ARMREG_PC
, dreg
);