3 * ARM backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
9 * (C) 2003 Ximian, Inc.
10 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
11 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
12 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/profiler-private.h>
20 #include <mono/metadata/debug-helpers.h>
21 #include <mono/utils/mono-mmap.h>
22 #include <mono/utils/mono-hwcap.h>
23 #include <mono/utils/mono-memory-model.h>
24 #include <mono/utils/mono-threads-coop.h>
25 #include <mono/utils/unlocked.h>
27 #include "interp/interp.h"
32 #include "debugger-agent.h"
34 #include "mini-runtime.h"
35 #include "aot-runtime.h"
36 #include "mono/arch/arm/arm-vfp-codegen.h"
38 /* Sanity check: This makes no sense */
39 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
40 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
44 * IS_SOFT_FLOAT: Is full software floating point used?
45 * IS_HARD_FLOAT: Is full hardware floating point used?
46 * IS_VFP: Is hardware floating point with software ABI used?
48 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
49 * IS_VFP may delegate to mono_arch_is_soft_float ().
52 #if defined(ARM_FPU_VFP_HARD)
53 #define IS_SOFT_FLOAT (FALSE)
54 #define IS_HARD_FLOAT (TRUE)
56 #elif defined(ARM_FPU_NONE)
57 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
58 #define IS_HARD_FLOAT (FALSE)
59 #define IS_VFP (!mono_arch_is_soft_float ())
61 #define IS_SOFT_FLOAT (FALSE)
62 #define IS_HARD_FLOAT (FALSE)
66 #define THUNK_SIZE (3 * 4)
70 void sys_icache_invalidate (void *start
, size_t len
);
74 /* This mutex protects architecture specific caches */
75 #define mono_mini_arch_lock() mono_os_mutex_lock (&mini_arch_mutex)
76 #define mono_mini_arch_unlock() mono_os_mutex_unlock (&mini_arch_mutex)
77 static mono_mutex_t mini_arch_mutex
;
79 static gboolean v5_supported
= FALSE
;
80 static gboolean v6_supported
= FALSE
;
81 static gboolean v7_supported
= FALSE
;
82 static gboolean v7s_supported
= FALSE
;
83 static gboolean v7k_supported
= FALSE
;
84 static gboolean thumb_supported
= FALSE
;
85 static gboolean thumb2_supported
= FALSE
;
87 * Whenever to use the ARM EABI
89 static gboolean eabi_supported
= FALSE
;
92 * Whenever to use the iphone ABI extensions:
93 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
94 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
95 * This is required for debugging/profiling tools to work, but it has some overhead so it should
96 * only be turned on in debug builds.
98 static gboolean iphone_abi
= FALSE
;
101 * The FPU we are generating code for. This is NOT runtime configurable right now,
102 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
104 static MonoArmFPU arm_fpu
;
106 #if defined(ARM_FPU_VFP_HARD)
108 * On armhf, d0-d7 are used for argument passing and d8-d15
109 * must be preserved across calls, which leaves us no room
110 * for scratch registers. So we use d14-d15 but back up their
111 * previous contents to a stack slot before using them - see
112 * mono_arm_emit_vfp_scratch_save/_restore ().
114 static int vfp_scratch1
= ARM_VFP_D14
;
115 static int vfp_scratch2
= ARM_VFP_D15
;
118 * On armel, d0-d7 do not need to be preserved, so we can
119 * freely make use of them as scratch registers.
121 static int vfp_scratch1
= ARM_VFP_D0
;
122 static int vfp_scratch2
= ARM_VFP_D1
;
127 static gpointer single_step_tramp
, breakpoint_tramp
;
130 * The code generated for sequence points reads from this location, which is
131 * made read-only when single stepping is enabled.
133 static gpointer ss_trigger_page
;
135 /* Enabled breakpoints read from this trigger page */
136 static gpointer bp_trigger_page
;
140 * floating point support: on ARM it is a mess, there are at least 3
141 * different setups, each of which binary incompat with the other.
142 * 1) FPA: old and ugly, but unfortunately what current distros use
143 * the double binary format has the two words swapped. 8 double registers.
144 * Implemented usually by kernel emulation.
145 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
146 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
147 * 3) VFP: the new and actually sensible and useful FP support. Implemented
148 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
150 * We do not care about FPA. We will support soft float and VFP.
152 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
153 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
154 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
156 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
157 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
158 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
160 //#define DEBUG_IMT 0
163 static void mono_arch_compute_omit_fp (MonoCompile
*cfg
);
167 emit_aotconst (MonoCompile
*cfg
, guint8
*code
, int dreg
, int patch_type
, gpointer data
);
/*
 * mono_arch_regname:
 *
 *   Return a printable name for the ARM integer register REG (0-15).
 * NOTE(review): the tail of the table and the fallback return were not
 * visible in the extracted source; reconstructed from the `reg < 16` bound.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *reg_names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return reg_names [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a printable name for the VFP single-precision register REG (0-31).
 * NOTE(review): the last table row and the fallback return were not visible
 * in the extracted source; reconstructed from the `reg < 32` bound.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *freg_names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return freg_names [reg];
	return "unknown";
}
203 emit_big_add_temp (guint8
*code
, int dreg
, int sreg
, int imm
, int temp
)
205 int imm8
, rot_amount
;
207 g_assert (temp
== ARMREG_IP
|| temp
== ARMREG_LR
);
211 ARM_MOV_REG_REG (code
, dreg
, sreg
);
212 } else if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
213 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
217 code
= mono_arm_emit_load_imm (code
, temp
, imm
);
218 ARM_ADD_REG_REG (code
, dreg
, sreg
, temp
);
220 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
221 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
227 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
229 return emit_big_add_temp (code
, dreg
, sreg
, imm
, ARMREG_IP
);
233 emit_ldr_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
235 if (!arm_is_imm12 (imm
)) {
236 g_assert (dreg
!= sreg
);
237 code
= emit_big_add (code
, dreg
, sreg
, imm
);
238 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
240 ARM_LDR_IMM (code
, dreg
, sreg
, imm
);
245 /* If dreg == sreg, this clobbers IP */
247 emit_sub_imm (guint8
*code
, int dreg
, int sreg
, int imm
)
249 int imm8
, rot_amount
;
250 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
251 ARM_SUB_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
255 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, imm
);
256 ARM_SUB_REG_REG (code
, dreg
, sreg
, ARMREG_IP
);
258 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
259 ARM_SUB_REG_REG (code
, dreg
, dreg
, sreg
);
265 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
267 /* we can use r0-r3, since this is called only for incoming args on the stack */
268 if (size
> sizeof (target_mgreg_t
) * 4) {
270 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
271 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
272 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
273 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
274 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
275 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
276 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
277 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
278 ARM_B_COND (code
, ARMCOND_NE
, 0);
279 arm_patch (code
- 4, start_loop
);
282 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
283 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
285 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
286 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
292 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
293 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
294 doffset
= soffset
= 0;
296 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
297 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
303 g_assert (size
== 0);
308 emit_jmp_reg (guint8
*code
, int reg
)
313 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
318 emit_call_reg (guint8
*code
, int reg
)
321 ARM_BLX_REG (code
, reg
);
323 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
324 return emit_jmp_reg (code
, reg
);
330 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
332 if (cfg
->method
->dynamic
) {
333 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
335 *(gpointer
*)code
= NULL
;
337 code
= emit_call_reg (code
, ARMREG_IP
);
341 cfg
->thunk_area
+= THUNK_SIZE
;
346 mono_arm_patchable_b (guint8
*code
, int cond
)
348 ARM_B_COND (code
, cond
, 0);
353 mono_arm_patchable_bl (guint8
*code
, int cond
)
355 ARM_BL_COND (code
, cond
, 0);
359 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(HOST_ANDROID) && !defined(MONO_CROSS_COMPILE)
360 #define HAVE_AEABI_READ_TP 1
363 #ifdef HAVE_AEABI_READ_TP
365 gpointer
__aeabi_read_tp (void);
370 mono_arch_have_fast_tls (void)
372 #ifdef HAVE_AEABI_READ_TP
373 static gboolean have_fast_tls
= FALSE
;
374 static gboolean inited
= FALSE
;
376 if (mini_get_debug_options ()->use_fallback_tls
)
380 return have_fast_tls
;
385 tp1
= __aeabi_read_tp ();
386 asm volatile("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp2
));
388 have_fast_tls
= tp1
&& tp1
== tp2
;
391 return have_fast_tls
;
398 emit_tls_get (guint8
*code
, int dreg
, int tls_offset
)
400 g_assert (v7_supported
);
401 ARM_MRC (code
, 15, 0, dreg
, 13, 0, 3);
402 ARM_LDR_IMM (code
, dreg
, dreg
, tls_offset
);
407 emit_tls_set (guint8
*code
, int sreg
, int tls_offset
)
409 int tp_reg
= (sreg
!= ARMREG_R0
) ? ARMREG_R0
: ARMREG_R1
;
410 g_assert (v7_supported
);
411 ARM_MRC (code
, 15, 0, tp_reg
, 13, 0, 3);
412 ARM_STR_IMM (code
, sreg
, tp_reg
, tls_offset
);
419 * Emit code to push an LMF structure on the LMF stack.
420 * On arm, this is intermixed with the initialization of other fields of the structure.
423 emit_save_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
427 if (mono_arch_have_fast_tls () && mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
) != -1) {
428 code
= emit_tls_get (code
, ARMREG_R0
, mono_tls_get_tls_offset (TLS_KEY_LMF_ADDR
));
430 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
431 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_tls_get_lmf_addr
));
432 code
= emit_call_seq (cfg
, code
);
434 /* we build the MonoLMF structure on the stack - see mini-arm.h */
435 /* lmf_offset is the offset from the previous stack pointer,
436 * alloc_size is the total stack space allocated, so the offset
437 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
438 * The pointer to the struct is put in r1 (new_lmf).
439 * ip is used as scratch
440 * The callee-saved registers are already in the MonoLMF structure
442 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, lmf_offset
);
443 /* r0 is the result from mono_get_lmf_addr () */
444 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
445 /* new_lmf->previous_lmf = *lmf_addr */
446 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
447 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
448 /* *(lmf_addr) = r1 */
449 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
450 /* Skip method (only needed for trampoline LMF frames) */
451 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, sp
));
452 ARM_STR_IMM (code
, ARMREG_FP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, fp
));
453 /* save the current IP */
454 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_PC
);
455 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_R1
, MONO_STRUCT_OFFSET (MonoLMF
, ip
));
457 for (i
= 0; i
< MONO_ABI_SIZEOF (MonoLMF
); i
+= sizeof (target_mgreg_t
))
458 mini_gc_set_slot_type_from_fp (cfg
, lmf_offset
+ i
, SLOT_NOREF
);
469 emit_float_args (MonoCompile
*cfg
, MonoCallInst
*inst
, guint8
*code
, int *max_len
, guint
*offset
)
473 set_code_cursor (cfg
, code
);
475 for (list
= inst
->float_args
; list
; list
= list
->next
) {
476 FloatArgData
*fad
= (FloatArgData
*)list
->data
;
477 MonoInst
*var
= get_vreg_to_inst (cfg
, fad
->vreg
);
478 gboolean imm
= arm_is_fpimm8 (var
->inst_offset
);
480 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
486 code
= realloc_code (cfg
, *max_len
);
489 code
= emit_big_add (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
490 ARM_FLDS (code
, fad
->hreg
, ARMREG_LR
, 0);
492 ARM_FLDS (code
, fad
->hreg
, var
->inst_basereg
, var
->inst_offset
);
494 set_code_cursor (cfg
, code
);
495 *offset
= code
- cfg
->native_code
;
502 mono_arm_emit_vfp_scratch_save (MonoCompile
*cfg
, guint8
*code
, int reg
)
506 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
508 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
511 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
512 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
513 ARM_FSTD (code
, reg
, ARMREG_LR
, 0);
515 ARM_FSTD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
522 mono_arm_emit_vfp_scratch_restore (MonoCompile
*cfg
, guint8
*code
, int reg
)
526 g_assert (reg
== vfp_scratch1
|| reg
== vfp_scratch2
);
528 inst
= cfg
->arch
.vfp_scratch_slots
[reg
== vfp_scratch1
? 0 : 1];
531 if (!arm_is_fpimm8 (inst
->inst_offset
)) {
532 code
= emit_big_add (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
533 ARM_FLDD (code
, reg
, ARMREG_LR
, 0);
535 ARM_FLDD (code
, reg
, inst
->inst_basereg
, inst
->inst_offset
);
544 * Emit code to pop an LMF structure from the LMF stack.
547 emit_restore_lmf (MonoCompile
*cfg
, guint8
*code
, gint32 lmf_offset
)
551 if (lmf_offset
< 32) {
552 basereg
= cfg
->frame_reg
;
557 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, lmf_offset
);
560 /* ip = previous_lmf */
561 ARM_LDR_IMM (code
, ARMREG_IP
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
563 ARM_LDR_IMM (code
, ARMREG_LR
, basereg
, offset
+ MONO_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
564 /* *(lmf_addr) = previous_lmf */
565 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, MONO_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
570 #endif /* #ifndef DISABLE_JIT */
573 * mono_arch_get_argument_info:
574 * @csig: a method signature
575 * @param_count: the number of parameters to consider
576 * @arg_info: an array to store the result infos
578 * Gathers information on parameters such as size, alignment and
579 * padding. arg_info should be large enought to hold param_count + 1 entries.
581 * Returns the size of the activation frame.
584 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
586 int k
, frame_size
= 0;
587 guint32 size
, align
, pad
;
591 t
= mini_get_underlying_type (csig
->ret
);
592 if (MONO_TYPE_ISSTRUCT (t
)) {
593 frame_size
+= sizeof (target_mgreg_t
);
597 arg_info
[0].offset
= offset
;
600 frame_size
+= sizeof (target_mgreg_t
);
604 arg_info
[0].size
= frame_size
;
606 for (k
= 0; k
< param_count
; k
++) {
607 size
= mini_type_stack_size_full (csig
->params
[k
], &align
, csig
->pinvoke
);
609 /* ignore alignment for now */
612 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
613 arg_info
[k
].pad
= pad
;
615 arg_info
[k
+ 1].pad
= 0;
616 arg_info
[k
+ 1].size
= size
;
618 arg_info
[k
+ 1].offset
= offset
;
622 align
= MONO_ARCH_FRAME_ALIGNMENT
;
623 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
624 arg_info
[k
].pad
= pad
;
629 #define MAX_ARCH_DELEGATE_PARAMS 3
632 get_delegate_invoke_impl (MonoTrampInfo
**info
, gboolean has_target
, gboolean param_count
)
634 guint8
*code
, *start
;
635 GSList
*unwind_ops
= mono_arch_get_cie_program ();
638 start
= code
= mono_global_codeman_reserve (12);
640 /* Replace the this argument with the target */
641 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
642 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, target
));
643 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
645 g_assert ((code
- start
) <= 12);
647 mono_arch_flush_icache (start
, 12);
648 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
652 size
= 8 + param_count
* 4;
653 start
= code
= mono_global_codeman_reserve (size
);
655 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, MONO_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
656 /* slide down the arguments */
657 for (i
= 0; i
< param_count
; ++i
) {
658 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
660 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
662 g_assert ((code
- start
) <= size
);
664 mono_arch_flush_icache (start
, size
);
665 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
669 *info
= mono_tramp_info_create ("delegate_invoke_impl_has_target", start
, code
- start
, NULL
, unwind_ops
);
671 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", param_count
);
672 *info
= mono_tramp_info_create (name
, start
, code
- start
, NULL
, unwind_ops
);
676 MONO_PROFILER_RAISE (jit_code_buffer
, (start
, code
- start
, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE
, NULL
));
682 * mono_arch_get_delegate_invoke_impls:
684 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
688 mono_arch_get_delegate_invoke_impls (void)
694 get_delegate_invoke_impl (&info
, TRUE
, 0);
695 res
= g_slist_prepend (res
, info
);
697 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
698 get_delegate_invoke_impl (&info
, FALSE
, i
);
699 res
= g_slist_prepend (res
, info
);
706 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
708 guint8
*code
, *start
;
711 /* FIXME: Support more cases */
712 sig_ret
= mini_get_underlying_type (sig
->ret
);
713 if (MONO_TYPE_ISSTRUCT (sig_ret
))
717 static guint8
* cached
= NULL
;
718 mono_mini_arch_lock ();
720 mono_mini_arch_unlock ();
724 if (mono_ee_features
.use_aot_trampolines
) {
725 start
= (guint8
*)mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
728 start
= get_delegate_invoke_impl (&info
, TRUE
, 0);
729 mono_tramp_info_register (info
, NULL
);
732 mono_mini_arch_unlock ();
735 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
738 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
740 for (i
= 0; i
< sig
->param_count
; ++i
)
741 if (!mono_is_regsize_var (sig
->params
[i
]))
744 mono_mini_arch_lock ();
745 code
= cache
[sig
->param_count
];
747 mono_mini_arch_unlock ();
751 if (mono_ee_features
.use_aot_trampolines
) {
752 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
753 start
= (guint8
*)mono_aot_get_trampoline (name
);
757 start
= get_delegate_invoke_impl (&info
, FALSE
, sig
->param_count
);
758 mono_tramp_info_register (info
, NULL
);
760 cache
[sig
->param_count
] = start
;
761 mono_mini_arch_unlock ();
769 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature
*sig
, MonoMethod
*method
, int offset
, gboolean load_imt_reg
)
775 mono_arch_get_this_arg_from_call (host_mgreg_t
*regs
, guint8
*code
)
777 return (gpointer
)regs
[ARMREG_R0
];
781 * Initialize the cpu to execute managed code.
784 mono_arch_cpu_init (void)
786 i8_align
= MONO_ABI_ALIGNOF (gint64
);
787 #ifdef MONO_CROSS_COMPILE
788 /* Need to set the alignment of i8 since it can different on the target */
789 #ifdef TARGET_ANDROID
791 mono_type_set_alignment (MONO_TYPE_I8
, i8_align
);
797 * Initialize architecture specific code.
800 mono_arch_init (void)
804 #ifdef TARGET_WATCHOS
805 mini_get_debug_options ()->soft_breakpoints
= TRUE
;
808 mono_os_mutex_init_recursive (&mini_arch_mutex
);
809 if (mini_get_debug_options ()->soft_breakpoints
) {
811 breakpoint_tramp
= mini_get_breakpoint_trampoline ();
813 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
814 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
, MONO_MEM_ACCOUNT_OTHER
);
815 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
818 #if defined(__ARM_EABI__)
819 eabi_supported
= TRUE
;
822 #if defined(ARM_FPU_VFP_HARD)
823 arm_fpu
= MONO_ARM_FPU_VFP_HARD
;
825 arm_fpu
= MONO_ARM_FPU_VFP
;
827 #if defined(ARM_FPU_NONE) && !defined(TARGET_IOS)
829 * If we're compiling with a soft float fallback and it
830 * turns out that no VFP unit is available, we need to
831 * switch to soft float. We don't do this for iOS, since
832 * iOS devices always have a VFP unit.
834 if (!mono_hwcap_arm_has_vfp
)
835 arm_fpu
= MONO_ARM_FPU_NONE
;
838 * This environment variable can be useful in testing
839 * environments to make sure the soft float fallback
840 * works. Most ARM devices have VFP units these days, so
841 * normally soft float code would not be exercised much.
843 char *soft
= g_getenv ("MONO_ARM_FORCE_SOFT_FLOAT");
845 if (soft
&& !strncmp (soft
, "1", 1))
846 arm_fpu
= MONO_ARM_FPU_NONE
;
851 v5_supported
= mono_hwcap_arm_is_v5
;
852 v6_supported
= mono_hwcap_arm_is_v6
;
853 v7_supported
= mono_hwcap_arm_is_v7
;
856 * On weird devices, the hwcap code may fail to detect
857 * the ARM version. In that case, we can at least safely
858 * assume the version the runtime was compiled for.
870 #if defined(TARGET_IOS)
871 /* iOS is special-cased here because we don't yet
872 have a way to properly detect CPU features on it. */
873 thumb_supported
= TRUE
;
876 thumb_supported
= mono_hwcap_arm_has_thumb
;
877 thumb2_supported
= mono_hwcap_arm_has_thumb2
;
880 /* Format: armv(5|6|7[s])[-thumb[2]] */
881 cpu_arch
= g_getenv ("MONO_CPU_ARCH");
883 /* Do this here so it overrides any detection. */
885 if (strncmp (cpu_arch
, "armv", 4) == 0) {
886 v5_supported
= cpu_arch
[4] >= '5';
887 v6_supported
= cpu_arch
[4] >= '6';
888 v7_supported
= cpu_arch
[4] >= '7';
889 v7s_supported
= strncmp (cpu_arch
, "armv7s", 6) == 0;
890 v7k_supported
= strncmp (cpu_arch
, "armv7k", 6) == 0;
893 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
894 thumb2_supported
= strstr (cpu_arch
, "thumb2") != NULL
;
900 * Cleanup architecture specific code.
903 mono_arch_cleanup (void)
908 * This function returns the optimizations supported on this cpu.
911 mono_arch_cpu_optimizations (guint32
*exclude_mask
)
913 /* no arm-specific optimizations yet */
919 * This function test for all SIMD functions supported.
921 * Returns a bitmask corresponding to all supported versions.
925 mono_arch_cpu_enumerate_simd_versions (void)
927 /* SIMD is currently unimplemented */
932 mono_arm_is_hard_float (void)
934 return arm_fpu
== MONO_ARM_FPU_VFP_HARD
;
940 mono_arch_opcode_needs_emulation (MonoCompile
*cfg
, int opcode
)
942 if (v7s_supported
|| v7k_supported
) {
956 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
958 mono_arch_is_soft_float (void)
960 return arm_fpu
== MONO_ARM_FPU_NONE
;
965 is_regsize_var (MonoType
*t
)
969 t
= mini_get_underlying_type (t
);
976 case MONO_TYPE_FNPTR
:
978 case MONO_TYPE_OBJECT
:
980 case MONO_TYPE_GENERICINST
:
981 if (!mono_type_generic_inst_is_valuetype (t
))
984 case MONO_TYPE_VALUETYPE
:
991 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
996 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
997 MonoInst
*ins
= cfg
->varinfo
[i
];
998 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
1001 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
1004 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
1007 /* we can only allocate 32 bit values */
1008 if (is_regsize_var (ins
->inst_vtype
)) {
1009 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
1010 g_assert (i
== vmv
->idx
);
1011 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
1019 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
1023 mono_arch_compute_omit_fp (cfg
);
1026 * FIXME: Interface calls might go through a static rgctx trampoline which
1027 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1030 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1031 cfg
->uses_rgctx_reg
= TRUE
;
1033 if (cfg
->arch
.omit_fp
)
1034 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_FP
));
1035 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
1036 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
1037 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
1039 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1040 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V7
));
1042 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
1043 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
)))
1044 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1045 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
1046 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1047 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1053 * mono_arch_regalloc_cost:
1055 * Return the cost, in number of memory references, of the action of
1056 * allocating the variable VMV into a register during global register
1060 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
1066 #endif /* #ifndef DISABLE_JIT */
1069 mono_arch_flush_icache (guint8
*code
, gint size
)
1071 #if defined(MONO_CROSS_COMPILE)
1073 sys_icache_invalidate (code
, size
);
1075 __builtin___clear_cache ((char*)code
, (char*)code
+ size
);
1082 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
1085 if (*gr
> ARMREG_R3
) {
1087 ainfo
->offset
= *stack_size
;
1088 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1089 ainfo
->storage
= RegTypeBase
;
1092 ainfo
->storage
= RegTypeGeneral
;
1099 split
= i8_align
== 4;
1104 if (*gr
== ARMREG_R3
&& split
) {
1105 /* first word in r3 and the second on the stack */
1106 ainfo
->offset
= *stack_size
;
1107 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1108 ainfo
->storage
= RegTypeBaseGen
;
1110 } else if (*gr
>= ARMREG_R3
) {
1111 if (eabi_supported
) {
1112 /* darwin aligns longs to 4 byte only */
1113 if (i8_align
== 8) {
1118 ainfo
->offset
= *stack_size
;
1119 ainfo
->reg
= ARMREG_SP
; /* in the caller */
1120 ainfo
->storage
= RegTypeBase
;
1123 if (eabi_supported
) {
1124 if (i8_align
== 8 && ((*gr
) & 1))
1127 ainfo
->storage
= RegTypeIRegPair
;
1136 add_float (guint
*fpr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean is_double
, gint
*float_spare
)
1139 * If we're calling a function like this:
1141 * void foo(float a, double b, float c)
1143 * We pass a in s0 and b in d1. That leaves us
1144 * with s1 being unused. The armhf ABI recognizes
1145 * this and requires register assignment to then
1146 * use that for the next single-precision arg,
1147 * i.e. c in this example. So float_spare either
1148 * tells us which reg to use for the next single-
1149 * precision arg, or it's -1, meaning use *fpr.
1151 * Note that even though most of the JIT speaks
1152 * double-precision, fpr represents single-
1153 * precision registers.
1155 * See parts 5.5 and 6.1.2 of the AAPCS for how
1159 if (*fpr
< ARM_VFP_F16
|| (!is_double
&& *float_spare
>= 0)) {
1160 ainfo
->storage
= RegTypeFP
;
1164 * If we're passing a double-precision value
1165 * and *fpr is odd (e.g. it's s1, s3, ...)
1166 * we need to use the next even register. So
1167 * we mark the current *fpr as a spare that
1168 * can be used for the next single-precision
1172 *float_spare
= *fpr
;
1177 * At this point, we have an even register
1178 * so we assign that and move along.
1182 } else if (*float_spare
>= 0) {
1184 * We're passing a single-precision value
1185 * and it looks like a spare single-
1186 * precision register is available. Let's
1190 ainfo
->reg
= *float_spare
;
1194 * If we hit this branch, we're passing a
1195 * single-precision value and we can simply
1196 * use the next available register.
1204 * We've exhausted available floating point
1205 * regs, so pass the rest on the stack.
1213 ainfo
->offset
= *stack_size
;
1214 ainfo
->reg
= ARMREG_SP
;
1215 ainfo
->storage
= RegTypeBase
;
1222 is_hfa (MonoType
*t
, int *out_nfields
, int *out_esize
)
1226 MonoClassField
*field
;
1227 MonoType
*ftype
, *prev_ftype
= NULL
;
1230 klass
= mono_class_from_mono_type_internal (t
);
1232 while ((field
= mono_class_get_fields_internal (klass
, &iter
))) {
1233 if (field
->type
->attrs
& FIELD_ATTRIBUTE_STATIC
)
1235 ftype
= mono_field_get_type_internal (field
);
1236 ftype
= mini_get_underlying_type (ftype
);
1238 if (MONO_TYPE_ISSTRUCT (ftype
)) {
1239 int nested_nfields
, nested_esize
;
1241 if (!is_hfa (ftype
, &nested_nfields
, &nested_esize
))
1243 if (nested_esize
== 4)
1244 ftype
= m_class_get_byval_arg (mono_defaults
.single_class
);
1246 ftype
= m_class_get_byval_arg (mono_defaults
.double_class
);
1247 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1250 nfields
+= nested_nfields
;
1252 if (!(!ftype
->byref
&& (ftype
->type
== MONO_TYPE_R4
|| ftype
->type
== MONO_TYPE_R8
)))
1254 if (prev_ftype
&& prev_ftype
->type
!= ftype
->type
)
1260 if (nfields
== 0 || nfields
> 4)
1262 *out_nfields
= nfields
;
1263 *out_esize
= prev_ftype
->type
== MONO_TYPE_R4
? 4 : 8;
1268 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
)
1270 guint i
, gr
, fpr
, pstart
;
1272 int n
= sig
->hasthis
+ sig
->param_count
;
1276 guint32 stack_size
= 0;
1278 gboolean is_pinvoke
= sig
->pinvoke
;
1279 gboolean vtype_retaddr
= FALSE
;
1282 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1284 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
1291 t
= mini_get_underlying_type (sig
->ret
);
1302 case MONO_TYPE_FNPTR
:
1303 case MONO_TYPE_OBJECT
:
1304 cinfo
->ret
.storage
= RegTypeGeneral
;
1305 cinfo
->ret
.reg
= ARMREG_R0
;
1309 cinfo
->ret
.storage
= RegTypeIRegPair
;
1310 cinfo
->ret
.reg
= ARMREG_R0
;
1314 cinfo
->ret
.storage
= RegTypeFP
;
1316 if (t
->type
== MONO_TYPE_R4
)
1317 cinfo
->ret
.size
= 4;
1319 cinfo
->ret
.size
= 8;
1321 if (IS_HARD_FLOAT
) {
1322 cinfo
->ret
.reg
= ARM_VFP_F0
;
1324 cinfo
->ret
.reg
= ARMREG_R0
;
1327 case MONO_TYPE_GENERICINST
:
1328 if (!mono_type_generic_inst_is_valuetype (t
)) {
1329 cinfo
->ret
.storage
= RegTypeGeneral
;
1330 cinfo
->ret
.reg
= ARMREG_R0
;
1333 if (mini_is_gsharedvt_variable_type (t
)) {
1334 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1338 case MONO_TYPE_VALUETYPE
:
1339 case MONO_TYPE_TYPEDBYREF
:
1340 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1341 cinfo
->ret
.storage
= RegTypeHFA
;
1343 cinfo
->ret
.nregs
= nfields
;
1344 cinfo
->ret
.esize
= esize
;
1347 int native_size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &align
);
1350 #ifdef TARGET_WATCHOS
1355 if (native_size
<= max_size
) {
1356 cinfo
->ret
.storage
= RegTypeStructByVal
;
1357 cinfo
->ret
.struct_size
= native_size
;
1358 cinfo
->ret
.nregs
= ALIGN_TO (native_size
, 4) / 4;
1360 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1363 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1368 case MONO_TYPE_MVAR
:
1369 g_assert (mini_is_gsharedvt_type (t
));
1370 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1372 case MONO_TYPE_VOID
:
1375 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1378 vtype_retaddr
= cinfo
->ret
.storage
== RegTypeStructByAddr
;
1383 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1384 * the first argument, allowing 'this' to be always passed in the first arg reg.
1385 * Also do this if the first argument is a reference type, since virtual calls
1386 * are sometimes made using calli without sig->hasthis set, like in the delegate
1389 if (vtype_retaddr
&& !is_pinvoke
&& (sig
->hasthis
|| (sig
->param_count
> 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig
->params
[0]))))) {
1391 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1393 add_general (&gr
, &stack_size
, &cinfo
->args
[sig
->hasthis
+ 0], TRUE
);
1397 cinfo
->ret
.reg
= gr
;
1399 cinfo
->vret_arg_index
= 1;
1403 add_general (&gr
, &stack_size
, cinfo
->args
+ 0, TRUE
);
1406 if (vtype_retaddr
) {
1407 cinfo
->ret
.reg
= gr
;
1412 DEBUG(g_print("params: %d\n", sig
->param_count
));
1413 for (i
= pstart
; i
< sig
->param_count
; ++i
) {
1414 ArgInfo
*ainfo
= &cinfo
->args
[n
];
1416 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1417 /* Prevent implicit arguments and sig_cookie from
1418 being passed in registers */
1421 /* Emit the signature cookie just before the implicit arguments */
1422 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1424 DEBUG(g_print("param %d: ", i
));
1425 if (sig
->params
[i
]->byref
) {
1426 DEBUG(g_print("byref\n"));
1427 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1431 t
= mini_get_underlying_type (sig
->params
[i
]);
1435 cinfo
->args
[n
].size
= 1;
1436 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1440 cinfo
->args
[n
].size
= 2;
1441 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1445 cinfo
->args
[n
].size
= 4;
1446 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1451 case MONO_TYPE_FNPTR
:
1452 case MONO_TYPE_OBJECT
:
1453 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1454 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1456 case MONO_TYPE_GENERICINST
:
1457 if (!mono_type_generic_inst_is_valuetype (t
)) {
1458 cinfo
->args
[n
].size
= sizeof (target_mgreg_t
);
1459 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1462 if (mini_is_gsharedvt_variable_type (t
)) {
1463 /* gsharedvt arguments are passed by ref */
1464 g_assert (mini_is_gsharedvt_type (t
));
1465 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1466 switch (ainfo
->storage
) {
1467 case RegTypeGeneral
:
1468 ainfo
->storage
= RegTypeGSharedVtInReg
;
1471 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1474 g_assert_not_reached ();
1479 case MONO_TYPE_TYPEDBYREF
:
1480 case MONO_TYPE_VALUETYPE
: {
1483 int nwords
, nfields
, esize
;
1486 if (IS_HARD_FLOAT
&& sig
->pinvoke
&& is_hfa (t
, &nfields
, &esize
)) {
1487 if (fpr
+ nfields
< ARM_VFP_F16
) {
1488 ainfo
->storage
= RegTypeHFA
;
1490 ainfo
->nregs
= nfields
;
1491 ainfo
->esize
= esize
;
1502 if (t
->type
== MONO_TYPE_TYPEDBYREF
) {
1503 size
= MONO_ABI_SIZEOF (MonoTypedRef
);
1504 align
= sizeof (target_mgreg_t
);
1506 MonoClass
*klass
= mono_class_from_mono_type_internal (sig
->params
[i
]);
1508 size
= mono_class_native_size (klass
, &align
);
1510 size
= mini_type_stack_size_full (t
, &align
, FALSE
);
1512 DEBUG(g_print ("load %d bytes struct\n", size
));
1514 #ifdef TARGET_WATCHOS
1515 /* Watchos pass large structures by ref */
1516 /* We only do this for pinvoke to make gsharedvt/dyncall simpler */
1517 if (sig
->pinvoke
&& size
> 16) {
1518 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1519 switch (ainfo
->storage
) {
1520 case RegTypeGeneral
:
1521 ainfo
->storage
= RegTypeStructByAddr
;
1524 ainfo
->storage
= RegTypeStructByAddrOnStack
;
1527 g_assert_not_reached ();
1536 align_size
+= (sizeof (target_mgreg_t
) - 1);
1537 align_size
&= ~(sizeof (target_mgreg_t
) - 1);
1538 nwords
= (align_size
+ sizeof (target_mgreg_t
) -1 ) / sizeof (target_mgreg_t
);
1539 ainfo
->storage
= RegTypeStructByVal
;
1540 ainfo
->struct_size
= size
;
1541 ainfo
->align
= align
;
1543 if (eabi_supported
) {
1544 if (align
>= 8 && (gr
& 1))
1547 if (gr
> ARMREG_R3
) {
1549 ainfo
->vtsize
= nwords
;
1551 int rest
= ARMREG_R3
- gr
+ 1;
1552 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
1554 ainfo
->size
= n_in_regs
;
1555 ainfo
->vtsize
= nwords
- n_in_regs
;
1558 nwords
-= n_in_regs
;
1560 stack_size
= ALIGN_TO (stack_size
, align
);
1562 ainfo
->offset
= stack_size
;
1563 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1564 stack_size
+= nwords
* sizeof (target_mgreg_t
);
1570 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1576 add_float (&fpr
, &stack_size
, ainfo
, FALSE
, &float_spare
);
1578 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1584 add_float (&fpr
, &stack_size
, ainfo
, TRUE
, &float_spare
);
1586 add_general (&gr
, &stack_size
, ainfo
, FALSE
);
1589 case MONO_TYPE_MVAR
:
1590 /* gsharedvt arguments are passed by ref */
1591 g_assert (mini_is_gsharedvt_type (t
));
1592 add_general (&gr
, &stack_size
, ainfo
, TRUE
);
1593 switch (ainfo
->storage
) {
1594 case RegTypeGeneral
:
1595 ainfo
->storage
= RegTypeGSharedVtInReg
;
1598 ainfo
->storage
= RegTypeGSharedVtOnStack
;
1601 g_assert_not_reached ();
1605 g_error ("Can't handle 0x%x", sig
->params
[i
]->type
);
1610 /* Handle the case where there are no implicit arguments */
1611 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1612 /* Prevent implicit arguments and sig_cookie from
1613 being passed in registers */
1616 /* Emit the signature cookie just before the implicit arguments */
1617 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1620 DEBUG (g_print (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1621 stack_size
= ALIGN_TO (stack_size
, MONO_ARCH_FRAME_ALIGNMENT
);
1623 cinfo
->stack_usage
= stack_size
;
1628 * We need to create a temporary value if the argument is not stored in
1629 * a linear memory range in the ccontext (this normally happens for
1630 * value types if they are passed both by stack and regs).
1633 arg_need_temp (ArgInfo
*ainfo
)
1635 if (ainfo
->storage
== RegTypeStructByVal
&& ainfo
->vtsize
)
1636 return ainfo
->struct_size
;
1641 arg_get_storage (CallContext
*ccontext
, ArgInfo
*ainfo
)
1643 switch (ainfo
->storage
) {
1644 case RegTypeIRegPair
:
1645 case RegTypeGeneral
:
1646 case RegTypeStructByVal
:
1647 return &ccontext
->gregs
[ainfo
->reg
];
1650 return &ccontext
->fregs
[ainfo
->reg
];
1652 return ccontext
->stack
+ ainfo
->offset
;
1654 g_error ("Arg storage type not yet supported");
1659 arg_get_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer dest
)
1661 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1662 g_assert (arg_need_temp (ainfo
));
1663 memcpy (dest
, &ccontext
->gregs
[ainfo
->reg
], reg_size
);
1664 memcpy ((host_mgreg_t
*)dest
+ ainfo
->size
, ccontext
->stack
+ ainfo
->offset
, ainfo
->struct_size
- reg_size
);
1668 arg_set_val (CallContext
*ccontext
, ArgInfo
*ainfo
, gpointer src
)
1670 int reg_size
= ainfo
->size
* sizeof (host_mgreg_t
);
1671 g_assert (arg_need_temp (ainfo
));
1672 memcpy (&ccontext
->gregs
[ainfo
->reg
], src
, reg_size
);
1673 memcpy (ccontext
->stack
+ ainfo
->offset
, (host_mgreg_t
*)src
+ ainfo
->size
, ainfo
->struct_size
- reg_size
);
1676 /* Set arguments in the ccontext (for i2n entry) */
1678 mono_arch_set_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1680 MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1681 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1685 memset (ccontext
, 0, sizeof (CallContext
));
1687 ccontext
->stack_size
= ALIGN_TO (cinfo
->stack_usage
, MONO_ARCH_FRAME_ALIGNMENT
);
1688 if (ccontext
->stack_size
)
1689 ccontext
->stack
= (guint8
*)g_calloc (1, ccontext
->stack_size
);
1691 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1692 ainfo
= &cinfo
->ret
;
1693 if (ainfo
->storage
== RegTypeStructByAddr
) {
1694 storage
= interp_cb
->frame_arg_to_storage ((MonoInterpFrameHandle
)frame
, sig
, -1);
1695 ccontext
->gregs
[cinfo
->ret
.reg
] = (host_mgreg_t
)(gsize
)storage
;
1699 g_assert (!sig
->hasthis
);
1701 for (int i
= 0; i
< sig
->param_count
; i
++) {
1702 ainfo
= &cinfo
->args
[i
];
1703 int temp_size
= arg_need_temp (ainfo
);
1706 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1708 storage
= arg_get_storage (ccontext
, ainfo
);
1710 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1712 arg_set_val (ccontext
, ainfo
, storage
);
1718 /* Set return value in the ccontext (for n2i return) */
1720 mono_arch_set_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1722 MonoEECallbacks
*interp_cb
;
1727 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1730 interp_cb
= mini_get_interp_callbacks ();
1731 cinfo
= get_call_info (NULL
, sig
);
1732 ainfo
= &cinfo
->ret
;
1734 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1735 g_assert (!arg_need_temp (ainfo
));
1736 storage
= arg_get_storage (ccontext
, ainfo
);
1737 memset (ccontext
, 0, sizeof (CallContext
)); // FIXME
1738 interp_cb
->frame_arg_to_data ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1744 /* Gets the arguments from ccontext (for n2i entry) */
1746 mono_arch_get_native_call_context_args (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1748 MonoEECallbacks
*interp_cb
= mini_get_interp_callbacks ();
1749 CallInfo
*cinfo
= get_call_info (NULL
, sig
);
1753 if (sig
->ret
->type
!= MONO_TYPE_VOID
) {
1754 ainfo
= &cinfo
->ret
;
1755 if (ainfo
->storage
== RegTypeStructByAddr
) {
1756 storage
= (gpointer
)(gsize
)ccontext
->gregs
[cinfo
->ret
.reg
];
1757 interp_cb
->frame_arg_set_storage ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1761 for (int i
= 0; i
< sig
->param_count
+ sig
->hasthis
; i
++) {
1762 ainfo
= &cinfo
->args
[i
];
1763 int temp_size
= arg_need_temp (ainfo
);
1766 storage
= alloca (temp_size
); // FIXME? alloca in a loop
1767 arg_get_val (ccontext
, ainfo
, storage
);
1769 storage
= arg_get_storage (ccontext
, ainfo
);
1771 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, i
, storage
);
1777 /* Gets the return value from ccontext (for i2n exit) */
1779 mono_arch_get_native_call_context_ret (CallContext
*ccontext
, gpointer frame
, MonoMethodSignature
*sig
)
1781 MonoEECallbacks
*interp_cb
;
1786 if (sig
->ret
->type
== MONO_TYPE_VOID
)
1789 interp_cb
= mini_get_interp_callbacks ();
1790 cinfo
= get_call_info (NULL
, sig
);
1791 ainfo
= &cinfo
->ret
;
1793 if (ainfo
->storage
!= RegTypeStructByAddr
) {
1794 g_assert (!arg_need_temp (ainfo
));
1795 storage
= arg_get_storage (ccontext
, ainfo
);
1796 interp_cb
->data_to_frame_arg ((MonoInterpFrameHandle
)frame
, sig
, -1, storage
);
1805 mono_arch_tailcall_supported (MonoCompile
*cfg
, MonoMethodSignature
*caller_sig
, MonoMethodSignature
*callee_sig
, gboolean virtual_
)
1807 g_assert (caller_sig
);
1808 g_assert (callee_sig
);
1810 CallInfo
*caller_info
= get_call_info (NULL
, caller_sig
);
1811 CallInfo
*callee_info
= get_call_info (NULL
, callee_sig
);
1814 * Tailcalls with more callee stack usage than the caller cannot be supported, since
1815 * the extra stack space would be left on the stack after the tailcall.
1817 gboolean res
= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
<= caller_info
->stack_usage
)
1818 && IS_SUPPORTED_TAILCALL (caller_info
->ret
.storage
== callee_info
->ret
.storage
);
1820 // FIXME The limit here is that moving the parameters requires addressing the parameters
1821 // with 12bit (4K) immediate offsets. - 4 for TAILCALL_REG/MEMBASE
1822 res
&= IS_SUPPORTED_TAILCALL (callee_info
->stack_usage
< (4096 - 4));
1823 res
&= IS_SUPPORTED_TAILCALL (caller_info
->stack_usage
< (4096 - 4));
1825 g_free (caller_info
);
1826 g_free (callee_info
);
1832 debug_omit_fp (void)
1835 return mono_debug_count ();
1842 * mono_arch_compute_omit_fp:
1843 * Determine whether the frame pointer can be eliminated.
1846 mono_arch_compute_omit_fp (MonoCompile
*cfg
)
1848 MonoMethodSignature
*sig
;
1849 MonoMethodHeader
*header
;
1853 if (cfg
->arch
.omit_fp_computed
)
1856 header
= cfg
->header
;
1858 sig
= mono_method_signature_internal (cfg
->method
);
1860 if (!cfg
->arch
.cinfo
)
1861 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1862 cinfo
= cfg
->arch
.cinfo
;
1865 * FIXME: Remove some of the restrictions.
1867 cfg
->arch
.omit_fp
= TRUE
;
1868 cfg
->arch
.omit_fp_computed
= TRUE
;
1870 if (cfg
->disable_omit_fp
)
1871 cfg
->arch
.omit_fp
= FALSE
;
1872 if (!debug_omit_fp ())
1873 cfg
->arch
.omit_fp
= FALSE
;
1875 if (cfg->method->save_lmf)
1876 cfg->arch.omit_fp = FALSE;
1878 if (cfg
->flags
& MONO_CFG_HAS_ALLOCA
)
1879 cfg
->arch
.omit_fp
= FALSE
;
1880 if (header
->num_clauses
)
1881 cfg
->arch
.omit_fp
= FALSE
;
1882 if (cfg
->param_area
)
1883 cfg
->arch
.omit_fp
= FALSE
;
1884 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
))
1885 cfg
->arch
.omit_fp
= FALSE
;
1886 if ((mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
)))
1887 cfg
->arch
.omit_fp
= FALSE
;
1888 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
1889 ArgInfo
*ainfo
= &cinfo
->args
[i
];
1891 if (ainfo
->storage
== RegTypeBase
|| ainfo
->storage
== RegTypeBaseGen
|| ainfo
->storage
== RegTypeStructByVal
) {
1893 * The stack offset can only be determined when the frame
1896 cfg
->arch
.omit_fp
= FALSE
;
1901 for (i
= cfg
->locals_start
; i
< cfg
->num_varinfo
; i
++) {
1902 MonoInst
*ins
= cfg
->varinfo
[i
];
1905 locals_size
+= mono_type_size (ins
->inst_vtype
, &ialign
);
1910 * Set var information according to the calling convention. arm version.
1911 * The locals var stuff should most likely be split in another method.
1914 mono_arch_allocate_vars (MonoCompile
*cfg
)
1916 MonoMethodSignature
*sig
;
1917 MonoMethodHeader
*header
;
1920 int i
, offset
, size
, align
, curinst
;
1925 sig
= mono_method_signature_internal (cfg
->method
);
1927 if (!cfg
->arch
.cinfo
)
1928 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
1929 cinfo
= cfg
->arch
.cinfo
;
1930 sig_ret
= mini_get_underlying_type (sig
->ret
);
1932 mono_arch_compute_omit_fp (cfg
);
1934 if (cfg
->arch
.omit_fp
)
1935 cfg
->frame_reg
= ARMREG_SP
;
1937 cfg
->frame_reg
= ARMREG_FP
;
1939 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1941 /* allow room for the vararg method args: void* and long/double */
1942 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1943 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (target_mgreg_t
)*8);
1945 header
= cfg
->header
;
1947 /* See mono_arch_get_global_int_regs () */
1948 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
1949 cfg
->uses_rgctx_reg
= TRUE
;
1951 if (cfg
->frame_reg
!= ARMREG_SP
)
1952 cfg
->used_int_regs
|= 1 << cfg
->frame_reg
;
1954 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
|| COMPILE_LLVM (cfg
))
1955 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1956 cfg
->used_int_regs
|= (1 << MONO_ARCH_IMT_REG
);
1960 if (!MONO_TYPE_ISSTRUCT (sig_ret
) && cinfo
->ret
.storage
!= RegTypeStructByAddr
) {
1961 if (sig_ret
->type
!= MONO_TYPE_VOID
) {
1962 cfg
->ret
->opcode
= OP_REGVAR
;
1963 cfg
->ret
->inst_c0
= ARMREG_R0
;
1966 /* local vars are at a positive offset from the stack pointer */
1968 * also note that if the function uses alloca, we use FP
1969 * to point at the local variables.
1971 offset
= 0; /* linkage area */
1972 /* align the offset to 16 bytes: not sure this is needed here */
1974 //offset &= ~(8 - 1);
1976 /* add parameter area size for called functions */
1977 offset
+= cfg
->param_area
;
1980 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1983 /* allow room to save the return value */
1984 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1987 switch (cinfo
->ret
.storage
) {
1988 case RegTypeStructByVal
:
1990 /* Allocate a local to hold the result, the epilog will copy it to the correct place */
1991 offset
= ALIGN_TO (offset
, 8);
1992 cfg
->ret
->opcode
= OP_REGOFFSET
;
1993 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1994 cfg
->ret
->inst_offset
= offset
;
1995 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1996 offset
+= cinfo
->ret
.nregs
* sizeof (target_mgreg_t
);
2000 case RegTypeStructByAddr
:
2001 ins
= cfg
->vret_addr
;
2002 offset
+= sizeof (target_mgreg_t
) - 1;
2003 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2004 ins
->inst_offset
= offset
;
2005 ins
->opcode
= OP_REGOFFSET
;
2006 ins
->inst_basereg
= cfg
->frame_reg
;
2007 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2008 g_print ("vret_addr =");
2009 mono_print_ins (cfg
->vret_addr
);
2011 offset
+= sizeof (target_mgreg_t
);
2017 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
2018 if (cfg
->arch
.seq_point_info_var
) {
2021 ins
= cfg
->arch
.seq_point_info_var
;
2025 offset
+= align
- 1;
2026 offset
&= ~(align
- 1);
2027 ins
->opcode
= OP_REGOFFSET
;
2028 ins
->inst_basereg
= cfg
->frame_reg
;
2029 ins
->inst_offset
= offset
;
2032 if (cfg
->arch
.ss_trigger_page_var
) {
2035 ins
= cfg
->arch
.ss_trigger_page_var
;
2038 offset
+= align
- 1;
2039 offset
&= ~(align
- 1);
2040 ins
->opcode
= OP_REGOFFSET
;
2041 ins
->inst_basereg
= cfg
->frame_reg
;
2042 ins
->inst_offset
= offset
;
2046 if (cfg
->arch
.seq_point_ss_method_var
) {
2049 ins
= cfg
->arch
.seq_point_ss_method_var
;
2052 offset
+= align
- 1;
2053 offset
&= ~(align
- 1);
2054 ins
->opcode
= OP_REGOFFSET
;
2055 ins
->inst_basereg
= cfg
->frame_reg
;
2056 ins
->inst_offset
= offset
;
2059 if (cfg
->arch
.seq_point_bp_method_var
) {
2062 ins
= cfg
->arch
.seq_point_bp_method_var
;
2065 offset
+= align
- 1;
2066 offset
&= ~(align
- 1);
2067 ins
->opcode
= OP_REGOFFSET
;
2068 ins
->inst_basereg
= cfg
->frame_reg
;
2069 ins
->inst_offset
= offset
;
2073 if (cfg
->has_atomic_exchange_i4
|| cfg
->has_atomic_cas_i4
|| cfg
->has_atomic_add_i4
) {
2074 /* Allocate a temporary used by the atomic ops */
2078 /* Allocate a local slot to hold the sig cookie address */
2079 offset
+= align
- 1;
2080 offset
&= ~(align
- 1);
2081 cfg
->arch
.atomic_tmp_offset
= offset
;
2084 cfg
->arch
.atomic_tmp_offset
= -1;
2087 cfg
->locals_min_stack_offset
= offset
;
2089 curinst
= cfg
->locals_start
;
2090 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
2093 ins
= cfg
->varinfo
[i
];
2094 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
2097 t
= ins
->inst_vtype
;
2098 if (cfg
->gsharedvt
&& mini_is_gsharedvt_variable_type (t
))
2101 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2102 * pinvoke wrappers when they call functions returning structure */
2103 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (t
) && t
->type
!= MONO_TYPE_TYPEDBYREF
) {
2104 size
= mono_class_native_size (mono_class_from_mono_type_internal (t
), &ualign
);
2108 size
= mono_type_size (t
, &align
);
2110 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2111 * since it loads/stores misaligned words, which don't do the right thing.
2113 if (align
< 4 && size
>= 4)
2115 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2116 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2117 offset
+= align
- 1;
2118 offset
&= ~(align
- 1);
2119 ins
->opcode
= OP_REGOFFSET
;
2120 ins
->inst_offset
= offset
;
2121 ins
->inst_basereg
= cfg
->frame_reg
;
2123 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2126 cfg
->locals_max_stack_offset
= offset
;
2130 ins
= cfg
->args
[curinst
];
2131 if (ins
->opcode
!= OP_REGVAR
) {
2132 ins
->opcode
= OP_REGOFFSET
;
2133 ins
->inst_basereg
= cfg
->frame_reg
;
2134 offset
+= sizeof (target_mgreg_t
) - 1;
2135 offset
&= ~(sizeof (target_mgreg_t
) - 1);
2136 ins
->inst_offset
= offset
;
2137 offset
+= sizeof (target_mgreg_t
);
2142 if (sig
->call_convention
== MONO_CALL_VARARG
) {
2146 /* Allocate a local slot to hold the sig cookie address */
2147 offset
+= align
- 1;
2148 offset
&= ~(align
- 1);
2149 cfg
->sig_cookie
= offset
;
2153 for (i
= 0; i
< sig
->param_count
; ++i
) {
2154 ainfo
= cinfo
->args
+ i
;
2156 ins
= cfg
->args
[curinst
];
2158 switch (ainfo
->storage
) {
2160 offset
= ALIGN_TO (offset
, 8);
2161 ins
->opcode
= OP_REGOFFSET
;
2162 ins
->inst_basereg
= cfg
->frame_reg
;
2163 /* These arguments are saved to the stack in the prolog */
2164 ins
->inst_offset
= offset
;
2165 if (cfg
->verbose_level
>= 2)
2166 g_print ("arg %d allocated to %s+0x%0x.\n", i
, mono_arch_regname (ins
->inst_basereg
), (int)ins
->inst_offset
);
2174 if (ins
->opcode
!= OP_REGVAR
) {
2175 ins
->opcode
= OP_REGOFFSET
;
2176 ins
->inst_basereg
= cfg
->frame_reg
;
2177 size
= mini_type_stack_size_full (sig
->params
[i
], &ualign
, sig
->pinvoke
);
2179 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2180 * since it loads/stores misaligned words, which don't do the right thing.
2182 if (align
< 4 && size
>= 4)
2184 /* The code in the prolog () stores words when storing vtypes received in a register */
2185 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
2187 if (ALIGN_TO (offset
, align
) > ALIGN_TO (offset
, 4))
2188 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2189 offset
+= align
- 1;
2190 offset
&= ~(align
- 1);
2191 ins
->inst_offset
= offset
;
2197 /* align the offset to 8 bytes */
2198 if (ALIGN_TO (offset
, 8) > ALIGN_TO (offset
, 4))
2199 mini_gc_set_slot_type_from_fp (cfg
, ALIGN_TO (offset
, 4), SLOT_NOREF
);
2204 cfg
->stack_offset
= offset
;
2208 mono_arch_create_vars (MonoCompile
*cfg
)
2210 MonoMethodSignature
*sig
;
2214 sig
= mono_method_signature_internal (cfg
->method
);
2216 if (!cfg
->arch
.cinfo
)
2217 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
);
2218 cinfo
= cfg
->arch
.cinfo
;
2220 if (IS_HARD_FLOAT
) {
2221 for (i
= 0; i
< 2; i
++) {
2222 MonoInst
*inst
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.double_class
), OP_LOCAL
);
2223 inst
->flags
|= MONO_INST_VOLATILE
;
2225 cfg
->arch
.vfp_scratch_slots
[i
] = inst
;
2229 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
2230 cfg
->ret_var_is_local
= TRUE
;
2232 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
2233 cfg
->vret_addr
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_ARG
);
2234 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
2235 g_print ("vret_addr = ");
2236 mono_print_ins (cfg
->vret_addr
);
2240 if (cfg
->gen_sdb_seq_points
) {
2241 if (cfg
->compile_aot
) {
2242 MonoInst
*ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2243 ins
->flags
|= MONO_INST_VOLATILE
;
2244 cfg
->arch
.seq_point_info_var
= ins
;
2246 if (!cfg
->soft_breakpoints
) {
2247 /* Allocate a separate variable for this to save 1 load per seq point */
2248 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2249 ins
->flags
|= MONO_INST_VOLATILE
;
2250 cfg
->arch
.ss_trigger_page_var
= ins
;
2253 if (cfg
->soft_breakpoints
) {
2256 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2257 ins
->flags
|= MONO_INST_VOLATILE
;
2258 cfg
->arch
.seq_point_ss_method_var
= ins
;
2260 ins
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2261 ins
->flags
|= MONO_INST_VOLATILE
;
2262 cfg
->arch
.seq_point_bp_method_var
= ins
;
2268 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
2270 MonoMethodSignature
*tmp_sig
;
2273 if (MONO_IS_TAILCALL_OPCODE (call
))
2276 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
2279 * mono_ArgIterator_Setup assumes the signature cookie is
2280 * passed first and all the arguments which were before it are
2281 * passed on the stack after the signature. So compensate by
2282 * passing a different signature.
2284 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
2285 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
2286 tmp_sig
->sentinelpos
= 0;
2287 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
2289 sig_reg
= mono_alloc_ireg (cfg
);
2290 MONO_EMIT_NEW_SIGNATURECONST (cfg
, sig_reg
, tmp_sig
);
2292 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_reg
);
2297 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
2302 LLVMCallInfo
*linfo
;
2304 n
= sig
->param_count
+ sig
->hasthis
;
2306 cinfo
= get_call_info (cfg
->mempool
, sig
);
2308 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
2311 * LLVM always uses the native ABI while we use our own ABI, the
2312 * only difference is the handling of vtypes:
2313 * - we only pass/receive them in registers in some cases, and only
2314 * in 1 or 2 integer registers.
2316 switch (cinfo
->ret
.storage
) {
2317 case RegTypeGeneral
:
2320 case RegTypeIRegPair
:
2322 case RegTypeStructByAddr
:
2324 linfo
->ret
.storage
= LLVMArgVtypeByRef
;
2326 /* Vtype returned using a hidden argument */
2327 linfo
->ret
.storage
= LLVMArgVtypeRetAddr
;
2328 linfo
->vret_arg_index
= cinfo
->vret_arg_index
;
2332 case RegTypeStructByVal
:
2333 /* LLVM models this by returning an int array */
2334 linfo
->ret
.storage
= LLVMArgAsIArgs
;
2335 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2339 linfo
->ret
.storage
= LLVMArgFpStruct
;
2340 linfo
->ret
.nslots
= cinfo
->ret
.nregs
;
2341 linfo
->ret
.esize
= cinfo
->ret
.esize
;
2344 cfg
->exception_message
= g_strdup_printf ("unknown ret conv (%d)", cinfo
->ret
.storage
);
2345 cfg
->disable_llvm
= TRUE
;
2349 for (i
= 0; i
< n
; ++i
) {
2350 LLVMArgInfo
*lainfo
= &linfo
->args
[i
];
2351 ainfo
= cinfo
->args
+ i
;
2353 lainfo
->storage
= LLVMArgNone
;
2355 switch (ainfo
->storage
) {
2356 case RegTypeGeneral
:
2357 case RegTypeIRegPair
:
2359 case RegTypeBaseGen
:
2361 lainfo
->storage
= LLVMArgNormal
;
2363 case RegTypeStructByVal
: {
2364 lainfo
->storage
= LLVMArgAsIArgs
;
2365 int slotsize
= eabi_supported
&& ainfo
->align
== 8 ? 8 : 4;
2366 lainfo
->nslots
= ALIGN_TO (ainfo
->struct_size
, slotsize
) / slotsize
;
2367 lainfo
->esize
= slotsize
;
2370 case RegTypeStructByAddr
:
2371 case RegTypeStructByAddrOnStack
:
2372 lainfo
->storage
= LLVMArgVtypeByRef
;
2377 lainfo
->storage
= LLVMArgAsFpArgs
;
2378 lainfo
->nslots
= ainfo
->nregs
;
2379 lainfo
->esize
= ainfo
->esize
;
2380 for (j
= 0; j
< ainfo
->nregs
; ++j
)
2381 lainfo
->pair_storage
[j
] = LLVMArgInFPReg
;
2385 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
2386 cfg
->disable_llvm
= TRUE
;
2396 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
2399 MonoMethodSignature
*sig
;
2403 sig
= call
->signature
;
2404 n
= sig
->param_count
+ sig
->hasthis
;
2406 cinfo
= get_call_info (cfg
->mempool
, sig
);
2408 switch (cinfo
->ret
.storage
) {
2409 case RegTypeStructByVal
:
2411 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
2412 /* The JIT will transform this into a normal call */
2413 call
->vret_in_reg
= TRUE
;
2416 if (MONO_IS_TAILCALL_OPCODE (call
))
2419 * The vtype is returned in registers, save the return area address in a local, and save the vtype into
2420 * the location pointed to by it after call in emit_move_return_value ().
2422 if (!cfg
->arch
.vret_addr_loc
) {
2423 cfg
->arch
.vret_addr_loc
= mono_compile_create_var (cfg
, mono_get_int_type (), OP_LOCAL
);
2424 /* Prevent it from being register allocated or optimized away */
2425 cfg
->arch
.vret_addr_loc
->flags
|= MONO_INST_VOLATILE
;
2428 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->arch
.vret_addr_loc
->dreg
, call
->vret_var
->dreg
);
2430 case RegTypeStructByAddr
: {
2432 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
2433 vtarg
->sreg1
= call
->vret_var
->dreg
;
2434 vtarg
->dreg
= mono_alloc_preg (cfg
);
2435 MONO_ADD_INS (cfg
->cbb
, vtarg
);
2437 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
2444 for (i
= 0; i
< n
; ++i
) {
2445 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2448 if (i
>= sig
->hasthis
)
2449 t
= sig
->params
[i
- sig
->hasthis
];
2451 t
= mono_get_int_type ();
2452 t
= mini_get_underlying_type (t
);
2454 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
2455 /* Emit the signature cookie just before the implicit arguments */
2456 emit_sig_cookie (cfg
, call
, cinfo
);
2459 in
= call
->args
[i
];
2461 switch (ainfo
->storage
) {
2462 case RegTypeGeneral
:
2463 case RegTypeIRegPair
:
2464 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2465 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2466 ins
->dreg
= mono_alloc_ireg (cfg
);
2467 ins
->sreg1
= MONO_LVREG_LS (in
->dreg
);
2468 MONO_ADD_INS (cfg
->cbb
, ins
);
2469 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2471 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2472 ins
->dreg
= mono_alloc_ireg (cfg
);
2473 ins
->sreg1
= MONO_LVREG_MS (in
->dreg
);
2474 MONO_ADD_INS (cfg
->cbb
, ins
);
2475 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2476 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
2477 if (ainfo
->size
== 4) {
2478 if (IS_SOFT_FLOAT
) {
2479 /* mono_emit_call_args () have already done the r8->r4 conversion */
2480 /* The converted value is in an int vreg */
2481 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2482 ins
->dreg
= mono_alloc_ireg (cfg
);
2483 ins
->sreg1
= in
->dreg
;
2484 MONO_ADD_INS (cfg
->cbb
, ins
);
2485 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2489 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2490 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2491 creg
= mono_alloc_ireg (cfg
);
2492 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2493 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2496 if (IS_SOFT_FLOAT
) {
2497 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
2498 ins
->dreg
= mono_alloc_ireg (cfg
);
2499 ins
->sreg1
= in
->dreg
;
2500 MONO_ADD_INS (cfg
->cbb
, ins
);
2501 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2503 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
2504 ins
->dreg
= mono_alloc_ireg (cfg
);
2505 ins
->sreg1
= in
->dreg
;
2506 MONO_ADD_INS (cfg
->cbb
, ins
);
2507 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
2511 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2512 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2513 creg
= mono_alloc_ireg (cfg
);
2514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2515 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
2516 creg
= mono_alloc_ireg (cfg
);
2517 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
2518 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
2521 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2523 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2524 ins
->dreg
= mono_alloc_ireg (cfg
);
2525 ins
->sreg1
= in
->dreg
;
2526 MONO_ADD_INS (cfg
->cbb
, ins
);
2528 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
2531 case RegTypeStructByVal
:
2532 case RegTypeGSharedVtInReg
:
2533 case RegTypeGSharedVtOnStack
:
2535 case RegTypeStructByAddr
:
2536 case RegTypeStructByAddrOnStack
:
2537 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
2538 ins
->opcode
= OP_OUTARG_VT
;
2539 ins
->sreg1
= in
->dreg
;
2540 ins
->klass
= in
->klass
;
2541 ins
->inst_p0
= call
;
2542 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
2543 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
2544 mono_call_inst_add_outarg_vt (cfg
, call
, ins
);
2545 MONO_ADD_INS (cfg
->cbb
, ins
);
2548 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2549 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2550 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
2551 if (t
->type
== MONO_TYPE_R8
) {
2552 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2555 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2557 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2560 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
2563 case RegTypeBaseGen
:
2564 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
2565 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? MONO_LVREG_LS (in
->dreg
) : MONO_LVREG_MS (in
->dreg
));
2566 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
2567 ins
->dreg
= mono_alloc_ireg (cfg
);
2568 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? MONO_LVREG_MS (in
->dreg
) : MONO_LVREG_LS (in
->dreg
);
2569 MONO_ADD_INS (cfg
->cbb
, ins
);
2570 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
2571 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
2574 /* This should work for soft-float as well */
2576 cfg
->param_area
= MAX (cfg
->param_area
, 8);
2577 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
2578 creg
= mono_alloc_ireg (cfg
);
2579 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
2580 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
2581 creg
= mono_alloc_ireg (cfg
);
2582 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
2583 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
2584 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2586 g_assert_not_reached ();
2590 int fdreg
= mono_alloc_freg (cfg
);
2592 if (ainfo
->size
== 8) {
2593 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2594 ins
->sreg1
= in
->dreg
;
2596 MONO_ADD_INS (cfg
->cbb
, ins
);
2598 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, TRUE
);
2603 * Mono's register allocator doesn't speak single-precision registers that
2604 * overlap double-precision registers (i.e. armhf). So we have to work around
2605 * the register allocator and load the value from memory manually.
2607 * So we create a variable for the float argument and an instruction to store
2608 * the argument into the variable. We then store the list of these arguments
2609 * in call->float_args. This list is then used by emit_float_args later to
2610 * pass the arguments in the various call opcodes.
2612 * This is not very nice, and we should really try to fix the allocator.
2615 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2617 /* Make sure the instruction isn't seen as pointless and removed.
2619 float_arg
->flags
|= MONO_INST_VOLATILE
;
2621 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, in
->dreg
);
2623 /* We use the dreg to look up the instruction later. The hreg is used to
2624 * emit the instruction that loads the value into the FP reg.
2626 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2627 fad
->vreg
= float_arg
->dreg
;
2628 fad
->hreg
= ainfo
->reg
;
2630 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2633 call
->used_iregs
|= 1 << ainfo
->reg
;
2634 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
2638 g_assert_not_reached ();
2642 /* Handle the case where there are no implicit arguments */
2643 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
2644 emit_sig_cookie (cfg
, call
, cinfo
);
2646 call
->call_info
= cinfo
;
2647 call
->stack_usage
= cinfo
->stack_usage
;
2651 add_outarg_reg (MonoCompile
*cfg
, MonoCallInst
*call
, ArgStorage storage
, int reg
, MonoInst
*arg
)
2657 MONO_INST_NEW (cfg
, ins
, OP_FMOVE
);
2658 ins
->dreg
= mono_alloc_freg (cfg
);
2659 ins
->sreg1
= arg
->dreg
;
2660 MONO_ADD_INS (cfg
->cbb
, ins
);
2661 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, reg
, TRUE
);
2664 g_assert_not_reached ();
2670 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
2672 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
2674 ArgInfo
*ainfo
= (ArgInfo
*)ins
->inst_p1
;
2675 int ovf_size
= ainfo
->vtsize
;
2676 int doffset
= ainfo
->offset
;
2677 int struct_size
= ainfo
->struct_size
;
2678 int i
, soffset
, dreg
, tmpreg
;
2680 switch (ainfo
->storage
) {
2681 case RegTypeGSharedVtInReg
:
2682 case RegTypeStructByAddr
:
2684 mono_call_inst_add_outarg_reg (cfg
, call
, src
->dreg
, ainfo
->reg
, FALSE
);
2686 case RegTypeGSharedVtOnStack
:
2687 case RegTypeStructByAddrOnStack
:
2688 /* Pass by addr on stack */
2689 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, src
->dreg
);
2692 for (i
= 0; i
< ainfo
->nregs
; ++i
) {
2693 if (ainfo
->esize
== 4)
2694 MONO_INST_NEW (cfg
, load
, OP_LOADR4_MEMBASE
);
2696 MONO_INST_NEW (cfg
, load
, OP_LOADR8_MEMBASE
);
2697 load
->dreg
= mono_alloc_freg (cfg
);
2698 load
->inst_basereg
= src
->dreg
;
2699 load
->inst_offset
= i
* ainfo
->esize
;
2700 MONO_ADD_INS (cfg
->cbb
, load
);
2702 if (ainfo
->esize
== 4) {
2705 /* See RegTypeFP in mono_arch_emit_call () */
2706 MonoInst
*float_arg
= mono_compile_create_var (cfg
, m_class_get_byval_arg (mono_defaults
.single_class
), OP_LOCAL
);
2707 float_arg
->flags
|= MONO_INST_VOLATILE
;
2708 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, float_arg
->dreg
, load
->dreg
);
2710 fad
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (FloatArgData
));
2711 fad
->vreg
= float_arg
->dreg
;
2712 fad
->hreg
= ainfo
->reg
+ i
;
2714 call
->float_args
= g_slist_append_mempool (cfg
->mempool
, call
->float_args
, fad
);
2716 add_outarg_reg (cfg
, call
, RegTypeFP
, ainfo
->reg
+ (i
* 2), load
);
2722 for (i
= 0; i
< ainfo
->size
; ++i
) {
2723 dreg
= mono_alloc_ireg (cfg
);
2724 switch (struct_size
) {
2726 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2729 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU2_MEMBASE
, dreg
, src
->dreg
, soffset
);
2732 tmpreg
= mono_alloc_ireg (cfg
);
2733 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, dreg
, src
->dreg
, soffset
);
2734 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 1);
2735 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 8);
2736 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2737 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOADU1_MEMBASE
, tmpreg
, src
->dreg
, soffset
+ 2);
2738 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_SHL_IMM
, tmpreg
, tmpreg
, 16);
2739 MONO_EMIT_NEW_BIALU (cfg
, OP_IOR
, dreg
, dreg
, tmpreg
);
2742 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
2745 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
2746 soffset
+= sizeof (target_mgreg_t
);
2747 struct_size
-= sizeof (target_mgreg_t
);
2749 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2751 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, MIN (ovf_size
* sizeof (target_mgreg_t
), struct_size
), struct_size
< 4 ? 1 : 4);
2757 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
2759 MonoType
*ret
= mini_get_underlying_type (mono_method_signature_internal (method
)->ret
);
2762 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
2765 if (COMPILE_LLVM (cfg
)) {
2766 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2768 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
2769 ins
->sreg1
= MONO_LVREG_LS (val
->dreg
);
2770 ins
->sreg2
= MONO_LVREG_MS (val
->dreg
);
2771 MONO_ADD_INS (cfg
->cbb
, ins
);
2776 case MONO_ARM_FPU_NONE
:
2777 if (ret
->type
== MONO_TYPE_R8
) {
2780 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2781 ins
->dreg
= cfg
->ret
->dreg
;
2782 ins
->sreg1
= val
->dreg
;
2783 MONO_ADD_INS (cfg
->cbb
, ins
);
2786 if (ret
->type
== MONO_TYPE_R4
) {
2787 /* Already converted to an int in method_to_ir () */
2788 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2792 case MONO_ARM_FPU_VFP
:
2793 case MONO_ARM_FPU_VFP_HARD
:
2794 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
2797 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
2798 ins
->dreg
= cfg
->ret
->dreg
;
2799 ins
->sreg1
= val
->dreg
;
2800 MONO_ADD_INS (cfg
->cbb
, ins
);
2805 g_assert_not_reached ();
2809 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
2812 #endif /* #ifndef DISABLE_JIT */
2815 mono_arch_is_inst_imm (int opcode
, int imm_opcode
, gint64 imm
)
2821 MonoMethodSignature
*sig
;
2824 MonoType
**param_types
;
2828 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
2832 switch (cinfo
->ret
.storage
) {
2834 case RegTypeGeneral
:
2835 case RegTypeIRegPair
:
2836 case RegTypeStructByAddr
:
2847 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
2848 ArgInfo
*ainfo
= &cinfo
->args
[i
];
2851 switch (ainfo
->storage
) {
2852 case RegTypeGeneral
:
2853 case RegTypeIRegPair
:
2854 case RegTypeBaseGen
:
2859 case RegTypeStructByVal
:
2860 if (ainfo
->size
== 0)
2861 last_slot
= PARAM_REGS
+ (ainfo
->offset
/ 4) + ainfo
->vtsize
;
2863 last_slot
= ainfo
->reg
+ ainfo
->size
+ ainfo
->vtsize
;
2870 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2871 for (i
= 0; i
< sig
->param_count
; ++i
) {
2872 MonoType
*t
= sig
->params
[i
];
2877 t
= mini_get_underlying_type (t
);
2900 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
2902 ArchDynCallInfo
*info
;
2906 cinfo
= get_call_info (NULL
, sig
);
2908 if (!dyn_call_supported (cinfo
, sig
)) {
2913 info
= g_new0 (ArchDynCallInfo
, 1);
2914 // FIXME: Preprocess the info to speed up start_dyn_call ()
2916 info
->cinfo
= cinfo
;
2917 info
->rtype
= mini_get_underlying_type (sig
->ret
);
2918 info
->param_types
= g_new0 (MonoType
*, sig
->param_count
);
2919 for (i
= 0; i
< sig
->param_count
; ++i
)
2920 info
->param_types
[i
] = mini_get_underlying_type (sig
->params
[i
]);
2922 return (MonoDynCallInfo
*)info
;
2926 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
2928 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2930 g_free (ainfo
->cinfo
);
2935 mono_arch_dyn_call_get_buf_size (MonoDynCallInfo
*info
)
2937 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
2939 g_assert (ainfo
->cinfo
->stack_usage
% MONO_ARCH_FRAME_ALIGNMENT
== 0);
2940 return sizeof (DynCallArgs
) + ainfo
->cinfo
->stack_usage
;
2944 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
)
2946 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
2947 CallInfo
*cinfo
= dinfo
->cinfo
;
2948 DynCallArgs
*p
= (DynCallArgs
*)buf
;
2949 int arg_index
, greg
, i
, j
, pindex
;
2950 MonoMethodSignature
*sig
= dinfo
->sig
;
2955 p
->n_stackargs
= cinfo
->stack_usage
/ sizeof (host_mgreg_t
);
2961 if (sig
->hasthis
|| dinfo
->cinfo
->vret_arg_index
== 1) {
2962 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)*(args
[arg_index
++]);
2967 if (dinfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
)
2968 p
->regs
[greg
++] = (host_mgreg_t
)(gsize
)ret
;
2970 for (i
= pindex
; i
< sig
->param_count
; i
++) {
2971 MonoType
*t
= dinfo
->param_types
[i
];
2972 gpointer
*arg
= args
[arg_index
++];
2973 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
2976 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
) {
2978 } else if (ainfo
->storage
== RegTypeFP
) {
2979 } else if (ainfo
->storage
== RegTypeBase
) {
2980 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
2981 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2982 /* slot + 1 is the first stack slot, so the code below will work */
2985 g_assert_not_reached ();
2989 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
2994 case MONO_TYPE_OBJECT
:
2998 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3001 p
->regs
[slot
] = *(guint8
*)arg
;
3004 p
->regs
[slot
] = *(gint8
*)arg
;
3007 p
->regs
[slot
] = *(gint16
*)arg
;
3010 p
->regs
[slot
] = *(guint16
*)arg
;
3013 p
->regs
[slot
] = *(gint32
*)arg
;
3016 p
->regs
[slot
] = *(guint32
*)arg
;
3020 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3021 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3024 if (ainfo
->storage
== RegTypeFP
) {
3025 float f
= *(float*)arg
;
3026 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)&f
;
3029 p
->regs
[slot
] = *(host_mgreg_t
*)arg
;
3033 if (ainfo
->storage
== RegTypeFP
) {
3034 p
->fpregs
[ainfo
->reg
/ 2] = *(double*)arg
;
3037 p
->regs
[slot
++] = (host_mgreg_t
)(gsize
)arg
[0];
3038 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)arg
[1];
3041 case MONO_TYPE_GENERICINST
:
3042 if (MONO_TYPE_IS_REFERENCE (t
)) {
3043 p
->regs
[slot
] = (host_mgreg_t
)(gsize
)*arg
;
3046 if (t
->type
== MONO_TYPE_GENERICINST
&& mono_class_is_nullable (mono_class_from_mono_type_internal (t
))) {
3047 MonoClass
*klass
= mono_class_from_mono_type_internal (t
);
3048 guint8
*nullable_buf
;
3051 size
= mono_class_value_size (klass
, NULL
);
3052 nullable_buf
= g_alloca (size
);
3053 g_assert (nullable_buf
);
3055 /* The argument pointed to by arg is either a boxed vtype or null */
3056 mono_nullable_init (nullable_buf
, (MonoObject
*)arg
, klass
);
3058 arg
= (gpointer
*)nullable_buf
;
3064 case MONO_TYPE_VALUETYPE
:
3065 g_assert (ainfo
->storage
== RegTypeStructByVal
);
3067 if (ainfo
->size
== 0)
3068 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
3072 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
3073 p
->regs
[slot
++] = ((host_mgreg_t
*)arg
) [j
];
3076 g_assert_not_reached ();
3082 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
3084 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
3085 DynCallArgs
*p
= (DynCallArgs
*)buf
;
3086 MonoType
*ptype
= ainfo
->rtype
;
3087 guint8
*ret
= p
->ret
;
3088 host_mgreg_t res
= p
->res
;
3089 host_mgreg_t res2
= p
->res2
;
3091 switch (ptype
->type
) {
3092 case MONO_TYPE_VOID
:
3093 *(gpointer
*)ret
= NULL
;
3095 case MONO_TYPE_OBJECT
:
3099 *(gpointer
*)ret
= (gpointer
)(gsize
)res
;
3105 *(guint8
*)ret
= res
;
3108 *(gint16
*)ret
= res
;
3111 *(guint16
*)ret
= res
;
3114 *(gint32
*)ret
= res
;
3117 *(guint32
*)ret
= res
;
3121 /* This handles endianness as well */
3122 ((gint32
*)ret
) [0] = res
;
3123 ((gint32
*)ret
) [1] = res2
;
3125 case MONO_TYPE_GENERICINST
:
3126 if (MONO_TYPE_IS_REFERENCE (ptype
)) {
3127 *(gpointer
*)ret
= (gpointer
)res
;
3132 case MONO_TYPE_VALUETYPE
:
3133 g_assert (ainfo
->cinfo
->ret
.storage
== RegTypeStructByAddr
);
3139 *(float*)ret
= *(float*)&p
->fpregs
[0];
3141 *(float*)ret
= *(float*)&res
;
3143 case MONO_TYPE_R8
: {
3144 host_mgreg_t regs
[2];
3147 if (IS_HARD_FLOAT
) {
3148 *(double*)ret
= p
->fpregs
[0];
3153 *(double*)ret
= *(double*)®s
;
3158 g_assert_not_reached ();
3165 * The immediate field for cond branches is big enough for all reasonable methods
3167 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3168 if (0 && ins->inst_true_bb->native_offset) { \
3169 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3171 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3172 ARM_B_COND (code, (condcode), 0); \
3175 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3177 /* emit an exception if condition is fail
3179 * We assign the extra code used to throw the implicit exceptions
3180 * to cfg->bb_exit as far as the big branch handling is concerned
3182 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3184 mono_add_patch_info (cfg, code - cfg->native_code, \
3185 MONO_PATCH_INFO_EXC, exc_name); \
3186 ARM_BL_COND (code, (condcode), 0); \
3189 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3192 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3197 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3201 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
3202 MonoInst
*last_ins
= mono_inst_prev (ins
, FILTER_IL_SEQ_POINT
);
3204 switch (ins
->opcode
) {
3207 /* Already done by an arch-independent pass */
3209 case OP_LOAD_MEMBASE
:
3210 case OP_LOADI4_MEMBASE
:
3212 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3213 * OP_LOAD_MEMBASE offset(basereg), reg
3215 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
3216 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
3217 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3218 ins
->inst_offset
== last_ins
->inst_offset
) {
3219 if (ins
->dreg
== last_ins
->sreg1
) {
3220 MONO_DELETE_INS (bb
, ins
);
3223 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3224 ins
->opcode
= OP_MOVE
;
3225 ins
->sreg1
= last_ins
->sreg1
;
3229 * Note: reg1 must be different from the basereg in the second load
3230 * OP_LOAD_MEMBASE offset(basereg), reg1
3231 * OP_LOAD_MEMBASE offset(basereg), reg2
3233 * OP_LOAD_MEMBASE offset(basereg), reg1
3234 * OP_MOVE reg1, reg2
3236 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
3237 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
3238 ins
->inst_basereg
!= last_ins
->dreg
&&
3239 ins
->inst_basereg
== last_ins
->inst_basereg
&&
3240 ins
->inst_offset
== last_ins
->inst_offset
) {
3242 if (ins
->dreg
== last_ins
->dreg
) {
3243 MONO_DELETE_INS (bb
, ins
);
3246 ins
->opcode
= OP_MOVE
;
3247 ins
->sreg1
= last_ins
->dreg
;
3250 //g_assert_not_reached ();
3254 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3255 * OP_LOAD_MEMBASE offset(basereg), reg
3257 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3258 * OP_ICONST reg, imm
3260 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
3261 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
3262 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3263 ins
->inst_offset
== last_ins
->inst_offset
) {
3264 //static int c = 0; g_print ("MATCHX %s %d\n", cfg->method->name,c++);
3265 ins
->opcode
= OP_ICONST
;
3266 ins
->inst_c0
= last_ins
->inst_imm
;
3267 g_assert_not_reached (); // check this rule
3271 case OP_LOADU1_MEMBASE
:
3272 case OP_LOADI1_MEMBASE
:
3273 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
3274 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3275 ins
->inst_offset
== last_ins
->inst_offset
) {
3276 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
3277 ins
->sreg1
= last_ins
->sreg1
;
3280 case OP_LOADU2_MEMBASE
:
3281 case OP_LOADI2_MEMBASE
:
3282 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
3283 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
3284 ins
->inst_offset
== last_ins
->inst_offset
) {
3285 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
3286 ins
->sreg1
= last_ins
->sreg1
;
3290 ins
->opcode
= OP_MOVE
;
3294 if (ins
->dreg
== ins
->sreg1
) {
3295 MONO_DELETE_INS (bb
, ins
);
3299 * OP_MOVE sreg, dreg
3300 * OP_MOVE dreg, sreg
3302 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
3303 ins
->sreg1
== last_ins
->dreg
&&
3304 ins
->dreg
== last_ins
->sreg1
) {
3305 MONO_DELETE_INS (bb
, ins
);
3314 * the branch_cc_table should maintain the order of these
3328 branch_cc_table
[] = {
3342 #define ADD_NEW_INS(cfg,dest,op) do { \
3343 MONO_INST_NEW ((cfg), (dest), (op)); \
3344 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3348 map_to_reg_reg_op (int op
)
3357 case OP_COMPARE_IMM
:
3359 case OP_ICOMPARE_IMM
:
3373 case OP_LOAD_MEMBASE
:
3374 return OP_LOAD_MEMINDEX
;
3375 case OP_LOADI4_MEMBASE
:
3376 return OP_LOADI4_MEMINDEX
;
3377 case OP_LOADU4_MEMBASE
:
3378 return OP_LOADU4_MEMINDEX
;
3379 case OP_LOADU1_MEMBASE
:
3380 return OP_LOADU1_MEMINDEX
;
3381 case OP_LOADI2_MEMBASE
:
3382 return OP_LOADI2_MEMINDEX
;
3383 case OP_LOADU2_MEMBASE
:
3384 return OP_LOADU2_MEMINDEX
;
3385 case OP_LOADI1_MEMBASE
:
3386 return OP_LOADI1_MEMINDEX
;
3387 case OP_STOREI1_MEMBASE_REG
:
3388 return OP_STOREI1_MEMINDEX
;
3389 case OP_STOREI2_MEMBASE_REG
:
3390 return OP_STOREI2_MEMINDEX
;
3391 case OP_STOREI4_MEMBASE_REG
:
3392 return OP_STOREI4_MEMINDEX
;
3393 case OP_STORE_MEMBASE_REG
:
3394 return OP_STORE_MEMINDEX
;
3395 case OP_STORER4_MEMBASE_REG
:
3396 return OP_STORER4_MEMINDEX
;
3397 case OP_STORER8_MEMBASE_REG
:
3398 return OP_STORER8_MEMINDEX
;
3399 case OP_STORE_MEMBASE_IMM
:
3400 return OP_STORE_MEMBASE_REG
;
3401 case OP_STOREI1_MEMBASE_IMM
:
3402 return OP_STOREI1_MEMBASE_REG
;
3403 case OP_STOREI2_MEMBASE_IMM
:
3404 return OP_STOREI2_MEMBASE_REG
;
3405 case OP_STOREI4_MEMBASE_IMM
:
3406 return OP_STOREI4_MEMBASE_REG
;
3408 g_assert_not_reached ();
3412 * Remove from the instruction list the instructions that can't be
3413 * represented with very simple instructions with no register
3417 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3419 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
3420 int rot_amount
, imm8
, low_imm
;
3422 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3424 switch (ins
->opcode
) {
3428 case OP_COMPARE_IMM
:
3429 case OP_ICOMPARE_IMM
:
3443 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
3444 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3445 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3446 temp
->inst_c0
= ins
->inst_imm
;
3447 temp
->dreg
= mono_alloc_ireg (cfg
);
3448 ins
->sreg2
= temp
->dreg
;
3450 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3451 ins
->opcode
= opcode2
;
3453 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
3459 if (ins
->inst_imm
== 1) {
3460 ins
->opcode
= OP_MOVE
;
3463 if (ins
->inst_imm
== 0) {
3464 ins
->opcode
= OP_ICONST
;
3468 imm8
= mono_is_power_of_two (ins
->inst_imm
);
3470 ins
->opcode
= OP_SHL_IMM
;
3471 ins
->inst_imm
= imm8
;
3474 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3475 temp
->inst_c0
= ins
->inst_imm
;
3476 temp
->dreg
= mono_alloc_ireg (cfg
);
3477 ins
->sreg2
= temp
->dreg
;
3478 ins
->opcode
= OP_IMUL
;
3485 MonoInst
*current
= ins
;
3487 /* may require a look-ahead of a couple instructions due to spilling */
3488 while (try_count
-- && current
->next
) {
3489 if (current
->next
->opcode
== OP_COND_EXC_C
|| current
->next
->opcode
== OP_COND_EXC_IC
) {
3490 /* ARM sets the C flag to 1 if there was _no_ overflow */
3491 current
->next
->opcode
= OP_COND_EXC_NC
;
3494 current
= current
->next
;
3499 case OP_IDIV_UN_IMM
:
3501 case OP_IREM_UN_IMM
: {
3502 int opcode2
= mono_op_imm_to_op (ins
->opcode
);
3503 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3504 temp
->inst_c0
= ins
->inst_imm
;
3505 temp
->dreg
= mono_alloc_ireg (cfg
);
3506 ins
->sreg2
= temp
->dreg
;
3508 g_error ("mono_op_imm_to_op failed for %s\n", mono_inst_name (ins
->opcode
));
3509 ins
->opcode
= opcode2
;
3512 case OP_LOCALLOC_IMM
:
3513 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3514 temp
->inst_c0
= ins
->inst_imm
;
3515 temp
->dreg
= mono_alloc_ireg (cfg
);
3516 ins
->sreg1
= temp
->dreg
;
3517 ins
->opcode
= OP_LOCALLOC
;
3519 case OP_LOAD_MEMBASE
:
3520 case OP_LOADI4_MEMBASE
:
3521 case OP_LOADU4_MEMBASE
:
3522 case OP_LOADU1_MEMBASE
:
3523 /* we can do two things: load the immed in a register
3524 * and use an indexed load, or see if the immed can be
3525 * represented as an ad_imm + a load with a smaller offset
3526 * that fits. We just do the first for now, optimize later.
3528 if (arm_is_imm12 (ins
->inst_offset
))
3530 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3531 temp
->inst_c0
= ins
->inst_offset
;
3532 temp
->dreg
= mono_alloc_ireg (cfg
);
3533 ins
->sreg2
= temp
->dreg
;
3534 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3536 case OP_LOADI2_MEMBASE
:
3537 case OP_LOADU2_MEMBASE
:
3538 case OP_LOADI1_MEMBASE
:
3539 if (arm_is_imm8 (ins
->inst_offset
))
3541 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3542 temp
->inst_c0
= ins
->inst_offset
;
3543 temp
->dreg
= mono_alloc_ireg (cfg
);
3544 ins
->sreg2
= temp
->dreg
;
3545 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3547 case OP_LOADR4_MEMBASE
:
3548 case OP_LOADR8_MEMBASE
:
3549 if (arm_is_fpimm8 (ins
->inst_offset
))
3551 low_imm
= ins
->inst_offset
& 0x1ff;
3552 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
3553 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3554 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3555 temp
->sreg1
= ins
->inst_basereg
;
3556 temp
->dreg
= mono_alloc_ireg (cfg
);
3557 ins
->inst_basereg
= temp
->dreg
;
3558 ins
->inst_offset
= low_imm
;
3562 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3563 temp
->inst_c0
= ins
->inst_offset
;
3564 temp
->dreg
= mono_alloc_ireg (cfg
);
3566 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3567 add_ins
->sreg1
= ins
->inst_basereg
;
3568 add_ins
->sreg2
= temp
->dreg
;
3569 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3571 ins
->inst_basereg
= add_ins
->dreg
;
3572 ins
->inst_offset
= 0;
3575 case OP_STORE_MEMBASE_REG
:
3576 case OP_STOREI4_MEMBASE_REG
:
3577 case OP_STOREI1_MEMBASE_REG
:
3578 if (arm_is_imm12 (ins
->inst_offset
))
3580 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3581 temp
->inst_c0
= ins
->inst_offset
;
3582 temp
->dreg
= mono_alloc_ireg (cfg
);
3583 ins
->sreg2
= temp
->dreg
;
3584 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3586 case OP_STOREI2_MEMBASE_REG
:
3587 if (arm_is_imm8 (ins
->inst_offset
))
3589 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3590 temp
->inst_c0
= ins
->inst_offset
;
3591 temp
->dreg
= mono_alloc_ireg (cfg
);
3592 ins
->sreg2
= temp
->dreg
;
3593 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3595 case OP_STORER4_MEMBASE_REG
:
3596 case OP_STORER8_MEMBASE_REG
:
3597 if (arm_is_fpimm8 (ins
->inst_offset
))
3599 low_imm
= ins
->inst_offset
& 0x1ff;
3600 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
3601 ADD_NEW_INS (cfg
, temp
, OP_ADD_IMM
);
3602 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
3603 temp
->sreg1
= ins
->inst_destbasereg
;
3604 temp
->dreg
= mono_alloc_ireg (cfg
);
3605 ins
->inst_destbasereg
= temp
->dreg
;
3606 ins
->inst_offset
= low_imm
;
3610 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3611 temp
->inst_c0
= ins
->inst_offset
;
3612 temp
->dreg
= mono_alloc_ireg (cfg
);
3614 ADD_NEW_INS (cfg
, add_ins
, OP_IADD
);
3615 add_ins
->sreg1
= ins
->inst_destbasereg
;
3616 add_ins
->sreg2
= temp
->dreg
;
3617 add_ins
->dreg
= mono_alloc_ireg (cfg
);
3619 ins
->inst_destbasereg
= add_ins
->dreg
;
3620 ins
->inst_offset
= 0;
3623 case OP_STORE_MEMBASE_IMM
:
3624 case OP_STOREI1_MEMBASE_IMM
:
3625 case OP_STOREI2_MEMBASE_IMM
:
3626 case OP_STOREI4_MEMBASE_IMM
:
3627 ADD_NEW_INS (cfg
, temp
, OP_ICONST
);
3628 temp
->inst_c0
= ins
->inst_imm
;
3629 temp
->dreg
= mono_alloc_ireg (cfg
);
3630 ins
->sreg1
= temp
->dreg
;
3631 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
3633 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
3636 gboolean swap
= FALSE
;
3640 /* Optimized away */
3645 /* Some fp compares require swapped operands */
3646 switch (ins
->next
->opcode
) {
3648 ins
->next
->opcode
= OP_FBLT
;
3652 ins
->next
->opcode
= OP_FBLT_UN
;
3656 ins
->next
->opcode
= OP_FBGE
;
3660 ins
->next
->opcode
= OP_FBGE_UN
;
3668 ins
->sreg1
= ins
->sreg2
;
3677 bb
->last_ins
= last_ins
;
3678 bb
->max_vreg
= cfg
->next_vreg
;
3682 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
3686 if (long_ins
->opcode
== OP_LNEG
) {
3688 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, MONO_LVREG_LS (ins
->dreg
), MONO_LVREG_LS (ins
->sreg1
), 0);
3689 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, MONO_LVREG_MS (ins
->dreg
), MONO_LVREG_MS (ins
->sreg1
), 0);
3695 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3697 /* sreg is a float, dreg is an integer reg */
3699 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3701 ARM_TOSIZD (code
, vfp_scratch1
, sreg
);
3703 ARM_TOUIZD (code
, vfp_scratch1
, sreg
);
3704 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3705 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3709 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3710 else if (size
== 2) {
3711 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3712 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3716 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3717 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3718 } else if (size
== 2) {
3719 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3720 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3727 emit_r4_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
3729 /* sreg is a float, dreg is an integer reg */
3731 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
3733 ARM_TOSIZS (code
, vfp_scratch1
, sreg
);
3735 ARM_TOUIZS (code
, vfp_scratch1
, sreg
);
3736 ARM_FMRS (code
, dreg
, vfp_scratch1
);
3737 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
3741 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
3742 else if (size
== 2) {
3743 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3744 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
3748 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
3749 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
3750 } else if (size
== 2) {
3751 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
3752 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
3758 #endif /* #ifndef DISABLE_JIT */
3760 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3763 emit_thunk (guint8
*code
, gconstpointer target
)
3767 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3768 if (thumb_supported
)
3769 ARM_BX (code
, ARMREG_IP
);
3771 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3772 *(guint32
*)code
= (guint32
)(gsize
)target
;
3774 mono_arch_flush_icache (p
, code
- p
);
/*
 * handle_thunk:
 *
 *   Redirect the call instruction at CODE to TARGET by routing it through a
 * thunk (a small code fragment holding the absolute target address).  Called
 * from arm_patch_general () when the B/BL displacement cannot reach TARGET
 * directly.  With CFG != NULL we are patching during JITting and allocate the
 * next slot from cfg->thunks; otherwise the thunk area is located through the
 * MonoJitInfo of the method containing CODE.
 *
 * NOTE(review): this block was damaged during extraction -- declarations of
 * thunks/thunks_size/p, several braces and guard lines are missing; the
 * surviving text is kept byte-identical below.
 */
3778 handle_thunk (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3780 MonoJitInfo
*ji
= NULL
;
3781 MonoThunkJitInfo
*info
;
3784 guint8
*orig_target
;
3785 guint8
*target_thunk
;
/* NOTE(review): presumably guarded by "if (!domain)" in the original -- confirm. */
3788 domain
= mono_domain_get ();
3792 * This can be called multiple times during JITting,
3793 * save the current position in cfg->arch to avoid
3794 * doing a O(n^2) search.
3796 if (!cfg
->arch
.thunks
) {
3797 cfg
->arch
.thunks
= cfg
->thunks
;
3798 cfg
->arch
.thunks_size
= cfg
->thunk_area
;
3800 thunks
= cfg
->arch
.thunks
;
3801 thunks_size
= cfg
->arch
.thunks_size
;
/* Out of reserved thunk space for this method: fatal. */
3803 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, mono_method_full_name (cfg
->method
, TRUE
));
3804 g_assert_not_reached ();
/* The next slot must still be free (its first word is zero). */
3807 g_assert (*(guint32
*)thunks
== 0);
3808 emit_thunk (thunks
, target
);
3809 arm_patch (code
, thunks
);
/* Advance the allocation cursor past the slot just used. */
3811 cfg
->arch
.thunks
+= THUNK_SIZE
;
3812 cfg
->arch
.thunks_size
-= THUNK_SIZE
;
/* No CFG: locate the thunk area via the JIT info covering CODE. */
3814 ji
= mini_jit_info_table_find (domain
, (char*)code
, NULL
);
3816 info
= mono_jit_info_get_thunk_info (ji
);
3819 thunks
= (guint8
*)ji
->code_start
+ info
->thunks_offset
;
3820 thunks_size
= info
->thunks_size
;
3822 orig_target
= mono_arch_get_call_target (code
+ 4);
/* Serialize runtime patching of the shared thunk area. */
3824 mono_mini_arch_lock ();
3826 target_thunk
= NULL
;
3827 if (orig_target
>= thunks
&& orig_target
< thunks
+ thunks_size
) {
3828 /* The call already points to a thunk, because of trampolines etc. */
3829 target_thunk
= orig_target
;
/* Scan the area for a free slot (first word zero) or an existing
 * thunk whose embedded address (third word) already equals TARGET. */
3831 for (p
= thunks
; p
< thunks
+ thunks_size
; p
+= THUNK_SIZE
) {
3832 if (((guint32
*)p
) [0] == 0) {
3836 } else if (((guint32
*)p
) [2] == (guint32
)(gsize
)target
) {
3837 /* Thunk already points to target */
3844 //g_print ("THUNK: %p %p %p\n", code, target, target_thunk);
3846 if (!target_thunk
) {
3847 mono_mini_arch_unlock ();
3848 g_print ("thunk failed %p->%p, thunk space=%d method %s", code
, target
, thunks_size
, cfg
? mono_method_full_name (cfg
->method
, TRUE
) : mono_method_full_name (jinfo_get_method (ji
), TRUE
));
3849 g_assert_not_reached ();
/* Write the thunk, point the call at it, and flush the icache for
 * the patched call instruction. */
3852 emit_thunk (target_thunk
, target
);
3853 arm_patch (code
, target_thunk
);
3854 mono_arch_flush_icache (code
, 4);
3856 mono_mini_arch_unlock ();
/*
 * arm_patch_general:
 *
 *   Patch the call/branch at CODE so control transfers to TARGET.  Handles:
 * a direct B/BL (rewriting the signed 24-bit displacement, converting BL to
 * BLX when TARGET has the Thumb bit set), and the load-constant call
 * sequences, where only the address word embedded in the code is rewritten.
 * When a B/BL displacement is out of range, falls back to handle_thunk ().
 *
 * NOTE(review): extraction dropped some lines of this function (declarations
 * such as tbits/ccode, braces, and a few statements); the surviving text is
 * kept byte-identical below.
 */
3861 arm_patch_general (MonoCompile
*cfg
, MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
3863 guint32
*code32
= (guint32
*)code
;
3864 guint32 ins
= *code32
;
/* Bits 27:25 of the instruction select the primary opcode group. */
3865 guint32 prim
= (ins
>> 25) & 7;
3866 guint32 tval
= GPOINTER_TO_UINT (target
);
3868 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3869 if (prim
== 5) { /* 101b */
3870 /* the diff starts 8 bytes from the branch opcode */
3871 gint diff
= target
- code
- 8;
3873 gint tmask
= 0xffffffff;
3874 if (tval
& 1) { /* entering thumb mode */
3875 diff
= target
- 1 - code
- 8;
3876 g_assert (thumb_supported
);
3877 tbits
= 0xf << 28; /* bl->blx bit pattern */
3878 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
3879 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3883 tmask
= ~(1 << 24); /* clear the link bit */
3884 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement fits the signed 24-bit field (2^25 - 1). */
3889 if (diff
<= 33554431) {
3891 ins
= (ins
& 0xff000000) | diff
;
3893 *code32
= ins
| tbits
;
3897 /* diff between 0 and -33554432 */
3898 if (diff
>= -33554432) {
3900 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
3902 *code32
= ins
| tbits
;
/* Displacement out of B/BL range: route the call through a thunk. */
3907 handle_thunk (cfg
, domain
, code
, target
);
3912 * The alternative call sequences looks like this:
3914 * ldr ip, [pc] // loads the address constant
3915 * b 1f // jumps around the constant
3916 * address constant embedded in the code
3921 * There are two cases for patching:
3922 * a) at the end of method emission: in this case code points to the start
3923 * of the call sequence
3924 * b) during runtime patching of the call site: in this case code points
3925 * to the mov pc, ip instruction
3927 * We have to handle also the thunk jump code sequence:
3931 * address constant // execution never reaches here
3933 if ((ins
& 0x0ffffff0) == 0x12fff10) {
3934 /* Branch and exchange: the address is constructed in a reg
3935 * We can patch BX when the code sequence is the following:
3936 * ldr ip, [pc, #0] ; 0x8
/* Rebuild the expected instruction sequence into ccode so we can match
 * it against the code being patched. */
3943 guint8
*emit
= (guint8
*)ccode
;
3944 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3946 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3947 ARM_BX (emit
, ARMREG_IP
);
3949 /*patching from magic trampoline*/
3950 if (ins
== ccode
[3]) {
3951 g_assert (code32
[-4] == ccode
[0]);
3952 g_assert (code32
[-3] == ccode
[1]);
3953 g_assert (code32
[-1] == ccode
[2]);
/* Rewrite only the embedded address constant. */
3954 code32
[-2] = (guint32
)(gsize
)target
;
3957 /*patching from JIT*/
3958 if (ins
== ccode
[0]) {
3959 g_assert (code32
[1] == ccode
[1]);
3960 g_assert (code32
[3] == ccode
[2]);
3961 g_assert (code32
[4] == ccode
[3]);
3962 code32
[2] = (guint32
)(gsize
)target
;
3965 g_assert_not_reached ();
/* BLX (register) call sequence: ldr ip, [pc]; ...; blx ip. */
3966 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
3974 guint8
*emit
= (guint8
*)ccode
;
3975 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3977 ARM_BLX_REG (emit
, ARMREG_IP
);
3979 g_assert (code32
[-3] == ccode
[0]);
3980 g_assert (code32
[-2] == ccode
[1]);
3981 g_assert (code32
[0] == ccode
[2]);
3983 code32
[-1] = (guint32
)(gsize
)target
;
/* Remaining case: ldr/mov lr,pc/mov pc,ip (or bx ip) sequence. */
3986 guint32
*tmp
= ccode
;
3987 guint8
*emit
= (guint8
*)tmp
;
3988 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
3989 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
3990 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
3991 ARM_BX (emit
, ARMREG_IP
);
3992 if (ins
== ccode
[2]) {
3993 g_assert_not_reached (); // should be -2 ...
3994 code32
[-1] = (guint32
)(gsize
)target
;
3997 if (ins
== ccode
[0]) {
3998 /* handles both thunk jump code and the far call sequence */
3999 code32
[2] = (guint32
)(gsize
)target
;
4002 g_assert_not_reached ();
4004 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *
 *   Convenience wrapper around arm_patch_general () for callers that have no
 * MonoCompile/MonoDomain context available.
 */
4008 arm_patch (guchar
*code
, const guchar
*target
)
4010 arm_patch_general (NULL
, NULL
, code
, target
);
/*
 * mono_arm_is_rotated_imm8:
 *
 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
 * to be used with the emit macros).
 * Return -1 otherwise.
 *
 * ARM data-processing immediates are an 8-bit value rotated right by an even
 * amount; we probe every even rotation until the value fits in 8 bits.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 res, i;

	for (i = 0; i < 31; i += 2) {
		/*
		 * Rotate val left by i bits.  The i == 0 case must be handled
		 * separately: the previous expression evaluated (val << 32),
		 * which is undefined behavior for a 32-bit operand in C.
		 */
		res = i ? ((val << (32 - i)) | (val >> i)) : val;
		if (res & ~0xff)
			continue;
		/* Rotation amount pre-adjusted for the emit macros. */
		*rot_amount = i ? 32 - i : 0;
		return res;
	}
	return -1;
}
/*
 * mono_arm_emit_load_imm:
 *
 *   Emit instructions loading the 32-bit constant VAL into DREG, returning
 * the advanced code pointer.  Encodings tried, cheapest first: a single
 * MOV/MVN with a rotated 8-bit immediate, a MOVW/MOVT pair, and finally a
 * byte-by-byte MOV + up to three ADDs.
 *
 * NOTE(review): extraction dropped several lines here (the #ifdef/return
 * structure, a v7_supported guard before the MOVW/MOVT pair, and some
 * braces); the surviving text is kept byte-identical below.
 */
4037 * Emits in code a sequence of instructions that load the value 'val'
4038 * into the dreg register. Uses at most 4 instructions.
4041 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
4043 int imm8
, rot_amount
;
4045 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4046 /* skip the constant pool */
/* Debug option: force a fixed-size two-instruction MOVW/MOVT encoding
 * so every constant load has the same length. */
4052 if (mini_get_debug_options()->single_imm_size
&& v7_supported
) {
4053 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4054 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
/* Single instruction: val (or its complement, via MVN) encodes as a
 * rotated 8-bit immediate. */
4058 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
4059 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
4060 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
4061 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* MOVW/MOVT pair: low then high halfword.  NOTE(review): presumably
 * guarded by v7_supported in the original -- confirm. */
4064 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
4066 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
/* Pre-v7 fallback: build the constant one byte at a time, skipping
 * zero bytes; rotations 24/16/8 place each byte in position. */
4070 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
4072 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4074 if (val
& 0xFF0000) {
4075 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4077 if (val
& 0xFF000000) {
4078 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4080 } else if (val
& 0xFF00) {
4081 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
4082 if (val
& 0xFF0000) {
4083 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4085 if (val
& 0xFF000000) {
4086 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4088 } else if (val
& 0xFF0000) {
4089 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
4090 if (val
& 0xFF000000) {
4091 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
4094 //g_assert_not_reached ();
/* Return the value of the thumb_supported flag (Thumb interworking
 * available on this CPU). */
4100 mono_arm_thumb_supported (void)
4102 return thumb_supported
;
/* Return the value of the eabi_supported flag (EABI calling convention
 * in use). */
4106 mono_arm_eabi_supported (void)
4108 return eabi_supported
;
/* Return the alignment of 8-byte values for this ABI.  NOTE(review): the
 * function body was lost in extraction -- confirm against the original
 * source. */
4112 mono_arm_i8_align (void)
/*
 * emit_move_return_value:
 *
 *   Emit code moving the return value of the call INS to its destination:
 * for RegTypeStructByVal (and float-register struct) returns, the values
 * returned in core/VFP registers are stored through the saved vret address
 * (loaded into LR); for scalar float returns under the softfp ABI, r0 (or
 * the r0:r1 pair) is transferred into the VFP destination register, with an
 * extra single<->double conversion for R4 returns.  Returns the advanced
 * code pointer.
 *
 * NOTE(review): extraction dropped lines from this function (declarations
 * of call/cinfo/i, several case labels, breaks and braces); the surviving
 * text is kept byte-identical below.
 */
4120 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
4125 call
= (MonoCallInst
*)ins
;
4126 cinfo
= call
->call_info
;
4128 switch (cinfo
->ret
.storage
) {
4129 case RegTypeStructByVal
:
4131 MonoInst
*loc
= cfg
->arch
.vret_addr_loc
;
4134 if (cinfo
->ret
.storage
== RegTypeStructByVal
&& cinfo
->ret
.nregs
== 1) {
4135 /* The JIT treats this as a normal call */
4139 /* Load the destination address */
4140 g_assert (loc
&& loc
->opcode
== OP_REGOFFSET
);
4142 if (arm_is_imm12 (loc
->inst_offset
)) {
4143 ARM_LDR_IMM (code
, ARMREG_LR
, loc
->inst_basereg
, loc
->inst_offset
);
/* Offset too large for an imm12: materialize it in LR first. */
4145 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, loc
->inst_offset
);
4146 ARM_LDR_REG_REG (code
, ARMREG_LR
, loc
->inst_basereg
, ARMREG_LR
);
4149 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4150 int rsize
= cinfo
->ret
.struct_size
;
/* Store each core return register to the destination, using a
 * byte/halfword/word store sized by the remaining struct bytes. */
4152 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4153 g_assert (rsize
>= 0);
4158 ARM_STRB_IMM (code
, i
, ARMREG_LR
, i
* 4);
4161 ARM_STRH_IMM (code
, i
, ARMREG_LR
, i
* 4);
4164 ARM_STR_IMM (code
, i
, ARMREG_LR
, i
* 4);
/* Struct returned in VFP registers: store each single (esize 4)
 * or double element to the destination. */
4170 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
4171 if (cinfo
->ret
.esize
== 4)
4172 ARM_FSTS (code
, cinfo
->ret
.reg
+ i
, ARMREG_LR
, i
* 4);
4174 ARM_FSTD (code
, cinfo
->ret
.reg
+ (i
* 2), ARMREG_LR
, i
* 8);
/* Scalar return values. */
4183 switch (ins
->opcode
) {
4186 case OP_FCALL_MEMBASE
:
4188 MonoType
*sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4189 if (sig_ret
->type
== MONO_TYPE_R4
) {
4190 if (IS_HARD_FLOAT
) {
/* R4 returned in s0: widen into the double dreg. */
4191 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
/* softfp: R4 comes back in r0; move to VFP then widen. */
4193 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4194 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4197 if (IS_HARD_FLOAT
) {
4198 ARM_CPYD (code
, ins
->dreg
, ARM_VFP_D0
);
/* softfp: R8 comes back in the r0:r1 pair. */
4200 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
4207 case OP_RCALL_MEMBASE
: {
4212 sig_ret
= mini_get_underlying_type (((MonoCallInst
*)ins
)->signature
->ret
);
4213 g_assert (sig_ret
->type
== MONO_TYPE_R4
);
4214 if (IS_HARD_FLOAT
) {
4215 ARM_CPYS (code
, ins
->dreg
, ARM_VFP_F0
);
4217 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
4218 ARM_CPYS (code
, ins
->dreg
, ins
->dreg
);
4230 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
4234 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
4235 MonoInst
*last_ins
= NULL
;
4237 int imm8
, rot_amount
;
4239 /* we don't align basic blocks of loops on arm */
4241 if (cfg
->verbose_level
> 2)
4242 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
4244 cpos
= bb
->max_offset
;
4246 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
4247 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4248 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4249 code
= emit_call_seq (cfg
, code
);
4252 MONO_BB_FOR_EACH_INS (bb
, ins
) {
4253 guint offset
= code
- cfg
->native_code
;
4254 set_code_cursor (cfg
, code
);
4255 max_len
= ins_get_size (ins
->opcode
);
4256 code
= realloc_code (cfg
, max_len
);
4257 // if (ins->cil_code)
4258 // g_print ("cil code\n");
4259 mono_debug_record_line_number (cfg
, ins
, offset
);
4261 switch (ins
->opcode
) {
4262 case OP_MEMORY_BARRIER
:
4264 ARM_DMB (code
, ARM_DMB_ISH
);
4265 } else if (v6_supported
) {
4266 ARM_MOV_REG_IMM8 (code
, ARMREG_R0
, 0);
4267 ARM_MCR (code
, 15, 0, ARMREG_R0
, 7, 10, 5);
4271 code
= emit_tls_get (code
, ins
->dreg
, ins
->inst_offset
);
4274 code
= emit_tls_set (code
, ins
->sreg1
, ins
->inst_offset
);
4276 case OP_ATOMIC_EXCHANGE_I4
:
4277 case OP_ATOMIC_CAS_I4
:
4278 case OP_ATOMIC_ADD_I4
: {
4282 g_assert (v7_supported
);
4285 if (ins
->sreg1
!= ARMREG_IP
&& ins
->sreg2
!= ARMREG_IP
&& ins
->sreg3
!= ARMREG_IP
)
4287 else if (ins
->sreg1
!= ARMREG_R0
&& ins
->sreg2
!= ARMREG_R0
&& ins
->sreg3
!= ARMREG_R0
)
4289 else if (ins
->sreg1
!= ARMREG_R1
&& ins
->sreg2
!= ARMREG_R1
&& ins
->sreg3
!= ARMREG_R1
)
4293 g_assert (cfg
->arch
.atomic_tmp_offset
!= -1);
4294 ARM_STR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4296 switch (ins
->opcode
) {
4297 case OP_ATOMIC_EXCHANGE_I4
:
4299 ARM_DMB (code
, ARM_DMB_ISH
);
4300 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4301 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4302 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4304 ARM_B_COND (code
, ARMCOND_NE
, 0);
4305 arm_patch (buf
[1], buf
[0]);
4307 case OP_ATOMIC_CAS_I4
:
4308 ARM_DMB (code
, ARM_DMB_ISH
);
4310 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4311 ARM_CMP_REG_REG (code
, ARMREG_LR
, ins
->sreg3
);
4313 ARM_B_COND (code
, ARMCOND_NE
, 0);
4314 ARM_STREX_REG (code
, tmpreg
, ins
->sreg2
, ins
->sreg1
);
4315 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4317 ARM_B_COND (code
, ARMCOND_NE
, 0);
4318 arm_patch (buf
[2], buf
[0]);
4319 arm_patch (buf
[1], code
);
4321 case OP_ATOMIC_ADD_I4
:
4323 ARM_DMB (code
, ARM_DMB_ISH
);
4324 ARM_LDREX_REG (code
, ARMREG_LR
, ins
->sreg1
);
4325 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->sreg2
);
4326 ARM_STREX_REG (code
, tmpreg
, ARMREG_LR
, ins
->sreg1
);
4327 ARM_CMP_REG_IMM (code
, tmpreg
, 0, 0);
4329 ARM_B_COND (code
, ARMCOND_NE
, 0);
4330 arm_patch (buf
[1], buf
[0]);
4333 g_assert_not_reached ();
4336 ARM_DMB (code
, ARM_DMB_ISH
);
4337 if (tmpreg
!= ins
->dreg
)
4338 ARM_LDR_IMM (code
, tmpreg
, cfg
->frame_reg
, cfg
->arch
.atomic_tmp_offset
);
4339 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_LR
);
4342 case OP_ATOMIC_LOAD_I1
:
4343 case OP_ATOMIC_LOAD_U1
:
4344 case OP_ATOMIC_LOAD_I2
:
4345 case OP_ATOMIC_LOAD_U2
:
4346 case OP_ATOMIC_LOAD_I4
:
4347 case OP_ATOMIC_LOAD_U4
:
4348 case OP_ATOMIC_LOAD_R4
:
4349 case OP_ATOMIC_LOAD_R8
: {
4350 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4351 ARM_DMB (code
, ARM_DMB_ISH
);
4353 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4355 switch (ins
->opcode
) {
4356 case OP_ATOMIC_LOAD_I1
:
4357 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4359 case OP_ATOMIC_LOAD_U1
:
4360 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4362 case OP_ATOMIC_LOAD_I2
:
4363 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4365 case OP_ATOMIC_LOAD_U2
:
4366 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4368 case OP_ATOMIC_LOAD_I4
:
4369 case OP_ATOMIC_LOAD_U4
:
4370 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4372 case OP_ATOMIC_LOAD_R4
:
4374 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4375 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4377 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4378 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4379 ARM_FLDS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4380 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
4381 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4384 case OP_ATOMIC_LOAD_R8
:
4385 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_basereg
, ARMREG_LR
);
4386 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4390 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4391 ARM_DMB (code
, ARM_DMB_ISH
);
4394 case OP_ATOMIC_STORE_I1
:
4395 case OP_ATOMIC_STORE_U1
:
4396 case OP_ATOMIC_STORE_I2
:
4397 case OP_ATOMIC_STORE_U2
:
4398 case OP_ATOMIC_STORE_I4
:
4399 case OP_ATOMIC_STORE_U4
:
4400 case OP_ATOMIC_STORE_R4
:
4401 case OP_ATOMIC_STORE_R8
: {
4402 if (ins
->backend
.memory_barrier_kind
!= MONO_MEMORY_BARRIER_NONE
)
4403 ARM_DMB (code
, ARM_DMB_ISH
);
4405 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4407 switch (ins
->opcode
) {
4408 case OP_ATOMIC_STORE_I1
:
4409 case OP_ATOMIC_STORE_U1
:
4410 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4412 case OP_ATOMIC_STORE_I2
:
4413 case OP_ATOMIC_STORE_U2
:
4414 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4416 case OP_ATOMIC_STORE_I4
:
4417 case OP_ATOMIC_STORE_U4
:
4418 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4420 case OP_ATOMIC_STORE_R4
:
4422 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4423 ARM_FSTS (code
, ins
->sreg1
, ARMREG_LR
, 0);
4425 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4426 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4427 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4428 ARM_FSTS (code
, vfp_scratch1
, ARMREG_LR
, 0);
4429 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4432 case OP_ATOMIC_STORE_R8
:
4433 ARM_ADD_REG_REG (code
, ARMREG_LR
, ins
->inst_destbasereg
, ARMREG_LR
);
4434 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4438 if (ins
->backend
.memory_barrier_kind
== MONO_MEMORY_BARRIER_SEQ
)
4439 ARM_DMB (code
, ARM_DMB_ISH
);
4443 ARM_SMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4446 ARM_UMULL_REG_REG (code
, ins
->backend
.reg3
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4448 case OP_STOREI1_MEMBASE_IMM
:
4449 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
4450 g_assert (arm_is_imm12 (ins
->inst_offset
));
4451 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4453 case OP_STOREI2_MEMBASE_IMM
:
4454 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
4455 g_assert (arm_is_imm8 (ins
->inst_offset
));
4456 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4458 case OP_STORE_MEMBASE_IMM
:
4459 case OP_STOREI4_MEMBASE_IMM
:
4460 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
4461 g_assert (arm_is_imm12 (ins
->inst_offset
));
4462 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
4464 case OP_STOREI1_MEMBASE_REG
:
4465 g_assert (arm_is_imm12 (ins
->inst_offset
));
4466 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4468 case OP_STOREI2_MEMBASE_REG
:
4469 g_assert (arm_is_imm8 (ins
->inst_offset
));
4470 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4472 case OP_STORE_MEMBASE_REG
:
4473 case OP_STOREI4_MEMBASE_REG
:
4474 /* this case is special, since it happens for spill code after lowering has been called */
4475 if (arm_is_imm12 (ins
->inst_offset
)) {
4476 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4478 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4479 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
4482 case OP_STOREI1_MEMINDEX
:
4483 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4485 case OP_STOREI2_MEMINDEX
:
4486 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4488 case OP_STORE_MEMINDEX
:
4489 case OP_STOREI4_MEMINDEX
:
4490 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
4493 g_assert_not_reached ();
4495 case OP_LOAD_MEMINDEX
:
4496 case OP_LOADI4_MEMINDEX
:
4497 case OP_LOADU4_MEMINDEX
:
4498 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4500 case OP_LOADI1_MEMINDEX
:
4501 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4503 case OP_LOADU1_MEMINDEX
:
4504 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4506 case OP_LOADI2_MEMINDEX
:
4507 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4509 case OP_LOADU2_MEMINDEX
:
4510 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
4512 case OP_LOAD_MEMBASE
:
4513 case OP_LOADI4_MEMBASE
:
4514 case OP_LOADU4_MEMBASE
:
4515 /* this case is special, since it happens for spill code after lowering has been called */
4516 if (arm_is_imm12 (ins
->inst_offset
)) {
4517 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4519 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4520 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
4523 case OP_LOADI1_MEMBASE
:
4524 g_assert (arm_is_imm8 (ins
->inst_offset
));
4525 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4527 case OP_LOADU1_MEMBASE
:
4528 g_assert (arm_is_imm12 (ins
->inst_offset
));
4529 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4531 case OP_LOADU2_MEMBASE
:
4532 g_assert (arm_is_imm8 (ins
->inst_offset
));
4533 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4535 case OP_LOADI2_MEMBASE
:
4536 g_assert (arm_is_imm8 (ins
->inst_offset
));
4537 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4539 case OP_ICONV_TO_I1
:
4540 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
4541 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
4543 case OP_ICONV_TO_I2
:
4544 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4545 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4547 case OP_ICONV_TO_U1
:
4548 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
4550 case OP_ICONV_TO_U2
:
4551 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
4552 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
4556 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
4558 case OP_COMPARE_IMM
:
4559 case OP_ICOMPARE_IMM
:
4560 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4561 g_assert (imm8
>= 0);
4562 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
4566 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4567 * So instead of emitting a trap, we emit a call a C function and place a
4570 //*(int*)code = 0xef9f0001;
4573 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
4574 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_break
));
4575 code
= emit_call_seq (cfg
, code
);
4577 case OP_RELAXED_NOP
:
4582 case OP_DUMMY_ICONST
:
4583 case OP_DUMMY_R8CONST
:
4584 case OP_DUMMY_R4CONST
:
4585 case OP_NOT_REACHED
:
4588 case OP_IL_SEQ_POINT
:
4589 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4591 case OP_SEQ_POINT
: {
4593 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4594 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4595 MonoInst
*ss_method_var
= cfg
->arch
.seq_point_ss_method_var
;
4596 MonoInst
*bp_method_var
= cfg
->arch
.seq_point_bp_method_var
;
4598 int dreg
= ARMREG_LR
;
4601 if (cfg
->soft_breakpoints
) {
4602 g_assert (!cfg
->compile_aot
);
4607 * For AOT, we use one got slot per method, which will point to a
4608 * SeqPointInfo structure, containing all the information required
4609 * by the code below.
4611 if (cfg
->compile_aot
) {
4612 g_assert (info_var
);
4613 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4616 if (!cfg
->soft_breakpoints
&& !cfg
->compile_aot
) {
4618 * Read from the single stepping trigger page. This will cause a
4619 * SIGSEGV when single stepping is enabled.
4620 * We do this _before_ the breakpoint, so single stepping after
4621 * a breakpoint is hit will step to the next IL offset.
4623 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
4626 /* Single step check */
4627 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
4628 if (cfg
->soft_breakpoints
) {
4629 /* Load the address of the sequence point method variable. */
4630 var
= ss_method_var
;
4632 g_assert (var
->opcode
== OP_REGOFFSET
);
4633 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4634 /* Read the value and check whether it is non-zero. */
4635 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4636 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4637 /* Call it conditionally. */
4638 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4640 if (cfg
->compile_aot
) {
4641 /* Load the trigger page addr from the variable initialized in the prolog */
4642 var
= ss_trigger_page_var
;
4644 g_assert (var
->opcode
== OP_REGOFFSET
);
4645 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4647 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
4649 *(int*)code
= (int)(gsize
)ss_trigger_page
;
4652 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4656 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
4658 /* Breakpoint check */
4659 if (cfg
->compile_aot
) {
4660 const guint32 offset
= code
- cfg
->native_code
;
4664 code
= emit_ldr_imm (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4665 /* Add the offset */
4666 val
= ((offset
/ 4) * sizeof (target_mgreg_t
)) + MONO_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
4667 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4668 if (arm_is_imm12 ((int)val
)) {
4669 ARM_LDR_IMM (code
, dreg
, dreg
, val
);
4671 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
4673 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
4675 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
4676 g_assert (!(val
& 0xFF000000));
4678 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
4680 /* What is faster, a branch or a load ? */
4681 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
4682 /* The breakpoint instruction */
4683 if (cfg
->soft_breakpoints
)
4684 ARM_BLX_REG_COND (code
, ARMCOND_NE
, dreg
);
4686 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
4687 } else if (cfg
->soft_breakpoints
) {
4688 /* Load the address of the breakpoint method into ip. */
4689 var
= bp_method_var
;
4691 g_assert (var
->opcode
== OP_REGOFFSET
);
4692 g_assert (arm_is_imm12 (var
->inst_offset
));
4693 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
4696 * A placeholder for a possible breakpoint inserted by
4697 * mono_arch_set_breakpoint ().
4702 * A placeholder for a possible breakpoint inserted by
4703 * mono_arch_set_breakpoint ().
4705 for (i
= 0; i
< 4; ++i
)
4710 * Add an additional nop so skipping the bp doesn't cause the ip to point
4711 * to another IL offset.
4719 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4722 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4726 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4729 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4730 g_assert (imm8
>= 0);
4731 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4735 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4736 g_assert (imm8
>= 0);
4737 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4741 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4742 g_assert (imm8
>= 0);
4743 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4746 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4747 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4749 case OP_IADD_OVF_UN
:
4750 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4751 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4754 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4755 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4757 case OP_ISUB_OVF_UN
:
4758 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4759 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4761 case OP_ADD_OVF_CARRY
:
4762 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4763 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4765 case OP_ADD_OVF_UN_CARRY
:
4766 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4767 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4769 case OP_SUB_OVF_CARRY
:
4770 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4771 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4773 case OP_SUB_OVF_UN_CARRY
:
4774 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4775 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4779 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4782 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4783 g_assert (imm8
>= 0);
4784 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4787 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4791 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4795 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4796 g_assert (imm8
>= 0);
4797 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4801 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4802 g_assert (imm8
>= 0);
4803 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4805 case OP_ARM_RSBS_IMM
:
4806 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4807 g_assert (imm8
>= 0);
4808 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4810 case OP_ARM_RSC_IMM
:
4811 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4812 g_assert (imm8
>= 0);
4813 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4816 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4820 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4821 g_assert (imm8
>= 0);
4822 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4825 g_assert (v7s_supported
|| v7k_supported
);
4826 ARM_SDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4829 g_assert (v7s_supported
|| v7k_supported
);
4830 ARM_UDIV (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4833 g_assert (v7s_supported
|| v7k_supported
);
4834 ARM_SDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4835 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4838 g_assert (v7s_supported
|| v7k_supported
);
4839 ARM_UDIV (code
, ARMREG_LR
, ins
->sreg1
, ins
->sreg2
);
4840 ARM_MLS (code
, ins
->dreg
, ARMREG_LR
, ins
->sreg2
, ins
->sreg1
);
4844 g_assert_not_reached ();
4846 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4850 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4851 g_assert (imm8
>= 0);
4852 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4855 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4859 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
4860 g_assert (imm8
>= 0);
4861 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
4864 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4869 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4870 else if (ins
->dreg
!= ins
->sreg1
)
4871 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4874 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4879 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4880 else if (ins
->dreg
!= ins
->sreg1
)
4881 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4884 case OP_ISHR_UN_IMM
:
4886 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
4887 else if (ins
->dreg
!= ins
->sreg1
)
4888 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4891 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4894 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4897 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
4900 if (ins
->dreg
== ins
->sreg2
)
4901 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4903 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
4906 g_assert_not_reached ();
4909 /* FIXME: handle ovf/ sreg2 != dreg */
4910 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4911 /* FIXME: MUL doesn't set the C/O flags on ARM */
4913 case OP_IMUL_OVF_UN
:
4914 /* FIXME: handle ovf/ sreg2 != dreg */
4915 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4916 /* FIXME: MUL doesn't set the C/O flags on ARM */
4919 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
4922 /* Load the GOT offset */
4923 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)(gsize
)ins
->inst_i1
, ins
->inst_p0
);
4924 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4926 *(gpointer
*)code
= NULL
;
4928 /* Load the value from the GOT */
4929 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4931 case OP_OBJC_GET_SELECTOR
:
4932 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_OBJC_SELECTOR_REF
, ins
->inst_p0
);
4933 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
4935 *(gpointer
*)code
= NULL
;
4937 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
4939 case OP_ICONV_TO_I4
:
4940 case OP_ICONV_TO_U4
:
4942 if (ins
->dreg
!= ins
->sreg1
)
4943 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4946 int saved
= ins
->sreg2
;
4947 if (ins
->sreg2
== ARM_LSW_REG
) {
4948 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
4951 if (ins
->sreg1
!= ARM_LSW_REG
)
4952 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
4953 if (saved
!= ARM_MSW_REG
)
4954 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
4958 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4959 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4962 if (IS_VFP
&& ins
->dreg
!= ins
->sreg1
)
4963 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
4965 case OP_MOVE_F_TO_I4
:
4967 ARM_FMRS (code
, ins
->dreg
, ins
->sreg1
);
4969 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
4970 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
4971 ARM_FMRS (code
, ins
->dreg
, vfp_scratch1
);
4972 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
4975 case OP_MOVE_I4_TO_F
:
4977 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4979 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
4980 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4983 case OP_FCONV_TO_R4
:
4986 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4988 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
4989 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4994 case OP_TAILCALL_PARAMETER
:
4995 // This opcode helps compute sizes, i.e.
4996 // of the subsequent OP_TAILCALL, but contributes no code.
4997 g_assert (ins
->next
);
5001 case OP_TAILCALL_MEMBASE
:
5002 case OP_TAILCALL_REG
: {
5003 gboolean
const tailcall_membase
= ins
->opcode
== OP_TAILCALL_MEMBASE
;
5004 gboolean
const tailcall_reg
= ins
->opcode
== OP_TAILCALL_REG
;
5005 MonoCallInst
*call
= (MonoCallInst
*)ins
;
5007 max_len
+= call
->stack_usage
/ sizeof (target_mgreg_t
) * ins_get_size (OP_TAILCALL_PARAMETER
);
5010 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5012 code
= realloc_code (cfg
, max_len
);
5014 // For reg and membase, get destination in IP.
5017 g_assert (ins
->sreg1
> -1);
5018 if (ins
->sreg1
!= ARMREG_IP
)
5019 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg1
);
5020 } else if (tailcall_membase
) {
5021 g_assert (ins
->sreg1
> -1);
5022 if (!arm_is_imm12 (ins
->inst_offset
)) {
5023 g_assert (ins
->sreg1
!= ARMREG_IP
); // temp in emit_big_add
5024 code
= emit_big_add (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5025 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_IP
, 0);
5027 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, ins
->inst_offset
);
5032 * The stack looks like the following:
5033 * <caller argument area>
5036 * <callee argument area>
5037 * <optionally saved IP> (about to be)
5038 * Need to copy the arguments from the callee argument area to
5039 * the caller argument area, and pop the frame.
5041 if (call
->stack_usage
) {
5042 int i
, prev_sp_offset
= 0;
5044 // When we get here, the parameters to the tailcall are already formed,
5045 // in registers and at the bottom of the grow-down stack.
5047 // Our goal is generally preserve parameters, and trim the stack,
5048 // and, before trimming stack, move parameters from the bottom of the
5049 // frame to the bottom of the trimmed frame.
5051 // For the case of large frames, and presently therefore always,
5052 // IP is used as an adjusted frame_reg.
5053 // Be conservative and save IP around the movement
5054 // of parameters from the bottom of frame to top of the frame.
5055 const gboolean save_ip
= tailcall_membase
|| tailcall_reg
;
5057 ARM_PUSH (code
, 1 << ARMREG_IP
);
5059 // When moving stacked parameters from the bottom
5060 // of the frame (sp) to the top of the frame (ip),
5061 // account, 0 or 4, for the conditional save of IP.
5062 const int offset_sp
= save_ip
? 4 : 0;
5063 const int offset_ip
= (save_ip
&& (cfg
->frame_reg
== ARMREG_SP
)) ? 4 : 0;
5065 /* Compute size of saved registers restored below */
5067 prev_sp_offset
= 2 * 4;
5069 prev_sp_offset
= 1 * 4;
5070 for (i
= 0; i
< 16; ++i
) {
5071 if (cfg
->used_int_regs
& (1 << i
))
5072 prev_sp_offset
+= 4;
5075 // Point IP at the start of where the parameters will go after trimming stack.
5076 // After locals and saved registers.
5077 code
= emit_big_add (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->stack_usage
+ prev_sp_offset
);
5079 /* Copy arguments on the stack to our argument area */
5080 // FIXME a fixed size memcpy is desirable here,
5081 // at least for larger values of stack_usage.
5083 // FIXME For most functions, with frames < 4K, we can use frame_reg directly here instead of IP.
5084 // See https://github.com/mono/mono/pull/12079
5085 // See https://github.com/mono/mono/pull/12079/commits/93e7007a9567b78fa8152ce404b372b26e735516
5086 for (i
= 0; i
< call
->stack_usage
; i
+= sizeof (target_mgreg_t
)) {
5087 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, i
+ offset_sp
);
5088 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_IP
, i
+ offset_ip
);
5092 ARM_POP (code
, 1 << ARMREG_IP
);
5096 * Keep in sync with mono_arch_emit_epilog
5098 g_assert (!cfg
->method
->save_lmf
);
5099 code
= emit_big_add_temp (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
, ARMREG_LR
);
5101 if (cfg
->used_int_regs
)
5102 ARM_POP (code
, cfg
->used_int_regs
);
5103 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
5105 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
5108 if (tailcall_reg
|| tailcall_membase
) {
5109 code
= emit_jmp_reg (code
, ARMREG_IP
);
5111 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, call
->method
);
5113 if (cfg
->compile_aot
) {
5114 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5116 *(gpointer
*)code
= NULL
;
5118 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
5120 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5121 cfg
->thunk_area
+= THUNK_SIZE
;
5127 /* ensure ins->sreg1 is not NULL */
5128 ARM_LDRB_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
5131 g_assert (cfg
->sig_cookie
< 128);
5132 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
5133 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
5143 call
= (MonoCallInst
*)ins
;
5146 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5148 mono_call_add_patch_info (cfg
, call
, code
- cfg
->native_code
);
5150 code
= emit_call_seq (cfg
, code
);
5151 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5152 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5153 code
= emit_move_return_value (cfg
, ins
, code
);
5160 case OP_VOIDCALL_REG
:
5163 code
= emit_float_args (cfg
, (MonoCallInst
*)ins
, code
, &max_len
, &offset
);
5165 code
= emit_call_reg (code
, ins
->sreg1
);
5166 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5167 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5168 code
= emit_move_return_value (cfg
, ins
, code
);
5170 case OP_FCALL_MEMBASE
:
5171 case OP_RCALL_MEMBASE
:
5172 case OP_LCALL_MEMBASE
:
5173 case OP_VCALL_MEMBASE
:
5174 case OP_VCALL2_MEMBASE
:
5175 case OP_VOIDCALL_MEMBASE
:
5176 case OP_CALL_MEMBASE
: {
5177 g_assert (ins
->sreg1
!= ARMREG_LR
);
5178 call
= (MonoCallInst
*)ins
;
5181 code
= emit_float_args (cfg
, call
, code
, &max_len
, &offset
);
5182 if (!arm_is_imm12 (ins
->inst_offset
)) {
5183 /* sreg1 might be IP */
5184 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5185 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, ins
->inst_offset
);
5186 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, ARMREG_LR
);
5187 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5188 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, 0);
5190 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5191 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
5193 ins
->flags
|= MONO_INST_GC_CALLSITE
;
5194 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
5195 code
= emit_move_return_value (cfg
, ins
, code
);
5198 case OP_GENERIC_CLASS_INIT
: {
5202 byte_offset
= MONO_STRUCT_OFFSET (MonoVTable
, initialized
);
5204 g_assert (arm_is_imm8 (byte_offset
));
5205 ARM_LDRSB_IMM (code
, ARMREG_IP
, ins
->sreg1
, byte_offset
);
5206 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
5208 ARM_B_COND (code
, ARMCOND_NE
, 0);
5210 /* Uninitialized case */
5211 g_assert (ins
->sreg1
== ARMREG_R0
);
5213 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5214 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_generic_class_init
));
5215 code
= emit_call_seq (cfg
, code
);
5217 /* Initialized case */
5218 arm_patch (jump
, code
);
5222 /* round the size to 8 bytes */
5223 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5224 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, (MONO_ARCH_FRAME_ALIGNMENT
- 1));
5225 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
5226 /* memzero the area: dreg holds the size, sp is the pointer */
5227 if (ins
->flags
& MONO_INST_INIT
) {
5228 guint8
*start_loop
, *branch_to_cond
;
5229 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
5230 branch_to_cond
= code
;
5233 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
5234 arm_patch (branch_to_cond
, code
);
5235 /* decrement by 4 and set flags */
5236 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, sizeof (target_mgreg_t
));
5237 ARM_B_COND (code
, ARMCOND_GE
, 0);
5238 arm_patch (code
- 4, start_loop
);
5240 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_SP
);
5241 if (cfg
->param_area
)
5242 code
= emit_sub_imm (code
, ARMREG_SP
, ARMREG_SP
, ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
));
5247 MonoInst
*var
= cfg
->dyn_call_var
;
5248 guint8
*labels
[16];
5250 g_assert (var
->opcode
== OP_REGOFFSET
);
5251 g_assert (arm_is_imm12 (var
->inst_offset
));
5253 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5254 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg1
);
5256 ARM_MOV_REG_REG (code
, ARMREG_IP
, ins
->sreg2
);
5258 /* Save args buffer */
5259 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
5261 /* Set fp argument registers */
5262 if (IS_HARD_FLOAT
) {
5263 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, has_fpregs
));
5264 ARM_CMP_REG_IMM (code
, ARMREG_R0
, 0, 0);
5266 ARM_B_COND (code
, ARMCOND_EQ
, 0);
5267 for (i
= 0; i
< FP_PARAM_REGS
; ++i
) {
5268 const int offset
= MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
) + (i
* sizeof (double));
5269 g_assert (arm_is_fpimm8 (offset
));
5270 ARM_FLDD (code
, i
* 2, ARMREG_LR
, offset
);
5272 arm_patch (labels
[0], code
);
5275 /* Allocate callee area */
5276 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5277 ARM_SHL_IMM (code
, ARMREG_R1
, ARMREG_R1
, 2);
5278 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_R1
);
5280 /* Set stack args */
5282 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, n_stackargs
));
5283 /* R2 = pointer into regs */
5284 code
= emit_big_add (code
, ARMREG_R2
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (PARAM_REGS
* sizeof (target_mgreg_t
)));
5285 /* R3 = pointer to stack */
5286 ARM_MOV_REG_REG (code
, ARMREG_R3
, ARMREG_SP
);
5289 ARM_B_COND (code
, ARMCOND_AL
, 0);
5291 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R2
, 0);
5292 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R3
, 0);
5293 ARM_ADD_REG_IMM (code
, ARMREG_R2
, ARMREG_R2
, sizeof (target_mgreg_t
), 0);
5294 ARM_ADD_REG_IMM (code
, ARMREG_R3
, ARMREG_R3
, sizeof (target_mgreg_t
), 0);
5295 ARM_SUB_REG_IMM (code
, ARMREG_R1
, ARMREG_R1
, 1, 0);
5296 arm_patch (labels
[0], code
);
5297 ARM_CMP_REG_IMM (code
, ARMREG_R1
, 0, 0);
5299 ARM_B_COND (code
, ARMCOND_GT
, 0);
5300 arm_patch (labels
[2], labels
[1]);
5302 /* Set argument registers */
5303 for (i
= 0; i
< PARAM_REGS
; ++i
)
5304 ARM_LDR_IMM (code
, i
, ARMREG_LR
, MONO_STRUCT_OFFSET (DynCallArgs
, regs
) + (i
* sizeof (target_mgreg_t
)));
5307 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
5308 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5311 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
5312 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res
));
5313 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, res2
));
5315 ARM_FSTD (code
, ARM_VFP_D0
, ARMREG_IP
, MONO_STRUCT_OFFSET (DynCallArgs
, fpregs
));
5319 if (ins
->sreg1
!= ARMREG_R0
)
5320 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5321 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5322 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_throw_exception
));
5323 code
= emit_call_seq (cfg
, code
);
5327 if (ins
->sreg1
!= ARMREG_R0
)
5328 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5329 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
,
5330 GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arch_rethrow_exception
));
5331 code
= emit_call_seq (cfg
, code
);
5334 case OP_START_HANDLER
: {
5335 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5336 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5339 /* Reserve a param area, see filter-stack.exe */
5341 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5342 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5344 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5345 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5349 if (arm_is_imm12 (spvar
->inst_offset
)) {
5350 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
5352 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5353 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
5357 case OP_ENDFILTER
: {
5358 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5359 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5362 /* Free the param area */
5364 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5365 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5367 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5368 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5372 if (ins
->sreg1
!= ARMREG_R0
)
5373 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
5374 if (arm_is_imm12 (spvar
->inst_offset
)) {
5375 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5377 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5378 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5379 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5381 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5384 case OP_ENDFINALLY
: {
5385 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
5386 int param_area
= ALIGN_TO (cfg
->param_area
, MONO_ARCH_FRAME_ALIGNMENT
);
5389 /* Free the param area */
5391 if ((i
= mono_arm_is_rotated_imm8 (param_area
, &rot_amount
)) >= 0) {
5392 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
5394 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, param_area
);
5395 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
5399 if (arm_is_imm12 (spvar
->inst_offset
)) {
5400 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
5402 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
5403 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
5404 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
5406 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
5409 case OP_CALL_HANDLER
:
5410 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5411 code
= mono_arm_patchable_bl (code
, ARMCOND_AL
);
5412 cfg
->thunk_area
+= THUNK_SIZE
;
5413 for (GList
*tmp
= ins
->inst_eh_blocks
; tmp
!= bb
->clause_holes
; tmp
= tmp
->prev
)
5414 mono_cfg_add_try_hole (cfg
, ((MonoLeaveClause
*) tmp
->data
)->clause
, code
, bb
);
5417 if (ins
->dreg
!= ARMREG_R0
)
5418 ARM_MOV_REG_REG (code
, ins
->dreg
, ARMREG_R0
);
5422 ins
->inst_c0
= code
- cfg
->native_code
;
5425 /*if (ins->inst_target_bb->native_offset) {
5427 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5429 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
5430 code
= mono_arm_patchable_b (code
, ARMCOND_AL
);
5434 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
5438 * In the normal case we have:
5439 * ldr pc, [pc, ins->sreg1 << 2]
5442 * ldr lr, [pc, ins->sreg1 << 2]
5444 * After follows the data.
5445 * FIXME: add aot support.
5447 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
5448 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5449 code
= realloc_code (cfg
, max_len
);
5450 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
5452 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
5456 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5457 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5461 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5462 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
5466 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5467 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
5471 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5472 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
5476 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5477 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
5480 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5481 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5484 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5485 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LT
);
5488 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5489 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_GT
);
5492 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5493 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_LO
);
5496 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5497 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_HI
);
5499 case OP_COND_EXC_EQ
:
5500 case OP_COND_EXC_NE_UN
:
5501 case OP_COND_EXC_LT
:
5502 case OP_COND_EXC_LT_UN
:
5503 case OP_COND_EXC_GT
:
5504 case OP_COND_EXC_GT_UN
:
5505 case OP_COND_EXC_GE
:
5506 case OP_COND_EXC_GE_UN
:
5507 case OP_COND_EXC_LE
:
5508 case OP_COND_EXC_LE_UN
:
5509 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
5511 case OP_COND_EXC_IEQ
:
5512 case OP_COND_EXC_INE_UN
:
5513 case OP_COND_EXC_ILT
:
5514 case OP_COND_EXC_ILT_UN
:
5515 case OP_COND_EXC_IGT
:
5516 case OP_COND_EXC_IGT_UN
:
5517 case OP_COND_EXC_IGE
:
5518 case OP_COND_EXC_IGE_UN
:
5519 case OP_COND_EXC_ILE
:
5520 case OP_COND_EXC_ILE_UN
:
5521 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
5524 case OP_COND_EXC_IC
:
5525 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
5527 case OP_COND_EXC_OV
:
5528 case OP_COND_EXC_IOV
:
5529 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
5531 case OP_COND_EXC_NC
:
5532 case OP_COND_EXC_INC
:
5533 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
5535 case OP_COND_EXC_NO
:
5536 case OP_COND_EXC_INO
:
5537 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
5549 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
5552 /* floating point opcodes */
5554 if (cfg
->compile_aot
) {
5555 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
5557 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5559 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
5562 /* FIXME: we can optimize the imm load by dealing with part of
5563 * the displacement in LDFD (aligning to 512).
5565 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5566 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5570 if (cfg
->compile_aot
) {
5571 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
5573 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
5576 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5578 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)(gsize
)ins
->inst_p0
);
5579 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
5581 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
5584 case OP_STORER8_MEMBASE_REG
:
5585 /* This is generated by the local regalloc pass which runs after the lowering pass */
5586 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5587 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5588 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
5589 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
5591 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5594 case OP_LOADR8_MEMBASE
:
5595 /* This is generated by the local regalloc pass which runs after the lowering pass */
5596 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
5597 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
5598 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
5599 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
5601 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5604 case OP_STORER4_MEMBASE_REG
:
5605 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5607 ARM_FSTS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5609 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5610 ARM_CVTD (code
, vfp_scratch1
, ins
->sreg1
);
5611 ARM_FSTS (code
, vfp_scratch1
, ins
->inst_destbasereg
, ins
->inst_offset
);
5612 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5615 case OP_LOADR4_MEMBASE
:
5617 ARM_FLDS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
5619 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
5620 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5621 ARM_FLDS (code
, vfp_scratch1
, ins
->inst_basereg
, ins
->inst_offset
);
5622 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5623 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5626 case OP_ICONV_TO_R_UN
: {
5627 g_assert_not_reached ();
5630 case OP_ICONV_TO_R4
:
5632 ARM_FMSR (code
, ins
->dreg
, ins
->sreg1
);
5633 ARM_FSITOS (code
, ins
->dreg
, ins
->dreg
);
5635 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5636 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5637 ARM_FSITOS (code
, vfp_scratch1
, vfp_scratch1
);
5638 ARM_CVTS (code
, ins
->dreg
, vfp_scratch1
);
5639 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5642 case OP_ICONV_TO_R8
:
5643 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5644 ARM_FMSR (code
, vfp_scratch1
, ins
->sreg1
);
5645 ARM_FSITOD (code
, ins
->dreg
, vfp_scratch1
);
5646 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5650 MonoType
*sig_ret
= mini_get_underlying_type (mono_method_signature_internal (cfg
->method
)->ret
);
5651 if (sig_ret
->type
== MONO_TYPE_R4
) {
5653 if (IS_HARD_FLOAT
) {
5654 if (ins
->sreg1
!= ARM_VFP_D0
)
5655 ARM_CPYS (code
, ARM_VFP_D0
, ins
->sreg1
);
5657 ARM_FMRS (code
, ARMREG_R0
, ins
->sreg1
);
5660 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
5663 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
5667 ARM_CPYD (code
, ARM_VFP_D0
, ins
->sreg1
);
5669 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
5673 case OP_FCONV_TO_I1
:
5674 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5676 case OP_FCONV_TO_U1
:
5677 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5679 case OP_FCONV_TO_I2
:
5680 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5682 case OP_FCONV_TO_U2
:
5683 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5685 case OP_FCONV_TO_I4
:
5687 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5689 case OP_FCONV_TO_U4
:
5691 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5693 case OP_FCONV_TO_I8
:
5694 case OP_FCONV_TO_U8
:
5695 g_assert_not_reached ();
5696 /* Implemented as helper calls */
5698 case OP_LCONV_TO_R_UN
:
5699 g_assert_not_reached ();
5700 /* Implemented as helper calls */
5702 case OP_LCONV_TO_OVF_I4_2
: {
5703 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
5705 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5708 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
5709 high_bit_not_set
= code
;
5710 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
5712 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5713 valid_negative
= code
;
5714 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5715 invalid_negative
= code
;
5716 ARM_B_COND (code
, ARMCOND_AL
, 0);
5718 arm_patch (high_bit_not_set
, code
);
5720 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
5721 valid_positive
= code
;
5722 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5724 arm_patch (invalid_negative
, code
);
5725 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
5727 arm_patch (valid_negative
, code
);
5728 arm_patch (valid_positive
, code
);
5730 if (ins
->dreg
!= ins
->sreg1
)
5731 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
5735 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5738 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5741 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5744 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5747 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
5751 g_assert_not_reached ();
5755 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5761 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5766 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5769 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5770 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5774 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5777 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5778 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5782 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5785 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5786 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5787 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5791 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5794 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5795 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5799 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5802 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5803 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5804 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5808 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5811 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5812 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5816 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
5819 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5820 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5824 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
5827 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5828 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5831 /* ARM FPA flags table:
5832 * N Less than ARMCOND_MI
5833 * Z Equal ARMCOND_EQ
5834 * C Greater Than or Equal ARMCOND_CS
5835 * V Unordered ARMCOND_VS
5838 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
5841 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
5844 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5847 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5848 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
5854 g_assert_not_reached ();
5858 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5860 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5861 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
5862 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
5866 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
5867 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
5872 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch1
);
5873 code
= mono_arm_emit_vfp_scratch_save (cfg
, code
, vfp_scratch2
);
5875 ARM_ABSD (code
, vfp_scratch2
, ins
->sreg1
);
5876 ARM_FLDD (code
, vfp_scratch1
, ARMREG_PC
, 0);
5878 *(guint32
*)code
= 0xffffffff;
5880 *(guint32
*)code
= 0x7fefffff;
5882 ARM_CMPD (code
, vfp_scratch2
, vfp_scratch1
);
5884 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "OverflowException");
5885 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
5887 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "OverflowException");
5888 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
5890 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch1
);
5891 code
= mono_arm_emit_vfp_scratch_restore (cfg
, code
, vfp_scratch2
);
5896 case OP_RCONV_TO_I1
:
5897 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
5899 case OP_RCONV_TO_U1
:
5900 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
5902 case OP_RCONV_TO_I2
:
5903 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
5905 case OP_RCONV_TO_U2
:
5906 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
5908 case OP_RCONV_TO_I4
:
5909 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
5911 case OP_RCONV_TO_U4
:
5912 code
= emit_r4_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
5914 case OP_RCONV_TO_R4
:
5916 if (ins
->dreg
!= ins
->sreg1
)
5917 ARM_CPYS (code
, ins
->dreg
, ins
->sreg1
);
5919 case OP_RCONV_TO_R8
:
5921 ARM_CVTS (code
, ins
->dreg
, ins
->sreg1
);
5924 ARM_VFP_ADDS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5927 ARM_VFP_SUBS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5930 ARM_VFP_MULS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5933 ARM_VFP_DIVS (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
5936 ARM_NEGS (code
, ins
->dreg
, ins
->sreg1
);
5940 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5943 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
5944 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
5948 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5951 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5952 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5956 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5959 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5960 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5961 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5965 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5968 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5969 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5973 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
5976 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
5977 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
5978 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
5982 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5985 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_NE
);
5986 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_EQ
);
5990 ARM_CMPS (code
, ins
->sreg1
, ins
->sreg2
);
5993 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
5994 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
5998 ARM_CMPS (code
, ins
->sreg2
, ins
->sreg1
);
6001 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 1);
6002 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_MI
);
6005 case OP_GC_LIVENESS_DEF
:
6006 case OP_GC_LIVENESS_USE
:
6007 case OP_GC_PARAM_SLOT_LIVENESS_DEF
:
6008 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6010 case OP_GC_SPILL_SLOT_LIVENESS_DEF
:
6011 ins
->backend
.pc_offset
= code
- cfg
->native_code
;
6012 bb
->spill_slot_defs
= g_slist_prepend_mempool (cfg
->mempool
, bb
->spill_slot_defs
, ins
);
6014 case OP_LIVERANGE_START
: {
6015 if (cfg
->verbose_level
> 1)
6016 printf ("R%d START=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6017 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_start
= code
- cfg
->native_code
;
6020 case OP_LIVERANGE_END
: {
6021 if (cfg
->verbose_level
> 1)
6022 printf ("R%d END=0x%x\n", MONO_VARINFO (cfg
, ins
->inst_c0
)->vreg
, (int)(code
- cfg
->native_code
));
6023 MONO_VARINFO (cfg
, ins
->inst_c0
)->live_range_end
= code
- cfg
->native_code
;
6026 case OP_GC_SAFE_POINT
: {
6029 ARM_LDR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
6030 ARM_CMP_REG_IMM (code
, ARMREG_IP
, 0, 0);
6032 ARM_B_COND (code
, ARMCOND_EQ
, 0);
6033 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_JIT_ICALL_ID
, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_threads_state_poll
));
6034 code
= emit_call_seq (cfg
, code
);
6035 arm_patch (buf
[0], code
);
6038 case OP_FILL_PROF_CALL_CTX
:
6039 for (int i
= 0; i
< ARMREG_MAX
; i
++)
6040 if ((MONO_ARCH_CALLEE_SAVED_REGS
& (1 << i
)) || i
== ARMREG_SP
|| i
== ARMREG_FP
)
6041 ARM_STR_IMM (code
, i
, ins
->sreg1
, MONO_STRUCT_OFFSET (MonoContext
, regs
) + i
* sizeof (target_mgreg_t
));
6044 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
6045 g_assert_not_reached ();
6048 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
6049 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
6050 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
6051 g_assert_not_reached ();
6059 set_code_cursor (cfg
, code
);
6062 #endif /* DISABLE_JIT */
6065 mono_arch_register_lowlevel_calls (void)
6067 /* The signature doesn't matter */
6068 mono_register_jit_icall (mono_arm_throw_exception
, mono_icall_sig_void
, TRUE
);
6069 mono_register_jit_icall (mono_arm_throw_exception_by_token
, mono_icall_sig_void
, TRUE
);
6070 mono_register_jit_icall (mono_arm_unaligned_stack
, mono_icall_sig_void
, TRUE
);
6073 #define patch_lis_ori(ip,val) do {\
6074 guint16 *__lis_ori = (guint16*)(ip); \
6075 __lis_ori [1] = (((guint32)(gsize)(val)) >> 16) & 0xffff; \
6076 __lis_ori [3] = ((guint32)(gsize)(val)) & 0xffff; \
6080 mono_arch_patch_code_new (MonoCompile
*cfg
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gpointer target
)
6082 unsigned char *ip
= ji
->ip
.i
+ code
;
6084 if (ji
->type
== MONO_PATCH_INFO_SWITCH
) {
6088 case MONO_PATCH_INFO_SWITCH
: {
6089 gpointer
*jt
= (gpointer
*)(ip
+ 8);
6091 /* jt is the inlined jump table, 2 instructions after ip
6092 * In the normal case we store the absolute addresses,
6093 * otherwise the displacements.
6095 for (i
= 0; i
< ji
->data
.table
->table_size
; i
++)
6096 jt
[i
] = code
+ (int)(gsize
)ji
->data
.table
->table
[i
];
6099 case MONO_PATCH_INFO_IP
:
6100 g_assert_not_reached ();
6101 patch_lis_ori (ip
, ip
);
6103 case MONO_PATCH_INFO_METHODCONST
:
6104 case MONO_PATCH_INFO_CLASS
:
6105 case MONO_PATCH_INFO_IMAGE
:
6106 case MONO_PATCH_INFO_FIELD
:
6107 case MONO_PATCH_INFO_VTABLE
:
6108 case MONO_PATCH_INFO_IID
:
6109 case MONO_PATCH_INFO_SFLDA
:
6110 case MONO_PATCH_INFO_LDSTR
:
6111 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
6112 case MONO_PATCH_INFO_LDTOKEN
:
6113 g_assert_not_reached ();
6114 /* from OP_AOTCONST : lis + ori */
6115 patch_lis_ori (ip
, target
);
6117 case MONO_PATCH_INFO_R4
:
6118 case MONO_PATCH_INFO_R8
:
6119 g_assert_not_reached ();
6120 *((gconstpointer
*)(ip
+ 2)) = target
;
6122 case MONO_PATCH_INFO_EXC_NAME
:
6123 g_assert_not_reached ();
6124 *((gconstpointer
*)(ip
+ 1)) = target
;
6126 case MONO_PATCH_INFO_NONE
:
6127 case MONO_PATCH_INFO_BB_OVF
:
6128 case MONO_PATCH_INFO_EXC_OVF
:
6129 /* everything is dealt with at epilog output time */
6132 arm_patch_general (cfg
, domain
, ip
, (const guchar
*)target
);
6138 mono_arm_unaligned_stack (MonoMethod
*method
)
6140 g_assert_not_reached ();
6146 * Stack frame layout:
6148 * ------------------- fp
6149 * MonoLMF structure or saved registers
6150 * -------------------
6152 * -------------------
6154 * -------------------
6155 * param area size is cfg->param_area
6156 * ------------------- sp
6159 mono_arch_emit_prolog (MonoCompile
*cfg
)
6161 MonoMethod
*method
= cfg
->method
;
6163 MonoMethodSignature
*sig
;
6165 int alloc_size
, orig_alloc_size
, pos
, max_offset
, i
, rot_amount
, part
;
6169 int prev_sp_offset
, reg_offset
;
6171 sig
= mono_method_signature_internal (method
);
6172 cfg
->code_size
= 256 + sig
->param_count
* 64;
6173 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
6175 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
6177 alloc_size
= cfg
->stack_offset
;
6183 * The iphone uses R7 as the frame pointer, and it points at the saved
6188 * We can't use r7 as a frame pointer since it points into the middle of
6189 * the frame, so we keep using our own frame pointer.
6190 * FIXME: Optimize this.
6192 ARM_PUSH (code
, (1 << ARMREG_R7
) | (1 << ARMREG_LR
));
6193 prev_sp_offset
+= 8; /* r7 and lr */
6194 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6195 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_R7
, (- prev_sp_offset
) + 0);
6196 ARM_MOV_REG_REG (code
, ARMREG_R7
, ARMREG_SP
);
6199 if (!method
->save_lmf
) {
6201 /* No need to push LR again */
6202 if (cfg
->used_int_regs
)
6203 ARM_PUSH (code
, cfg
->used_int_regs
);
6205 ARM_PUSH (code
, cfg
->used_int_regs
| (1 << ARMREG_LR
));
6206 prev_sp_offset
+= 4;
6208 for (i
= 0; i
< 16; ++i
) {
6209 if (cfg
->used_int_regs
& (1 << i
))
6210 prev_sp_offset
+= 4;
6212 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6214 for (i
= 0; i
< 16; ++i
) {
6215 if ((cfg
->used_int_regs
& (1 << i
))) {
6216 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6217 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + reg_offset
, SLOT_NOREF
);
6221 mono_emit_unwind_op_offset (cfg
, code
, ARMREG_LR
, -4);
6222 mini_gc_set_slot_type_from_cfa (cfg
, -4, SLOT_NOREF
);
6224 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
6225 ARM_PUSH (code
, 0x5ff0);
6226 prev_sp_offset
+= 4 * 10; /* all but r0-r3, sp and pc */
6227 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
6229 for (i
= 0; i
< 16; ++i
) {
6230 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
6231 /* The original r7 is saved at the start */
6232 if (!(iphone_abi
&& i
== ARMREG_R7
))
6233 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
6237 g_assert (reg_offset
== 4 * 10);
6238 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (4 * 10);
6242 orig_alloc_size
= alloc_size
;
6243 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6244 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
6245 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
6246 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
6249 /* the stack used in the pushed regs */
6250 alloc_size
+= ALIGN_TO (prev_sp_offset
, MONO_ARCH_FRAME_ALIGNMENT
) - prev_sp_offset
;
6251 cfg
->stack_usage
= alloc_size
;
6253 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
6254 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
6256 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
6257 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
6259 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
6261 if (cfg
->frame_reg
!= ARMREG_SP
) {
6262 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
6263 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
6265 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6266 prev_sp_offset
+= alloc_size
;
6268 for (i
= 0; i
< alloc_size
- orig_alloc_size
; i
+= 4)
6269 mini_gc_set_slot_type_from_cfa (cfg
, (- prev_sp_offset
) + orig_alloc_size
+ i
, SLOT_NOREF
);
6271 /* compute max_offset in order to use short forward jumps
6272 * we could skip do it on arm because the immediate displacement
6273 * for jumps is large enough, it may be useful later for constant pools
6276 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
6277 MonoInst
*ins
= bb
->code
;
6278 bb
->max_offset
= max_offset
;
6280 MONO_BB_FOR_EACH_INS (bb
, ins
)
6281 max_offset
+= ins_get_size (ins
->opcode
);
6284 /* stack alignment check */
6288 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_SP);
6289 code = mono_arm_emit_load_imm (code, ARMREG_IP, MONO_ARCH_FRAME_ALIGNMENT -1);
6290 ARM_AND_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
6291 ARM_CMP_REG_IMM (code, ARMREG_LR, 0, 0);
6293 ARM_B_COND (code, ARMCOND_EQ, 0);
6294 if (cfg->compile_aot)
6295 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
6297 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
6298 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_ICALL_ID, GUINT_TO_POINTER (MONO_JIT_ICALL_mono_arm_unaligned_stack));
6299 code = emit_call_seq (cfg, code);
6300 arm_patch (buf [0], code);
6304 /* store runtime generic context */
6305 if (cfg
->rgctx_var
) {
6306 MonoInst
*ins
= cfg
->rgctx_var
;
6308 g_assert (ins
->opcode
== OP_REGOFFSET
);
6310 if (arm_is_imm12 (ins
->inst_offset
)) {
6311 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
6313 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6314 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
6318 /* load arguments allocated to register from the stack */
6321 cinfo
= get_call_info (NULL
, sig
);
6323 if (cinfo
->ret
.storage
== RegTypeStructByAddr
) {
6324 ArgInfo
*ainfo
= &cinfo
->ret
;
6325 inst
= cfg
->vret_addr
;
6326 g_assert (arm_is_imm12 (inst
->inst_offset
));
6327 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6330 if (sig
->call_convention
== MONO_CALL_VARARG
) {
6331 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
6333 /* Save the sig cookie address */
6334 g_assert (cookie
->storage
== RegTypeBase
);
6336 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
6337 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
6338 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
6339 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
6342 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
6343 ArgInfo
*ainfo
= cinfo
->args
+ i
;
6344 inst
= cfg
->args
[pos
];
6346 if (cfg
->verbose_level
> 2)
6347 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
6349 if (inst
->opcode
== OP_REGVAR
) {
6350 if (ainfo
->storage
== RegTypeGeneral
)
6351 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
6352 else if (ainfo
->storage
== RegTypeFP
) {
6353 g_assert_not_reached ();
6354 } else if (ainfo
->storage
== RegTypeBase
) {
6355 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6356 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6358 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6359 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
6362 g_assert_not_reached ();
6364 if (cfg
->verbose_level
> 2)
6365 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
6367 switch (ainfo
->storage
) {
6369 for (part
= 0; part
< ainfo
->nregs
; part
++) {
6370 if (ainfo
->esize
== 4)
6371 ARM_FSTS (code
, ainfo
->reg
+ part
, inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6373 ARM_FSTD (code
, ainfo
->reg
+ (part
* 2), inst
->inst_basereg
, inst
->inst_offset
+ (part
* ainfo
->esize
));
6376 case RegTypeGeneral
:
6377 case RegTypeIRegPair
:
6378 case RegTypeGSharedVtInReg
:
6379 case RegTypeStructByAddr
:
6380 switch (ainfo
->size
) {
6382 if (arm_is_imm12 (inst
->inst_offset
))
6383 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6385 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6386 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6390 if (arm_is_imm8 (inst
->inst_offset
)) {
6391 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6393 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6394 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6398 if (arm_is_imm12 (inst
->inst_offset
)) {
6399 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6401 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6402 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6404 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6405 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6407 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6408 ARM_STR_REG_REG (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, ARMREG_IP
);
6412 if (arm_is_imm12 (inst
->inst_offset
)) {
6413 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
6415 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6416 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
6421 case RegTypeBaseGen
:
6422 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6423 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6425 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6426 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6428 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6429 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6430 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
6432 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6433 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6434 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6435 ARM_STR_REG_REG (code
, ARMREG_R3
, inst
->inst_basereg
, ARMREG_IP
);
6439 case RegTypeGSharedVtOnStack
:
6440 case RegTypeStructByAddrOnStack
:
6441 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
6442 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
6444 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
6445 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6448 switch (ainfo
->size
) {
6450 if (arm_is_imm8 (inst
->inst_offset
)) {
6451 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6453 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6454 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6458 if (arm_is_imm8 (inst
->inst_offset
)) {
6459 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6461 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6462 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6466 if (arm_is_imm12 (inst
->inst_offset
)) {
6467 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6469 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6470 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6472 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
6473 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
6475 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
6476 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
6478 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
6479 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
6481 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
6482 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6486 if (arm_is_imm12 (inst
->inst_offset
)) {
6487 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
6489 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6490 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
6496 int imm8
, rot_amount
;
6498 if ((imm8
= mono_arm_is_rotated_imm8 (inst
->inst_offset
, &rot_amount
)) == -1) {
6499 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
6500 ARM_ADD_REG_REG (code
, ARMREG_IP
, ARMREG_IP
, inst
->inst_basereg
);
6502 ARM_ADD_REG_IMM (code
, ARMREG_IP
, inst
->inst_basereg
, imm8
, rot_amount
);
6504 if (ainfo
->size
== 8)
6505 ARM_FSTD (code
, ainfo
->reg
, ARMREG_IP
, 0);
6507 ARM_FSTS (code
, ainfo
->reg
, ARMREG_IP
, 0);
6510 case RegTypeStructByVal
: {
6511 int doffset
= inst
->inst_offset
;
6515 size
= mini_type_stack_size_full (inst
->inst_vtype
, NULL
, sig
->pinvoke
);
6516 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
6517 if (arm_is_imm12 (doffset
)) {
6518 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
6520 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
6521 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
6523 soffset
+= sizeof (target_mgreg_t
);
6524 doffset
+= sizeof (target_mgreg_t
);
6526 if (ainfo
->vtsize
) {
6527 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6528 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6529 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (target_mgreg_t
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
6534 g_assert_not_reached ();
6541 if (method
->save_lmf
)
6542 code
= emit_save_lmf (cfg
, code
, alloc_size
- lmf_offset
);
6544 if (cfg
->arch
.seq_point_info_var
) {
6545 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
6547 /* Initialize the variable from a GOT slot */
6548 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
6549 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6551 *(gpointer
*)code
= NULL
;
6553 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
6555 g_assert (ins
->opcode
== OP_REGOFFSET
);
6557 if (arm_is_imm12 (ins
->inst_offset
)) {
6558 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6560 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6561 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6565 /* Initialize ss_trigger_page_var */
6566 if (!cfg
->soft_breakpoints
) {
6567 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6568 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
6569 int dreg
= ARMREG_LR
;
6572 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6574 code
= emit_ldr_imm (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6575 /* Load the trigger page addr */
6576 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
6577 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
6581 if (cfg
->arch
.seq_point_ss_method_var
) {
6582 MonoInst
*ss_method_ins
= cfg
->arch
.seq_point_ss_method_var
;
6583 MonoInst
*bp_method_ins
= cfg
->arch
.seq_point_bp_method_var
;
6585 g_assert (ss_method_ins
->opcode
== OP_REGOFFSET
);
6586 g_assert (arm_is_imm12 (ss_method_ins
->inst_offset
));
6588 if (cfg
->compile_aot
) {
6589 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
6590 int dreg
= ARMREG_LR
;
6592 g_assert (info_var
->opcode
== OP_REGOFFSET
);
6593 g_assert (arm_is_imm12 (info_var
->inst_offset
));
6595 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
6596 ARM_LDR_IMM (code
, dreg
, dreg
, MONO_STRUCT_OFFSET (SeqPointInfo
, ss_tramp_addr
));
6597 ARM_STR_IMM (code
, dreg
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6599 g_assert (bp_method_ins
->opcode
== OP_REGOFFSET
);
6600 g_assert (arm_is_imm12 (bp_method_ins
->inst_offset
));
6602 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
6604 *(gpointer
*)code
= &single_step_tramp
;
6606 *(gpointer
*)code
= breakpoint_tramp
;
6609 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 0);
6610 ARM_STR_IMM (code
, ARMREG_IP
, ss_method_ins
->inst_basereg
, ss_method_ins
->inst_offset
);
6611 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_LR
, 4);
6612 ARM_STR_IMM (code
, ARMREG_IP
, bp_method_ins
->inst_basereg
, bp_method_ins
->inst_offset
);
6616 set_code_cursor (cfg
, code
);
6623 mono_arch_emit_epilog (MonoCompile
*cfg
)
6625 MonoMethod
*method
= cfg
->method
;
6626 int pos
, i
, rot_amount
;
6627 int max_epilog_size
= 16 + 20*4;
6631 if (cfg
->method
->save_lmf
)
6632 max_epilog_size
+= 128;
6634 code
= realloc_code (cfg
, max_epilog_size
);
6636 /* Save the uwind state which is needed by the out-of-line code */
6637 mono_emit_unwind_op_remember_state (cfg
, code
);
6641 /* Load returned vtypes into registers if needed */
6642 cinfo
= cfg
->arch
.cinfo
;
6643 switch (cinfo
->ret
.storage
) {
6644 case RegTypeStructByVal
: {
6645 MonoInst
*ins
= cfg
->ret
;
6647 if (cinfo
->ret
.nregs
== 1) {
6648 if (arm_is_imm12 (ins
->inst_offset
)) {
6649 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
6651 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
6652 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
6655 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6656 int offset
= ins
->inst_offset
+ (i
* 4);
6657 if (arm_is_imm12 (offset
)) {
6658 ARM_LDR_IMM (code
, i
, ins
->inst_basereg
, offset
);
6660 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, offset
);
6661 ARM_LDR_REG_REG (code
, i
, ins
->inst_basereg
, ARMREG_LR
);
6668 MonoInst
*ins
= cfg
->ret
;
6670 for (i
= 0; i
< cinfo
->ret
.nregs
; ++i
) {
6671 if (cinfo
->ret
.esize
== 4)
6672 ARM_FLDS (code
, cinfo
->ret
.reg
+ i
, ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6674 ARM_FLDD (code
, cinfo
->ret
.reg
+ (i
* 2), ins
->inst_basereg
, ins
->inst_offset
+ (i
* cinfo
->ret
.esize
));
6682 if (method
->save_lmf
) {
6683 int lmf_offset
, reg
, sp_adj
, regmask
, nused_int_regs
= 0;
6684 /* all but r0-r3, sp and pc */
6685 pos
+= MONO_ABI_SIZEOF (MonoLMF
) - (MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6688 code
= emit_restore_lmf (cfg
, code
, cfg
->stack_usage
- lmf_offset
);
6690 /* This points to r4 inside MonoLMF->iregs */
6691 sp_adj
= (MONO_ABI_SIZEOF (MonoLMF
) - MONO_ARM_NUM_SAVED_REGS
* sizeof (target_mgreg_t
));
6693 regmask
= 0x9ff0; /* restore lr to pc */
6694 /* Skip caller saved registers not used by the method */
6695 while (!(cfg
->used_int_regs
& (1 << reg
)) && reg
< ARMREG_FP
) {
6696 regmask
&= ~(1 << reg
);
6701 /* Restored later */
6702 regmask
&= ~(1 << ARMREG_PC
);
6703 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6704 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
+ sp_adj
);
6705 for (i
= 0; i
< 16; i
++) {
6706 if (regmask
& (1 << i
))
6709 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, ((iphone_abi
? 3 : 0) + nused_int_regs
) * 4);
6711 ARM_POP (code
, regmask
);
6713 for (i
= 0; i
< 16; i
++) {
6714 if (regmask
& (1 << i
))
6715 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6717 /* Restore saved r7, restore LR to PC */
6718 /* Skip lr from the lmf */
6719 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 3 * 4);
6720 ARM_ADD_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, sizeof (target_mgreg_t
), 0);
6721 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6722 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6725 int i
, nused_int_regs
= 0;
6727 for (i
= 0; i
< 16; i
++) {
6728 if (cfg
->used_int_regs
& (1 << i
))
6732 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
6733 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
6735 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
6736 ARM_ADD_REG_REG (code
, ARMREG_SP
, cfg
->frame_reg
, ARMREG_IP
);
6739 if (cfg
->frame_reg
!= ARMREG_SP
) {
6740 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, ARMREG_SP
);
6744 /* Restore saved gregs */
6745 if (cfg
->used_int_regs
) {
6746 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (2 + nused_int_regs
) * 4);
6747 ARM_POP (code
, cfg
->used_int_regs
);
6748 for (i
= 0; i
< 16; i
++) {
6749 if (cfg
->used_int_regs
& (1 << i
))
6750 mono_emit_unwind_op_same_value (cfg
, code
, i
);
6753 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, 2 * 4);
6754 /* Restore saved r7, restore LR to PC */
6755 ARM_POP (code
, (1 << ARMREG_R7
) | (1 << ARMREG_PC
));
6757 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, (nused_int_regs
+ 1) * 4);
6758 ARM_POP (code
, cfg
->used_int_regs
| (1 << ARMREG_PC
));
6762 /* Restore the unwind state to be the same as before the epilog */
6763 mono_emit_unwind_op_restore_state (cfg
, code
);
6765 set_code_cursor (cfg
, code
);
6770 mono_arch_emit_exceptions (MonoCompile
*cfg
)
6772 MonoJumpInfo
*patch_info
;
6775 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
6776 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
6777 int max_epilog_size
= 50;
6779 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
6780 exc_throw_pos
[i
] = NULL
;
6781 exc_throw_found
[i
] = 0;
6784 /* count the number of exception infos */
6787 * make sure we have enough space for exceptions
6789 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6790 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
6791 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6792 if (!exc_throw_found
[i
]) {
6793 max_epilog_size
+= 32;
6794 exc_throw_found
[i
] = TRUE
;
6799 code
= realloc_code (cfg
, max_epilog_size
);
6801 /* add code to raise exceptions */
6802 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
6803 switch (patch_info
->type
) {
6804 case MONO_PATCH_INFO_EXC
: {
6805 MonoClass
*exc_class
;
6806 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
6808 i
= mini_exception_id_by_name ((const char*)patch_info
->data
.target
);
6809 if (exc_throw_pos
[i
]) {
6810 arm_patch (ip
, exc_throw_pos
[i
]);
6811 patch_info
->type
= MONO_PATCH_INFO_NONE
;
6814 exc_throw_pos
[i
] = code
;
6816 arm_patch (ip
, code
);
6818 exc_class
= mono_class_load_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
6820 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
6821 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
6822 patch_info
->type
= MONO_PATCH_INFO_JIT_ICALL_ID
;
6823 patch_info
->data
.jit_icall_id
= MONO_JIT_ICALL_mono_arch_throw_corlib_exception
;
6824 patch_info
->ip
.i
= code
- cfg
->native_code
;
6826 cfg
->thunk_area
+= THUNK_SIZE
;
6827 *(guint32
*)(gpointer
)code
= m_class_get_type_token (exc_class
) - MONO_TOKEN_TYPE_DEF
;
6837 set_code_cursor (cfg
, code
);
6840 #endif /* #ifndef DISABLE_JIT */
6843 mono_arch_finish_init (void)
6848 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
6853 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
6864 mono_arch_get_patch_offset (guint8
*code
)
6871 mono_arch_flush_register_windows (void)
6876 mono_arch_find_imt_method (host_mgreg_t
*regs
, guint8
*code
)
6878 return (MonoMethod
*)regs
[MONO_ARCH_IMT_REG
];
6882 mono_arch_find_static_call_vtable (host_mgreg_t
*regs
, guint8
*code
)
6884 return (MonoVTable
*)(gsize
)regs
[MONO_ARCH_RGCTX_REG
];
6888 mono_arch_get_cie_program (void)
6892 mono_add_unwind_op_def_cfa (l
, (guint8
*)NULL
, (guint8
*)NULL
, ARMREG_SP
, 0);
6897 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6898 #define BASE_SIZE (6 * 4)
6899 #define BSEARCH_ENTRY_SIZE (4 * 4)
6900 #define CMP_SIZE (3 * 4)
6901 #define BRANCH_SIZE (1 * 4)
6902 #define CALL_SIZE (2 * 4)
6903 #define WMC_SIZE (8 * 4)
6904 #define DISTANCE(A, B) (((gint32)(gssize)(B)) - ((gint32)(gssize)(A)))
6907 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
6909 guint32 delta
= DISTANCE (target
, code
);
6911 g_assert (delta
>= 0 && delta
<= 0xFFF);
6912 *target
= *target
| delta
;
6917 #ifdef ENABLE_WRONG_METHOD_CHECK
6919 mini_dump_bad_imt (int input_imt
, int compared_imt
, int pc
)
6921 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt
, compared_imt
, pc
);
6927 mono_arch_build_imt_trampoline (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
6928 gpointer fail_tramp
)
6931 arminstr_t
*code
, *start
;
6932 gboolean large_offsets
= FALSE
;
6933 guint32
**constant_pool_starts
;
6934 arminstr_t
*vtable_target
= NULL
;
6935 int extra_space
= 0;
6936 #ifdef ENABLE_WRONG_METHOD_CHECK
6942 constant_pool_starts
= g_new0 (guint32
*, count
);
6944 for (i
= 0; i
< count
; ++i
) {
6945 MonoIMTCheckItem
*item
= imt_entries
[i
];
6946 if (item
->is_equals
) {
6947 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
6949 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
6950 item
->chunk_size
+= 32;
6951 large_offsets
= TRUE
;
6954 if (item
->check_target_idx
|| fail_case
) {
6955 if (!item
->compare_done
|| fail_case
)
6956 item
->chunk_size
+= CMP_SIZE
;
6957 item
->chunk_size
+= BRANCH_SIZE
;
6959 #ifdef ENABLE_WRONG_METHOD_CHECK
6960 item
->chunk_size
+= WMC_SIZE
;
6964 item
->chunk_size
+= 16;
6965 large_offsets
= TRUE
;
6967 item
->chunk_size
+= CALL_SIZE
;
6969 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
6970 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
6972 size
+= item
->chunk_size
;
6976 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
6979 code
= mono_method_alloc_generic_virtual_trampoline (domain
, size
);
6981 code
= mono_domain_code_reserve (domain
, size
);
6984 unwind_ops
= mono_arch_get_cie_program ();
6987 g_print ("Building IMT trampoline for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
, size
, start
, ((guint8
*)start
) + size
, vtable
, fail_tramp
);
6988 for (i
= 0; i
< count
; ++i
) {
6989 MonoIMTCheckItem
*item
= imt_entries
[i
];
6990 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, ((MonoMethod
*)item
->key
)->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
6994 if (large_offsets
) {
6995 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
6996 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 4 * sizeof (target_mgreg_t
));
6998 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
6999 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7001 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
7002 vtable_target
= code
;
7003 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
7004 ARM_MOV_REG_REG (code
, ARMREG_R0
, ARMREG_V5
);
7006 for (i
= 0; i
< count
; ++i
) {
7007 MonoIMTCheckItem
*item
= imt_entries
[i
];
7008 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
7009 gint32 vtable_offset
;
7011 item
->code_target
= (guint8
*)code
;
7013 if (item
->is_equals
) {
7014 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
7016 if (item
->check_target_idx
|| fail_case
) {
7017 if (!item
->compare_done
|| fail_case
) {
7019 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7020 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7022 item
->jmp_code
= (guint8
*)code
;
7023 ARM_B_COND (code
, ARMCOND_NE
, 0);
7025 /*Enable the commented code to assert on wrong method*/
7026 #ifdef ENABLE_WRONG_METHOD_CHECK
7028 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7029 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7031 ARM_B_COND (code
, ARMCOND_EQ
, 0);
7033 /* Define this if your system is so bad that gdb is failing. */
7034 #ifdef BROKEN_DEV_ENV
7035 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
7037 arm_patch (code
- 1, mini_dump_bad_imt
);
7041 arm_patch (cond
, code
);
7045 if (item
->has_target_code
) {
7046 /* Load target address */
7047 target_code_ins
= code
;
7048 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7049 /* Save it to the fourth slot */
7050 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7051 /* Restore registers and branch */
7052 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7054 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
7056 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
7057 if (!arm_is_imm12 (vtable_offset
)) {
7059 * We need to branch to a computed address but we don't have
7060 * a free register to store it, since IP must contain the
7061 * vtable address. So we push the two values to the stack, and
7062 * load them both using LDM.
7064 /* Compute target address */
7065 vtable_offset_ins
= code
;
7066 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7067 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
7068 /* Save it to the fourth slot */
7069 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7070 /* Restore registers and branch */
7071 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7073 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
7075 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
7076 if (large_offsets
) {
7077 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 2 * sizeof (target_mgreg_t
));
7078 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (target_mgreg_t
));
7080 mono_add_unwind_op_def_cfa_offset (unwind_ops
, code
, start
, 0);
7081 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
7086 arm_patch (item
->jmp_code
, (guchar
*)code
);
7088 target_code_ins
= code
;
7089 /* Load target address */
7090 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7091 /* Save it to the fourth slot */
7092 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (target_mgreg_t
));
7093 /* Restore registers and branch */
7094 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
7096 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
7097 item
->jmp_code
= NULL
;
7101 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)(gsize
)item
->key
);
7103 /*must emit after unconditional branch*/
7104 if (vtable_target
) {
7105 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)(gsize
)vtable
);
7106 item
->chunk_size
+= 4;
7107 vtable_target
= NULL
;
7110 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7111 constant_pool_starts
[i
] = code
;
7113 code
+= extra_space
;
7117 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
7118 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
7120 item
->jmp_code
= (guint8
*)code
;
7121 ARM_B_COND (code
, ARMCOND_HS
, 0);
7126 for (i
= 0; i
< count
; ++i
) {
7127 MonoIMTCheckItem
*item
= imt_entries
[i
];
7128 if (item
->jmp_code
) {
7129 if (item
->check_target_idx
)
7130 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
7132 if (i
> 0 && item
->is_equals
) {
7134 arminstr_t
*space_start
= constant_pool_starts
[i
];
7135 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
7136 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)(gsize
)imt_entries
[j
]->key
);
7143 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", m_class_get_name_space (vtable
->klass
), m_class_get_name (vtable
->klass
), count
);
7144 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
7149 g_free (constant_pool_starts
);
7151 mono_arch_flush_icache ((guint8
*)start
, size
);
7152 MONO_PROFILER_RAISE (jit_code_buffer
, ((guint8
*)start
, code
- start
, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE
, NULL
));
7153 UnlockedAdd (&mono_stats
.imt_trampolines_size
, code
- start
);
7155 g_assert (DISTANCE (start
, code
) <= size
);
7157 mono_tramp_info_register (mono_tramp_info_create (NULL
, (guint8
*)start
, DISTANCE (start
, code
), NULL
, unwind_ops
), domain
);
/* Read the value of integer register REG from the saved machine context CTX. */
host_mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
	return ctx->regs [reg];
}
/* Store VAL into integer register REG of the saved machine context CTX. */
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, host_mgreg_t val)
{
	ctx->regs [reg] = val;
}
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific trampolines
 * for this platform. On ARM these are the exception-handling trampolines.
 */
GSList *
mono_arch_get_trampolines (gboolean aot)
{
	return mono_arm_get_exception_trampolines (aot);
}
7186 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 *
 * NOTE(review): the source extraction dropped several lines in this function
 * (declarations, the AOT branch condition and the inline-data bookkeeping);
 * they were reconstructed from context — verify against version control.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;
	MonoDebugOptions *opt = mini_get_debug_options ();

	if (ji->from_aot) {
		/* AOT code is read-only: patch the per-method breakpoint table instead of the code */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		/* Seq points are emitted on 4-byte (instruction) boundaries */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		/* Soft breakpoints call a trampoline; hard ones fault on a trigger page */
		info->bp_addrs [native_offset / 4] = (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page);
	} else if (opt->soft_breakpoints) {
		/* Overwrite the seq point placeholder with a call through LR to the breakpoint trampoline */
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)(gssize)bp_trigger_page;
		code += 4;
		/* The load faults because the trigger page is unreadable while breakpoints are armed */
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP, undoing what mono_arch_set_breakpoint () did.
 *
 * NOTE(review): the source extraction dropped several lines here (declarations,
 * branch conditions and the NOP emission in the non-AOT paths); they were
 * reconstructed from context — verify against version control.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	MonoDebugOptions *opt = mini_get_debug_options ();
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), (guint8*)ji->code_start);

		if (!breakpoint_tramp)
			breakpoint_tramp = mini_get_breakpoint_trampoline ();

		g_assert (native_offset % 4 == 0);
		/* The slot must still hold exactly the value set_breakpoint () stored */
		g_assert (info->bp_addrs [native_offset / 4] == (guint8*)(opt->soft_breakpoints ? breakpoint_tramp : bp_trigger_page));
		info->bp_addrs [native_offset / 4] = 0;
	} else if (opt->soft_breakpoints) {
		/* Restore the seq point placeholder over the trampoline call */
		ARM_NOP (code);
		mono_arch_flush_icache (code - 4, 4);
	} else {
		/* Overwrite the 16-byte hard-breakpoint sequence with NOPs */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	/* Revoke all access so the next read of the trigger page faults */
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);

	single_step_tramp = mini_get_single_step_trampoline ();
}
/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	/* Make the trigger page readable again so stepping code no longer faults */
	if (ss_trigger_page)
		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);

	single_step_tramp = NULL;
}
7299 #define DBG_SIGNAL SIGBUS
7301 #define DBG_SIGNAL SIGSEGV
7305 * mono_arch_is_single_step_event:
7307 * Return whenever the machine state in SIGCTX corresponds to a single
7311 mono_arch_is_single_step_event (void *info
, void *sigctx
)
7313 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7315 if (!ss_trigger_page
)
7318 /* Sometimes the address is off by 4 */
7319 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
7326 * mono_arch_is_breakpoint_event:
7328 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7331 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
7333 siginfo_t
*sinfo
= (siginfo_t
*)info
;
7335 if (!ss_trigger_page
)
7338 if (sinfo
->si_signo
== DBG_SIGNAL
) {
7339 /* Sometimes the address is off by 4 */
7340 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
	/* Advance the saved PC past the 4-byte breakpoint instruction */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
	/* Advance the saved PC past the 4-byte single-step instruction */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
}
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs. Returns the per-method SeqPointInfo for the
 * method whose code starts at CODE, creating and caching it on first use.
 *
 * NOTE(review): the source extraction dropped the local declarations, the
 * cache-miss guard and the return statement; they were reconstructed from
 * context — verify against version control.
 */
SeqPointInfo*
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	/* One SeqPointInfo is cached per method, keyed by its code start address */
	mono_domain_lock (domain);
	info = (SeqPointInfo*)g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
											   code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, code);
		g_assert (ji);

		/* bp_addrs gets one slot per 4-byte instruction in the method */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;
		info->ss_tramp_addr = &single_step_tramp;

		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}
7408 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
void
mono_arch_set_target (char *mtriple)
{
	/* The GNU target triple format is not very well documented */
	if (strstr (mtriple, "armv7")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
	}
	if (strstr (mtriple, "armv6")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
	}
	/* "armv7" above also matches "armv7s"/"armv7k", so v5/v6/v7 are already set here */
	if (strstr (mtriple, "armv7s")) {
		v7s_supported = TRUE;
	}
	if (strstr (mtriple, "armv7k")) {
		v7k_supported = TRUE;
	}
	if (strstr (mtriple, "thumbv7s")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
		v7s_supported = TRUE;
		thumb_supported = TRUE;
		thumb2_supported = TRUE;
	}
	/* Apple targets always have at least ARMv6 + Thumb */
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		thumb_supported = TRUE;
	}
	if (strstr (mtriple, "gnueabi"))
		eabi_supported = TRUE;
}
/*
 * mono_arch_opcode_supported:
 *
 *   Return whether the atomic opcode OPCODE can be implemented on this cpu.
 *
 * NOTE(review): the default case was dropped by the source extraction and was
 * reconstructed as "return FALSE" — verify against version control.
 */
gboolean
mono_arch_opcode_supported (int opcode)
{
	switch (opcode) {
	/* Integer atomics need ldrex/strex, available from ARMv7 on */
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		return v7_supported;
	/* Float atomics additionally require VFP hardware */
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return v7_supported && IS_VFP;
	default:
		return FALSE;
	}
}
/* Expose the internal calling-convention computation for SIG; the result is
 * allocated from MP (or the heap if MP is NULL) by get_call_info (). */
CallInfo*
mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
	return get_call_info (mp, sig);
}
/* No fast TLS-getter trampoline on ARM; callers fall back to the generic path.
 * NOTE(review): the body was dropped by the source extraction; reconstructed as
 * "return NULL" — verify against version control. */
gpointer
mono_arch_get_get_tls_tramp (void)
{
	return NULL;
}
/*
 * emit_aotconst:
 *
 *   Emit code to load the AOT constant identified by (PATCH_TYPE, DATA) into DREG,
 * recording a patch entry on CFG. An inline data word follows the load pair and is
 * resolved through the GOT at runtime. Returns the updated code pointer.
 *
 * NOTE(review): the branch over the inline slot and the final "return code;" were
 * dropped by the source extraction and reconstructed — verify against version control.
 */
static G_GNUC_UNUSED guint8*
emit_aotconst (MonoCompile *cfg, guint8 *code, int dreg, int patch_type, gpointer data)
{
	mono_add_patch_info (cfg, code - cfg->native_code, (MonoJumpInfoType)patch_type, data);
	/* Load the GOT offset placed in the inline slot right after this pair */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* Branch over the inline data word */
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
/*
 * mono_arm_emit_aotconst:
 *
 *   Trampoline-creation counterpart of emit_aotconst (): load the AOT constant
 * identified by (PATCH_TYPE, DATA) into DREG, prepending a patch entry to the
 * JI_LIST patch list instead of recording it on a MonoCompile. BUF is the start
 * of the emitted buffer, used to compute the patch offset. Returns the updated
 * code pointer.
 *
 * NOTE(review): the branch over the inline slot and the final "return code;" were
 * dropped by the source extraction and reconstructed — verify against version control.
 */
guint8*
mono_arm_emit_aotconst (gpointer ji_list, guint8 *code, guint8 *buf, int dreg, int patch_type, gconstpointer data)
{
	MonoJumpInfo **ji = (MonoJumpInfo**)ji_list;

	*ji = mono_patch_info_list_prepend (*ji, code - buf, (MonoJumpInfoType)patch_type, data);
	/* Load the GOT offset placed in the inline slot right after this pair */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* Branch over the inline data word */
	ARM_B (code, 0);
	*(gpointer*)code = NULL;
	code += 4;
	/* Load the value from the GOT */
	ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
	return code;
}
7524 mono_arch_load_function (MonoJitICallId jit_icall_id
)
7526 gpointer target
= NULL
;
7527 switch (jit_icall_id
) {
7528 #undef MONO_AOT_ICALL
7529 #define MONO_AOT_ICALL(x) case MONO_JIT_ICALL_ ## x: target = (gpointer)x; break;
7530 MONO_AOT_ICALL (mono_arm_resume_unwind
)
7531 MONO_AOT_ICALL (mono_arm_start_gsharedvt_call
)
7532 MONO_AOT_ICALL (mono_arm_throw_exception
)
7533 MONO_AOT_ICALL (mono_arm_throw_exception_by_token
)
7534 MONO_AOT_ICALL (mono_arm_unaligned_stack
)