/*
 * mini-arm.c: ARM backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 */
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset
= -1;
32 static gint lmf_addr_tls_offset
= -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex
;
39 static int v5_supported
= 0;
40 static int v7_supported
= 0;
41 static int thumb_supported
= 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page
;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page
;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page
;
55 gpointer bp_trigger_page
;
56 guint8
* bp_addrs
[MONO_ZERO_LEN_ARRAY
];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which binary incompat with the other.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset
= 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/*
 * A variant of ARM_LDR_IMM which can handle large offsets: when the offset
 * does not fit the 12-bit immediate field, it is materialized into
 * scratch_reg first and a register-offset load is emitted instead.
 * scratch_reg must differ from basereg or the base would be clobbered.
 */
#define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)

/* Store counterpart of ARM_LDR_IMM_GENERAL: same large-offset strategy. */
#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for integer register REG (0..15),
 * used by debug/trace output. Out-of-range values yield "unknown".
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for floating-point register REG (0..31),
 * used by debug/trace output. Out-of-range values yield "unknown".
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
140 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
142 int imm8
, rot_amount
;
143 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
144 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
147 g_assert (dreg
!= sreg
);
148 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
149 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
154 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
156 /* we can use r0-r3, since this is called only for incoming args on the stack */
157 if (size
> sizeof (gpointer
) * 4) {
159 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
160 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
161 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
162 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
163 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
164 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
165 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
166 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
167 ARM_B_COND (code
, ARMCOND_NE
, 0);
168 arm_patch (code
- 4, start_loop
);
171 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
172 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
174 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
175 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
181 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
182 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
183 doffset
= soffset
= 0;
185 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
186 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
192 g_assert (size
== 0);
197 emit_call_reg (guint8
*code
, int reg
)
200 ARM_BLX_REG (code
, reg
);
202 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
206 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
212 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
214 if (cfg
->method
->dynamic
) {
215 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
217 *(gpointer
*)code
= NULL
;
219 code
= emit_call_reg (code
, ARMREG_IP
);
227 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
229 switch (ins
->opcode
) {
232 case OP_FCALL_MEMBASE
:
234 if (ins
->dreg
!= ARM_FPA_F0
)
235 ARM_MVFD (code
, ins
->dreg
, ARM_FPA_F0
);
236 #elif defined(ARM_FPU_VFP)
237 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
238 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
239 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
241 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
251 * mono_arch_get_argument_info:
252 * @csig: a method signature
253 * @param_count: the number of parameters to consider
254 * @arg_info: an array to store the result infos
256 * Gathers information on parameters such as size, alignment and
257 * padding. arg_info should be large enought to hold param_count + 1 entries.
259 * Returns the size of the activation frame.
262 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
264 int k
, frame_size
= 0;
265 guint32 size
, align
, pad
;
268 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
269 frame_size
+= sizeof (gpointer
);
273 arg_info
[0].offset
= offset
;
276 frame_size
+= sizeof (gpointer
);
280 arg_info
[0].size
= frame_size
;
282 for (k
= 0; k
< param_count
; k
++) {
283 size
= mini_type_stack_size_full (NULL
, csig
->params
[k
], &align
, csig
->pinvoke
);
285 /* ignore alignment for now */
288 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
289 arg_info
[k
].pad
= pad
;
291 arg_info
[k
+ 1].pad
= 0;
292 arg_info
[k
+ 1].size
= size
;
294 arg_info
[k
+ 1].offset
= offset
;
298 align
= MONO_ARCH_FRAME_ALIGNMENT
;
299 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
300 arg_info
[k
].pad
= pad
;
306 decode_vcall_slot_from_ldr (guint32 ldr
, mgreg_t
*regs
, int *displacement
)
310 reg
= (ldr
>> 16 ) & 0xf;
311 offset
= ldr
& 0xfff;
312 if (((ldr
>> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
314 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
315 o
= (gpointer
)regs
[reg
];
317 *displacement
= offset
;
322 mono_arch_get_vcall_slot (guint8
*code_ptr
, mgreg_t
*regs
, int *displacement
)
324 guint32
* code
= (guint32
*)code_ptr
;
326 /* Locate the address of the method-specific trampoline. The call using
327 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
328 looks something like this:
337 The call sequence could be also:
340 function pointer literal
344 Note that on ARM5+ we can use one instruction instead of the last two.
345 Therefore, we need to locate the 'ldr rA' instruction to know which
346 register was used to hold the method addrs.
349 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
352 /* Three possible code sequences can happen here:
356 * ldr pc, [rX - #offset]
362 * ldr pc, [rX - #offset]
364 * direct branch with bl:
368 * direct branch with mov:
372 * We only need to identify interface and virtual calls, the others can be ignored.
375 if (IS_LDR_PC (code
[-1]) && code
[-2] == ADD_LR_PC_4
)
376 return decode_vcall_slot_from_ldr (code
[-1], regs
, displacement
);
378 if (IS_LDR_PC (code
[0]) && code
[-1] == MOV_LR_PC
)
379 return decode_vcall_slot_from_ldr (code
[0], regs
, displacement
);
384 #define MAX_ARCH_DELEGATE_PARAMS 3
387 get_delegate_invoke_impl (gboolean has_target
, gboolean param_count
, guint32
*code_size
)
389 guint8
*code
, *start
;
392 start
= code
= mono_global_codeman_reserve (12);
394 /* Replace the this argument with the target */
395 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
396 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, target
));
397 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
399 g_assert ((code
- start
) <= 12);
401 mono_arch_flush_icache (start
, 12);
405 size
= 8 + param_count
* 4;
406 start
= code
= mono_global_codeman_reserve (size
);
408 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
409 /* slide down the arguments */
410 for (i
= 0; i
< param_count
; ++i
) {
411 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
413 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
415 g_assert ((code
- start
) <= size
);
417 mono_arch_flush_icache (start
, size
);
421 *code_size
= code
- start
;
427 * mono_arch_get_delegate_invoke_impls:
429 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
433 mono_arch_get_delegate_invoke_impls (void)
440 code
= get_delegate_invoke_impl (TRUE
, 0, &code_len
);
441 res
= g_slist_prepend (res
, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code
, code_len
));
443 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
444 code
= get_delegate_invoke_impl (FALSE
, i
, &code_len
);
445 res
= g_slist_prepend (res
, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i
), code
, code_len
));
452 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
454 guint8
*code
, *start
;
456 /* FIXME: Support more cases */
457 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
461 static guint8
* cached
= NULL
;
462 mono_mini_arch_lock ();
464 mono_mini_arch_unlock ();
469 start
= mono_aot_get_named_code ("delegate_invoke_impl_has_target");
471 start
= get_delegate_invoke_impl (TRUE
, 0, NULL
);
473 mono_mini_arch_unlock ();
476 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
479 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
481 for (i
= 0; i
< sig
->param_count
; ++i
)
482 if (!mono_is_regsize_var (sig
->params
[i
]))
485 mono_mini_arch_lock ();
486 code
= cache
[sig
->param_count
];
488 mono_mini_arch_unlock ();
493 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
494 start
= mono_aot_get_named_code (name
);
497 start
= get_delegate_invoke_impl (FALSE
, sig
->param_count
, NULL
);
499 cache
[sig
->param_count
] = start
;
500 mono_mini_arch_unlock ();
508 mono_arch_get_this_arg_from_call (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, mgreg_t
*regs
, guint8
*code
)
510 /* FIXME: handle returning a struct */
511 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
512 return (gpointer
)regs
[ARMREG_R1
];
513 return (gpointer
)regs
[ARMREG_R0
];
/*
 * Initialize the cpu to execute managed code.
 * Nothing to do on ARM.
 */
void
mono_arch_cpu_init (void)
{
}
525 * Initialize architecture specific code.
528 mono_arch_init (void)
530 InitializeCriticalSection (&mini_arch_mutex
);
532 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
533 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
534 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
/*
 * Cleanup architecture specific code.
 * Nothing to release here; the trigger pages live for the process lifetime.
 */
void
mono_arch_cleanup (void)
{
}
546 * This function returns the optimizations supported on this cpu.
549 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
553 thumb_supported
= TRUE
;
558 FILE *file
= fopen ("/proc/cpuinfo", "r");
560 while ((line
= fgets (buf
, 512, file
))) {
561 if (strncmp (line
, "Processor", 9) == 0) {
562 char *ver
= strstr (line
, "(v");
563 if (ver
&& (ver
[2] == '5' || ver
[2] == '6' || ver
[2] == '7'))
565 if (ver
&& (ver
[2] == '7'))
569 if (strncmp (line
, "Features", 8) == 0) {
570 char *th
= strstr (line
, "thumb");
572 thumb_supported
= TRUE
;
580 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
584 /* no arm-specific optimizations yet */
590 is_regsize_var (MonoType
*t
) {
593 t
= mini_type_get_underlying_type (NULL
, t
);
600 case MONO_TYPE_FNPTR
:
602 case MONO_TYPE_OBJECT
:
603 case MONO_TYPE_STRING
:
604 case MONO_TYPE_CLASS
:
605 case MONO_TYPE_SZARRAY
:
606 case MONO_TYPE_ARRAY
:
608 case MONO_TYPE_GENERICINST
:
609 if (!mono_type_generic_inst_is_valuetype (t
))
612 case MONO_TYPE_VALUETYPE
:
619 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
624 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
625 MonoInst
*ins
= cfg
->varinfo
[i
];
626 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
629 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
632 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
635 /* we can only allocate 32 bit values */
636 if (is_regsize_var (ins
->inst_vtype
)) {
637 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
638 g_assert (i
== vmv
->idx
);
639 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
646 #define USE_EXTRA_TEMPS 0
649 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
654 * FIXME: Interface calls might go through a static rgctx trampoline which
655 * sets V5, but it doesn't save it, so we need to save it ourselves, and
658 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
659 cfg
->uses_rgctx_reg
= TRUE
;
661 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
662 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
663 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
664 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
665 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
))
666 /* V5 is reserved for passing the vtable/rgctx/IMT method */
667 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
668 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
669 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
675 * mono_arch_regalloc_cost:
677 * Return the cost, in number of memory references, of the action of
678 * allocating the variable VMV into a register during global register
682 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
688 #ifndef __GNUC_PREREQ
689 #define __GNUC_PREREQ(maj, min) (0)
693 mono_arch_flush_icache (guint8
*code
, gint size
)
696 sys_icache_invalidate (code
, size
);
697 #elif __GNUC_PREREQ(4, 1)
698 __clear_cache (code
, code
+ size
);
699 #elif defined(PLATFORM_ANDROID)
700 const int syscall
= 0xf0002;
708 : "r" (code
), "r" (code
+ size
), "r" (syscall
)
709 : "r0", "r1", "r7", "r2"
712 __asm
__volatile ("mov r0, %0\n"
715 "swi 0x9f0002 @ sys_cacheflush"
717 : "r" (code
), "r" (code
+ size
), "r" (0)
718 : "r0", "r1", "r3" );
735 guint16 vtsize
; /* in param area */
738 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
745 gboolean vtype_retaddr
;
754 /*#define __alignof__(a) sizeof(a)*/
755 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
761 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
764 if (*gr
> ARMREG_R3
) {
765 ainfo
->offset
= *stack_size
;
766 ainfo
->reg
= ARMREG_SP
; /* in the caller */
767 ainfo
->storage
= RegTypeBase
;
770 ainfo
->storage
= RegTypeGeneral
;
774 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
777 int i8_align
= __alignof__ (gint64
);
781 gboolean split
= i8_align
== 4;
783 gboolean split
= TRUE
;
786 if (*gr
== ARMREG_R3
&& split
) {
787 /* first word in r3 and the second on the stack */
788 ainfo
->offset
= *stack_size
;
789 ainfo
->reg
= ARMREG_SP
; /* in the caller */
790 ainfo
->storage
= RegTypeBaseGen
;
792 } else if (*gr
>= ARMREG_R3
) {
794 /* darwin aligns longs to 4 byte only */
800 ainfo
->offset
= *stack_size
;
801 ainfo
->reg
= ARMREG_SP
; /* in the caller */
802 ainfo
->storage
= RegTypeBase
;
806 if (i8_align
== 8 && ((*gr
) & 1))
809 ainfo
->storage
= RegTypeIRegPair
;
818 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
821 int n
= sig
->hasthis
+ sig
->param_count
;
822 MonoType
*simpletype
;
823 guint32 stack_size
= 0;
827 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
829 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
834 /* FIXME: handle returning a struct */
835 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
838 if (is_pinvoke
&& mono_class_native_size (mono_class_from_mono_type (sig
->ret
), &align
) <= sizeof (gpointer
)) {
839 cinfo
->ret
.storage
= RegTypeStructByVal
;
841 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
842 cinfo
->struct_ret
= ARMREG_R0
;
843 cinfo
->vtype_retaddr
= TRUE
;
849 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
852 DEBUG(printf("params: %d\n", sig
->param_count
));
853 for (i
= 0; i
< sig
->param_count
; ++i
) {
854 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
855 /* Prevent implicit arguments and sig_cookie from
856 being passed in registers */
858 /* Emit the signature cookie just before the implicit arguments */
859 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
861 DEBUG(printf("param %d: ", i
));
862 if (sig
->params
[i
]->byref
) {
863 DEBUG(printf("byref\n"));
864 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
868 simpletype
= mini_type_get_underlying_type (NULL
, sig
->params
[i
]);
869 switch (simpletype
->type
) {
870 case MONO_TYPE_BOOLEAN
:
873 cinfo
->args
[n
].size
= 1;
874 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
880 cinfo
->args
[n
].size
= 2;
881 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
886 cinfo
->args
[n
].size
= 4;
887 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
893 case MONO_TYPE_FNPTR
:
894 case MONO_TYPE_CLASS
:
895 case MONO_TYPE_OBJECT
:
896 case MONO_TYPE_STRING
:
897 case MONO_TYPE_SZARRAY
:
898 case MONO_TYPE_ARRAY
:
900 cinfo
->args
[n
].size
= sizeof (gpointer
);
901 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
904 case MONO_TYPE_GENERICINST
:
905 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
906 cinfo
->args
[n
].size
= sizeof (gpointer
);
907 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
912 case MONO_TYPE_TYPEDBYREF
:
913 case MONO_TYPE_VALUETYPE
: {
919 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
) {
920 size
= sizeof (MonoTypedRef
);
921 align
= sizeof (gpointer
);
923 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
925 size
= mono_class_native_size (klass
, &align
);
927 size
= mono_class_value_size (klass
, &align
);
929 DEBUG(printf ("load %d bytes struct\n",
930 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
933 align_size
+= (sizeof (gpointer
) - 1);
934 align_size
&= ~(sizeof (gpointer
) - 1);
935 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
936 cinfo
->args
[n
].storage
= RegTypeStructByVal
;
937 /* FIXME: align stack_size if needed */
939 if (align
>= 8 && (gr
& 1))
942 if (gr
> ARMREG_R3
) {
943 cinfo
->args
[n
].size
= 0;
944 cinfo
->args
[n
].vtsize
= nwords
;
946 int rest
= ARMREG_R3
- gr
+ 1;
947 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
949 cinfo
->args
[n
].size
= n_in_regs
;
950 cinfo
->args
[n
].vtsize
= nwords
- n_in_regs
;
951 cinfo
->args
[n
].reg
= gr
;
955 cinfo
->args
[n
].offset
= stack_size
;
956 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
957 stack_size
+= nwords
* sizeof (gpointer
);
964 cinfo
->args
[n
].size
= 8;
965 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, FALSE
);
969 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
973 /* Handle the case where there are no implicit arguments */
974 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
975 /* Prevent implicit arguments and sig_cookie from
976 being passed in registers */
978 /* Emit the signature cookie just before the implicit arguments */
979 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
983 simpletype
= mini_type_get_underlying_type (NULL
, sig
->ret
);
984 switch (simpletype
->type
) {
985 case MONO_TYPE_BOOLEAN
:
996 case MONO_TYPE_FNPTR
:
997 case MONO_TYPE_CLASS
:
998 case MONO_TYPE_OBJECT
:
999 case MONO_TYPE_SZARRAY
:
1000 case MONO_TYPE_ARRAY
:
1001 case MONO_TYPE_STRING
:
1002 cinfo
->ret
.storage
= RegTypeGeneral
;
1003 cinfo
->ret
.reg
= ARMREG_R0
;
1007 cinfo
->ret
.storage
= RegTypeIRegPair
;
1008 cinfo
->ret
.reg
= ARMREG_R0
;
1012 cinfo
->ret
.storage
= RegTypeFP
;
1013 cinfo
->ret
.reg
= ARMREG_R0
;
1014 /* FIXME: cinfo->ret.reg = ???;
1015 cinfo->ret.storage = RegTypeFP;*/
1017 case MONO_TYPE_GENERICINST
:
1018 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1019 cinfo
->ret
.storage
= RegTypeGeneral
;
1020 cinfo
->ret
.reg
= ARMREG_R0
;
1024 case MONO_TYPE_VALUETYPE
:
1025 case MONO_TYPE_TYPEDBYREF
:
1026 if (cinfo
->ret
.storage
!= RegTypeStructByVal
)
1027 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1029 case MONO_TYPE_VOID
:
1032 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1036 /* align stack size to 8 */
1037 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1038 stack_size
= (stack_size
+ 7) & ~7;
1040 cinfo
->stack_usage
= stack_size
;
1046 * Set var information according to the calling convention. arm version.
1047 * The locals var stuff should most likely be split in another method.
1050 mono_arch_allocate_vars (MonoCompile
*cfg
)
1052 MonoMethodSignature
*sig
;
1053 MonoMethodHeader
*header
;
1055 int i
, offset
, size
, align
, curinst
;
1056 int frame_reg
= ARMREG_FP
;
1060 sig
= mono_method_signature (cfg
->method
);
1062 if (!cfg
->arch
.cinfo
)
1063 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1064 cinfo
= cfg
->arch
.cinfo
;
1066 /* FIXME: this will change when we use FP as gcc does */
1067 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1069 /* allow room for the vararg method args: void* and long/double */
1070 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1071 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
1073 header
= mono_method_get_header (cfg
->method
);
1076 * We use the frame register also for any method that has
1077 * exception clauses. This way, when the handlers are called,
1078 * the code will reference local variables using the frame reg instead of
1079 * the stack pointer: if we had to restore the stack pointer, we'd
1080 * corrupt the method frames that are already on the stack (since
1081 * filters get called before stack unwinding happens) when the filter
1082 * code would call any method (this also applies to finally etc.).
1084 if ((cfg
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1085 frame_reg
= ARMREG_FP
;
1086 cfg
->frame_reg
= frame_reg
;
1087 if (frame_reg
!= ARMREG_SP
) {
1088 cfg
->used_int_regs
|= 1 << frame_reg
;
1091 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
)
1092 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1093 cfg
->used_int_regs
|= (1 << ARMREG_V5
);
1097 if (!MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1098 switch (mini_type_get_underlying_type (NULL
, sig
->ret
)->type
) {
1099 case MONO_TYPE_VOID
:
1102 cfg
->ret
->opcode
= OP_REGVAR
;
1103 cfg
->ret
->inst_c0
= ARMREG_R0
;
1107 /* local vars are at a positive offset from the stack pointer */
1109 * also note that if the function uses alloca, we use FP
1110 * to point at the local variables.
1112 offset
= 0; /* linkage area */
1113 /* align the offset to 16 bytes: not sure this is needed here */
1115 //offset &= ~(8 - 1);
1117 /* add parameter area size for called functions */
1118 offset
+= cfg
->param_area
;
1121 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1124 /* allow room to save the return value */
1125 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1128 /* the MonoLMF structure is stored just below the stack pointer */
1129 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1130 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
1131 cfg
->ret
->opcode
= OP_REGOFFSET
;
1132 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1133 offset
+= sizeof (gpointer
) - 1;
1134 offset
&= ~(sizeof (gpointer
) - 1);
1135 cfg
->ret
->inst_offset
= - offset
;
1137 inst
= cfg
->vret_addr
;
1138 offset
+= sizeof(gpointer
) - 1;
1139 offset
&= ~(sizeof(gpointer
) - 1);
1140 inst
->inst_offset
= offset
;
1141 inst
->opcode
= OP_REGOFFSET
;
1142 inst
->inst_basereg
= frame_reg
;
1143 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1144 printf ("vret_addr =");
1145 mono_print_ins (cfg
->vret_addr
);
1148 offset
+= sizeof(gpointer
);
1151 curinst
= cfg
->locals_start
;
1152 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
1153 inst
= cfg
->varinfo
[i
];
1154 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
)
1157 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1158 * pinvoke wrappers when they call functions returning structure */
1159 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
) {
1160 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), &ualign
);
1164 size
= mono_type_size (inst
->inst_vtype
, &align
);
1166 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1167 * since it loads/stores misaligned words, which don't do the right thing.
1169 if (align
< 4 && size
>= 4)
1171 offset
+= align
- 1;
1172 offset
&= ~(align
- 1);
1173 inst
->inst_offset
= offset
;
1174 inst
->opcode
= OP_REGOFFSET
;
1175 inst
->inst_basereg
= frame_reg
;
1177 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1182 inst
= cfg
->args
[curinst
];
1183 if (inst
->opcode
!= OP_REGVAR
) {
1184 inst
->opcode
= OP_REGOFFSET
;
1185 inst
->inst_basereg
= frame_reg
;
1186 offset
+= sizeof (gpointer
) - 1;
1187 offset
&= ~(sizeof (gpointer
) - 1);
1188 inst
->inst_offset
= offset
;
1189 offset
+= sizeof (gpointer
);
1194 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1198 /* Allocate a local slot to hold the sig cookie address */
1199 offset
+= align
- 1;
1200 offset
&= ~(align
- 1);
1201 cfg
->sig_cookie
= offset
;
1205 for (i
= 0; i
< sig
->param_count
; ++i
) {
1206 inst
= cfg
->args
[curinst
];
1208 if (inst
->opcode
!= OP_REGVAR
) {
1209 inst
->opcode
= OP_REGOFFSET
;
1210 inst
->inst_basereg
= frame_reg
;
1211 size
= mini_type_stack_size_full (NULL
, sig
->params
[i
], &ualign
, sig
->pinvoke
);
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
1216 if (align
< 4 && size
>= 4)
1218 /* The code in the prolog () stores words when storing vtypes received in a register */
1219 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
1221 offset
+= align
- 1;
1222 offset
&= ~(align
- 1);
1223 inst
->inst_offset
= offset
;
1229 /* align the offset to 8 bytes */
1234 cfg
->stack_offset
= offset
;
1238 mono_arch_create_vars (MonoCompile
*cfg
)
1240 MonoMethodSignature
*sig
;
1243 sig
= mono_method_signature (cfg
->method
);
1245 if (!cfg
->arch
.cinfo
)
1246 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1247 cinfo
= cfg
->arch
.cinfo
;
1249 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1250 cfg
->ret_var_is_local
= TRUE
;
1252 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
!= RegTypeStructByVal
) {
1253 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1254 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1255 printf ("vret_addr = ");
1256 mono_print_ins (cfg
->vret_addr
);
1260 if (cfg
->gen_seq_points
&& cfg
->compile_aot
) {
1261 MonoInst
*ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1262 ins
->flags
|= MONO_INST_VOLATILE
;
1263 cfg
->arch
.seq_point_info_var
= ins
;
1265 /* Allocate a separate variable for this to save 1 load per seq point */
1266 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1267 ins
->flags
|= MONO_INST_VOLATILE
;
1268 cfg
->arch
.ss_trigger_page_var
= ins
;
1273 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1275 MonoMethodSignature
*tmp_sig
;
1278 if (call
->tail_call
)
1281 /* FIXME: Add support for signature tokens to AOT */
1282 cfg
->disable_aot
= TRUE
;
1284 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
1287 * mono_ArgIterator_Setup assumes the signature cookie is
1288 * passed first and all the arguments which were before it are
1289 * passed on the stack after the signature. So compensate by
1290 * passing a different signature.
1292 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1293 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1294 tmp_sig
->sentinelpos
= 0;
1295 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1297 MONO_INST_NEW (cfg
, sig_arg
, OP_ICONST
);
1298 sig_arg
->dreg
= mono_alloc_ireg (cfg
);
1299 sig_arg
->inst_p0
= tmp_sig
;
1300 MONO_ADD_INS (cfg
->cbb
, sig_arg
);
1302 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_arg
->dreg
);
1307 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
1312 LLVMCallInfo
*linfo
;
1314 n
= sig
->param_count
+ sig
->hasthis
;
1316 cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1318 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
1321 * LLVM always uses the native ABI while we use our own ABI, the
1322 * only difference is the handling of vtypes:
1323 * - we only pass/receive them in registers in some cases, and only
1324 * in 1 or 2 integer registers.
1326 if (cinfo
->ret
.storage
!= RegTypeGeneral
&& cinfo
->ret
.storage
!= RegTypeNone
&& cinfo
->ret
.storage
!= RegTypeFP
) {
1327 cfg
->exception_message
= g_strdup ("unknown ret conv");
1328 cfg
->disable_llvm
= TRUE
;
1332 for (i
= 0; i
< n
; ++i
) {
1333 ainfo
= cinfo
->args
+ i
;
1335 linfo
->args
[i
].storage
= LLVMArgNone
;
1337 switch (ainfo
->storage
) {
1338 case RegTypeGeneral
:
1339 case RegTypeIRegPair
:
1340 linfo
->args
[i
].storage
= LLVMArgInIReg
;
1343 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
1344 cfg
->disable_llvm
= TRUE
;
1354 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1357 MonoMethodSignature
*sig
;
1361 sig
= call
->signature
;
1362 n
= sig
->param_count
+ sig
->hasthis
;
1364 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
1366 for (i
= 0; i
< n
; ++i
) {
1367 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1370 if (i
>= sig
->hasthis
)
1371 t
= sig
->params
[i
- sig
->hasthis
];
1373 t
= &mono_defaults
.int_class
->byval_arg
;
1374 t
= mini_type_get_underlying_type (NULL
, t
);
1376 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1377 /* Emit the signature cookie just before the implicit arguments */
1378 emit_sig_cookie (cfg
, call
, cinfo
);
1381 in
= call
->args
[i
];
1383 switch (ainfo
->storage
) {
1384 case RegTypeGeneral
:
1385 case RegTypeIRegPair
:
1386 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1387 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1388 ins
->dreg
= mono_alloc_ireg (cfg
);
1389 ins
->sreg1
= in
->dreg
+ 1;
1390 MONO_ADD_INS (cfg
->cbb
, ins
);
1391 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1393 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1394 ins
->dreg
= mono_alloc_ireg (cfg
);
1395 ins
->sreg1
= in
->dreg
+ 2;
1396 MONO_ADD_INS (cfg
->cbb
, ins
);
1397 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1398 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
1399 #ifndef MONO_ARCH_SOFT_FLOAT
1403 if (ainfo
->size
== 4) {
1404 #ifdef MONO_ARCH_SOFT_FLOAT
1405 /* mono_emit_call_args () have already done the r8->r4 conversion */
1406 /* The converted value is in an int vreg */
1407 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1408 ins
->dreg
= mono_alloc_ireg (cfg
);
1409 ins
->sreg1
= in
->dreg
;
1410 MONO_ADD_INS (cfg
->cbb
, ins
);
1411 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1413 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1414 creg
= mono_alloc_ireg (cfg
);
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1416 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1419 #ifdef MONO_ARCH_SOFT_FLOAT
1420 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
1421 ins
->dreg
= mono_alloc_ireg (cfg
);
1422 ins
->sreg1
= in
->dreg
;
1423 MONO_ADD_INS (cfg
->cbb
, ins
);
1424 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1426 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
1427 ins
->dreg
= mono_alloc_ireg (cfg
);
1428 ins
->sreg1
= in
->dreg
;
1429 MONO_ADD_INS (cfg
->cbb
, ins
);
1430 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1432 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1433 creg
= mono_alloc_ireg (cfg
);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1435 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1436 creg
= mono_alloc_ireg (cfg
);
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
1438 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
1441 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1443 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1444 ins
->dreg
= mono_alloc_ireg (cfg
);
1445 ins
->sreg1
= in
->dreg
;
1446 MONO_ADD_INS (cfg
->cbb
, ins
);
1448 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1451 case RegTypeStructByAddr
:
1454 /* FIXME: where si the data allocated? */
1455 arg
->backend
.reg3
= ainfo
->reg
;
1456 call
->used_iregs
|= 1 << ainfo
->reg
;
1457 g_assert_not_reached ();
1460 case RegTypeStructByVal
:
1461 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1462 ins
->opcode
= OP_OUTARG_VT
;
1463 ins
->sreg1
= in
->dreg
;
1464 ins
->klass
= in
->klass
;
1465 ins
->inst_p0
= call
;
1466 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1467 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1468 MONO_ADD_INS (cfg
->cbb
, ins
);
1471 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1472 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1473 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1474 if (t
->type
== MONO_TYPE_R8
) {
1475 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1477 #ifdef MONO_ARCH_SOFT_FLOAT
1478 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1480 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1484 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1487 case RegTypeBaseGen
:
1488 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1489 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? in
->dreg
+ 1 : in
->dreg
+ 2);
1490 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1491 ins
->dreg
= mono_alloc_ireg (cfg
);
1492 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? in
->dreg
+ 2 : in
->dreg
+ 1;
1493 MONO_ADD_INS (cfg
->cbb
, ins
);
1494 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
1495 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
1498 #ifdef MONO_ARCH_SOFT_FLOAT
1499 g_assert_not_reached ();
1502 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1503 creg
= mono_alloc_ireg (cfg
);
1504 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
1505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1506 creg
= mono_alloc_ireg (cfg
);
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
1508 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
1509 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1511 g_assert_not_reached ();
1518 arg
->backend
.reg3
= ainfo
->reg
;
1519 /* FP args are passed in int regs */
1520 call
->used_iregs
|= 1 << ainfo
->reg
;
1521 if (ainfo
->size
== 8) {
1522 arg
->opcode
= OP_OUTARG_R8
;
1523 call
->used_iregs
|= 1 << (ainfo
->reg
+ 1);
1525 arg
->opcode
= OP_OUTARG_R4
;
1528 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1532 g_assert_not_reached ();
1536 /* Handle the case where there are no implicit arguments */
1537 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
1538 emit_sig_cookie (cfg
, call
, cinfo
);
1540 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1543 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
1544 /* The JIT will transform this into a normal call */
1545 call
->vret_in_reg
= TRUE
;
1547 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1548 vtarg
->sreg1
= call
->vret_var
->dreg
;
1549 vtarg
->dreg
= mono_alloc_preg (cfg
);
1550 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1552 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1556 call
->stack_usage
= cinfo
->stack_usage
;
1562 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1564 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1565 ArgInfo
*ainfo
= ins
->inst_p1
;
1566 int ovf_size
= ainfo
->vtsize
;
1567 int doffset
= ainfo
->offset
;
1568 int i
, soffset
, dreg
;
1571 for (i
= 0; i
< ainfo
->size
; ++i
) {
1572 dreg
= mono_alloc_ireg (cfg
);
1573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1574 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1575 soffset
+= sizeof (gpointer
);
1577 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1579 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, ovf_size
* sizeof (gpointer
), 0);
1583 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1585 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1588 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1591 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1592 ins
->sreg1
= val
->dreg
+ 1;
1593 ins
->sreg2
= val
->dreg
+ 2;
1594 MONO_ADD_INS (cfg
->cbb
, ins
);
1597 #ifdef MONO_ARCH_SOFT_FLOAT
1598 if (ret
->type
== MONO_TYPE_R8
) {
1601 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1602 ins
->dreg
= cfg
->ret
->dreg
;
1603 ins
->sreg1
= val
->dreg
;
1604 MONO_ADD_INS (cfg
->cbb
, ins
);
1607 if (ret
->type
== MONO_TYPE_R4
) {
1608 /* Already converted to an int in method_to_ir () */
1609 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1612 #elif defined(ARM_FPU_VFP)
1613 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1616 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1617 ins
->dreg
= cfg
->ret
->dreg
;
1618 ins
->sreg1
= val
->dreg
;
1619 MONO_ADD_INS (cfg
->cbb
, ins
);
1623 if (ret
->type
== MONO_TYPE_R4
|| ret
->type
== MONO_TYPE_R8
) {
1624 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1631 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1635 mono_arch_is_inst_imm (gint64 imm
)
1640 #define DYN_CALL_STACK_ARGS 6
1643 MonoMethodSignature
*sig
;
1648 mgreg_t regs
[PARAM_REGS
+ DYN_CALL_STACK_ARGS
];
1654 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
1658 if (sig
->hasthis
+ sig
->param_count
> PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1661 switch (cinfo
->ret
.storage
) {
1663 case RegTypeGeneral
:
1664 case RegTypeIRegPair
:
1665 case RegTypeStructByAddr
:
1670 #elif defined(ARM_FPU_VFP)
1679 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
1680 switch (cinfo
->args
[i
].storage
) {
1681 case RegTypeGeneral
:
1683 case RegTypeIRegPair
:
1686 if (cinfo
->args
[i
].offset
>= (DYN_CALL_STACK_ARGS
* sizeof (gpointer
)))
1689 case RegTypeStructByVal
:
1690 if (cinfo
->args
[i
].reg
+ cinfo
->args
[i
].vtsize
>= PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1698 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1699 for (i
= 0; i
< sig
->param_count
; ++i
) {
1700 MonoType
*t
= sig
->params
[i
];
1708 #ifdef MONO_ARCH_SOFT_FLOAT
1727 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
1729 ArchDynCallInfo
*info
;
1732 cinfo
= get_call_info (NULL
, sig
, FALSE
);
1734 if (!dyn_call_supported (cinfo
, sig
)) {
1739 info
= g_new0 (ArchDynCallInfo
, 1);
1740 // FIXME: Preprocess the info to speed up start_dyn_call ()
1742 info
->cinfo
= cinfo
;
1744 return (MonoDynCallInfo
*)info
;
1748 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
1750 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1752 g_free (ainfo
->cinfo
);
1757 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
, int buf_len
)
1759 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
1760 DynCallArgs
*p
= (DynCallArgs
*)buf
;
1761 int arg_index
, greg
, i
, j
;
1762 MonoMethodSignature
*sig
= dinfo
->sig
;
1764 g_assert (buf_len
>= sizeof (DynCallArgs
));
1772 if (dinfo
->cinfo
->vtype_retaddr
)
1773 p
->regs
[greg
++] = (mgreg_t
)ret
;
1776 p
->regs
[greg
++] = (mgreg_t
)*(args
[arg_index
++]);
1778 for (i
= 0; i
< sig
->param_count
; i
++) {
1779 MonoType
*t
= mono_type_get_underlying_type (sig
->params
[i
]);
1780 gpointer
*arg
= args
[arg_index
++];
1781 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
1784 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
)
1786 else if (ainfo
->storage
== RegTypeBase
)
1787 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1789 g_assert_not_reached ();
1792 p
->regs
[slot
] = (mgreg_t
)*arg
;
1797 case MONO_TYPE_STRING
:
1798 case MONO_TYPE_CLASS
:
1799 case MONO_TYPE_ARRAY
:
1800 case MONO_TYPE_SZARRAY
:
1801 case MONO_TYPE_OBJECT
:
1805 p
->regs
[slot
] = (mgreg_t
)*arg
;
1807 case MONO_TYPE_BOOLEAN
:
1809 p
->regs
[slot
] = *(guint8
*)arg
;
1812 p
->regs
[slot
] = *(gint8
*)arg
;
1815 p
->regs
[slot
] = *(gint16
*)arg
;
1818 case MONO_TYPE_CHAR
:
1819 p
->regs
[slot
] = *(guint16
*)arg
;
1822 p
->regs
[slot
] = *(gint32
*)arg
;
1825 p
->regs
[slot
] = *(guint32
*)arg
;
1829 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1830 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1833 p
->regs
[slot
] = *(mgreg_t
*)arg
;
1836 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1837 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1839 case MONO_TYPE_GENERICINST
:
1840 if (MONO_TYPE_IS_REFERENCE (t
)) {
1841 p
->regs
[slot
] = (mgreg_t
)*arg
;
1846 case MONO_TYPE_VALUETYPE
:
1847 g_assert (ainfo
->storage
== RegTypeStructByVal
);
1849 if (ainfo
->size
== 0)
1850 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1854 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
1855 p
->regs
[slot
++] = ((mgreg_t
*)arg
) [j
];
1858 g_assert_not_reached ();
1864 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
1866 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1867 MonoMethodSignature
*sig
= ((ArchDynCallInfo
*)info
)->sig
;
1868 guint8
*ret
= ((DynCallArgs
*)buf
)->ret
;
1869 mgreg_t res
= ((DynCallArgs
*)buf
)->res
;
1870 mgreg_t res2
= ((DynCallArgs
*)buf
)->res2
;
1872 switch (mono_type_get_underlying_type (sig
->ret
)->type
) {
1873 case MONO_TYPE_VOID
:
1874 *(gpointer
*)ret
= NULL
;
1876 case MONO_TYPE_STRING
:
1877 case MONO_TYPE_CLASS
:
1878 case MONO_TYPE_ARRAY
:
1879 case MONO_TYPE_SZARRAY
:
1880 case MONO_TYPE_OBJECT
:
1884 *(gpointer
*)ret
= (gpointer
)res
;
1890 case MONO_TYPE_BOOLEAN
:
1891 *(guint8
*)ret
= res
;
1894 *(gint16
*)ret
= res
;
1897 case MONO_TYPE_CHAR
:
1898 *(guint16
*)ret
= res
;
1901 *(gint32
*)ret
= res
;
1904 *(guint32
*)ret
= res
;
1908 /* This handles endianness as well */
1909 ((gint32
*)ret
) [0] = res
;
1910 ((gint32
*)ret
) [1] = res2
;
1912 case MONO_TYPE_GENERICINST
:
1913 if (MONO_TYPE_IS_REFERENCE (sig
->ret
)) {
1914 *(gpointer
*)ret
= (gpointer
)res
;
1919 case MONO_TYPE_VALUETYPE
:
1920 g_assert (ainfo
->cinfo
->vtype_retaddr
);
1923 #if defined(ARM_FPU_VFP)
1925 *(float*)ret
= *(float*)&res
;
1927 case MONO_TYPE_R8
: {
1933 *(double*)ret
= *(double*)®s
;
1938 g_assert_not_reached ();
1943 * Allow tracing to work with this interface (with an optional argument)
1947 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
1951 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
1952 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
1953 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
1954 code
= emit_call_reg (code
, ARMREG_R2
);
1967 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
1970 int save_mode
= SAVE_NONE
;
1972 MonoMethod
*method
= cfg
->method
;
1973 int rtype
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
)->type
;
1974 int save_offset
= cfg
->param_area
;
1978 offset
= code
- cfg
->native_code
;
1979 /* we need about 16 instructions */
1980 if (offset
> (cfg
->code_size
- 16 * 4)) {
1981 cfg
->code_size
*= 2;
1982 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1983 code
= cfg
->native_code
+ offset
;
1986 case MONO_TYPE_VOID
:
1987 /* special case string .ctor icall */
1988 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
1989 save_mode
= SAVE_ONE
;
1991 save_mode
= SAVE_NONE
;
1995 save_mode
= SAVE_TWO
;
1999 save_mode
= SAVE_FP
;
2001 case MONO_TYPE_VALUETYPE
:
2002 save_mode
= SAVE_STRUCT
;
2005 save_mode
= SAVE_ONE
;
2009 switch (save_mode
) {
2011 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2012 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
2013 if (enable_arguments
) {
2014 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
2015 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2019 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2020 if (enable_arguments
) {
2021 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2025 /* FIXME: what reg? */
2026 if (enable_arguments
) {
2027 /* FIXME: what reg? */
2031 if (enable_arguments
) {
2032 /* FIXME: get the actual address */
2033 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2041 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
2042 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
2043 code
= emit_call_reg (code
, ARMREG_IP
);
2045 switch (save_mode
) {
2047 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2048 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
2051 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
/*
 * The immediate field for cond branches is big enough for all reasonable methods
 */
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
	ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	ARM_B_COND (code, (condcode), 0); \
}

#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])

/* emit an exception if condition is fail
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
	do { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				    MONO_PATCH_INFO_EXC, exc_name); \
		ARM_BL_COND (code, (condcode), 0); \
	} while (0)

#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2092 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2097 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2099 MonoInst
*ins
, *n
, *last_ins
= NULL
;
2101 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
2102 switch (ins
->opcode
) {
2105 /* Already done by an arch-independent pass */
2107 case OP_LOAD_MEMBASE
:
2108 case OP_LOADI4_MEMBASE
:
2110 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2111 * OP_LOAD_MEMBASE offset(basereg), reg
2113 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
2114 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
2115 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2116 ins
->inst_offset
== last_ins
->inst_offset
) {
2117 if (ins
->dreg
== last_ins
->sreg1
) {
2118 MONO_DELETE_INS (bb
, ins
);
2121 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2122 ins
->opcode
= OP_MOVE
;
2123 ins
->sreg1
= last_ins
->sreg1
;
2127 * Note: reg1 must be different from the basereg in the second load
2128 * OP_LOAD_MEMBASE offset(basereg), reg1
2129 * OP_LOAD_MEMBASE offset(basereg), reg2
2131 * OP_LOAD_MEMBASE offset(basereg), reg1
2132 * OP_MOVE reg1, reg2
2134 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
2135 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
2136 ins
->inst_basereg
!= last_ins
->dreg
&&
2137 ins
->inst_basereg
== last_ins
->inst_basereg
&&
2138 ins
->inst_offset
== last_ins
->inst_offset
) {
2140 if (ins
->dreg
== last_ins
->dreg
) {
2141 MONO_DELETE_INS (bb
, ins
);
2144 ins
->opcode
= OP_MOVE
;
2145 ins
->sreg1
= last_ins
->dreg
;
2148 //g_assert_not_reached ();
2152 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2153 * OP_LOAD_MEMBASE offset(basereg), reg
2155 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2156 * OP_ICONST reg, imm
2158 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
2159 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
2160 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2161 ins
->inst_offset
== last_ins
->inst_offset
) {
2162 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2163 ins
->opcode
= OP_ICONST
;
2164 ins
->inst_c0
= last_ins
->inst_imm
;
2165 g_assert_not_reached (); // check this rule
2169 case OP_LOADU1_MEMBASE
:
2170 case OP_LOADI1_MEMBASE
:
2171 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2172 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2173 ins
->inst_offset
== last_ins
->inst_offset
) {
2174 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2175 ins
->sreg1
= last_ins
->sreg1
;
2178 case OP_LOADU2_MEMBASE
:
2179 case OP_LOADI2_MEMBASE
:
2180 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2181 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2182 ins
->inst_offset
== last_ins
->inst_offset
) {
2183 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2184 ins
->sreg1
= last_ins
->sreg1
;
2188 ins
->opcode
= OP_MOVE
;
2192 if (ins
->dreg
== ins
->sreg1
) {
2193 MONO_DELETE_INS (bb
, ins
);
2197 * OP_MOVE sreg, dreg
2198 * OP_MOVE dreg, sreg
2200 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2201 ins
->sreg1
== last_ins
->dreg
&&
2202 ins
->dreg
== last_ins
->sreg1
) {
2203 MONO_DELETE_INS (bb
, ins
);
2211 bb
->last_ins
= last_ins
;
2215 * the branch_cc_table should maintain the order of these
2229 branch_cc_table
[] = {
/* Allocate a new instruction DEST with opcode OP and insert it before the
 * current instruction `ins` of the basic block `bb` being lowered. */
#define NEW_INS(cfg,dest,op) do {       \
		MONO_INST_NEW ((cfg), (dest), (op)); \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)
2249 map_to_reg_reg_op (int op
)
2258 case OP_COMPARE_IMM
:
2260 case OP_ICOMPARE_IMM
:
2274 case OP_LOAD_MEMBASE
:
2275 return OP_LOAD_MEMINDEX
;
2276 case OP_LOADI4_MEMBASE
:
2277 return OP_LOADI4_MEMINDEX
;
2278 case OP_LOADU4_MEMBASE
:
2279 return OP_LOADU4_MEMINDEX
;
2280 case OP_LOADU1_MEMBASE
:
2281 return OP_LOADU1_MEMINDEX
;
2282 case OP_LOADI2_MEMBASE
:
2283 return OP_LOADI2_MEMINDEX
;
2284 case OP_LOADU2_MEMBASE
:
2285 return OP_LOADU2_MEMINDEX
;
2286 case OP_LOADI1_MEMBASE
:
2287 return OP_LOADI1_MEMINDEX
;
2288 case OP_STOREI1_MEMBASE_REG
:
2289 return OP_STOREI1_MEMINDEX
;
2290 case OP_STOREI2_MEMBASE_REG
:
2291 return OP_STOREI2_MEMINDEX
;
2292 case OP_STOREI4_MEMBASE_REG
:
2293 return OP_STOREI4_MEMINDEX
;
2294 case OP_STORE_MEMBASE_REG
:
2295 return OP_STORE_MEMINDEX
;
2296 case OP_STORER4_MEMBASE_REG
:
2297 return OP_STORER4_MEMINDEX
;
2298 case OP_STORER8_MEMBASE_REG
:
2299 return OP_STORER8_MEMINDEX
;
2300 case OP_STORE_MEMBASE_IMM
:
2301 return OP_STORE_MEMBASE_REG
;
2302 case OP_STOREI1_MEMBASE_IMM
:
2303 return OP_STOREI1_MEMBASE_REG
;
2304 case OP_STOREI2_MEMBASE_IMM
:
2305 return OP_STOREI2_MEMBASE_REG
;
2306 case OP_STOREI4_MEMBASE_IMM
:
2307 return OP_STOREI4_MEMBASE_REG
;
2309 g_assert_not_reached ();
2313 * Remove from the instruction list the instructions that can't be
2314 * represented with very simple instructions with no register
2318 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2320 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
2321 int rot_amount
, imm8
, low_imm
;
2323 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2325 switch (ins
->opcode
) {
2329 case OP_COMPARE_IMM
:
2330 case OP_ICOMPARE_IMM
:
2344 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
2345 NEW_INS (cfg
, temp
, OP_ICONST
);
2346 temp
->inst_c0
= ins
->inst_imm
;
2347 temp
->dreg
= mono_alloc_ireg (cfg
);
2348 ins
->sreg2
= temp
->dreg
;
2349 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
2351 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
2357 if (ins
->inst_imm
== 1) {
2358 ins
->opcode
= OP_MOVE
;
2361 if (ins
->inst_imm
== 0) {
2362 ins
->opcode
= OP_ICONST
;
2366 imm8
= mono_is_power_of_two (ins
->inst_imm
);
2368 ins
->opcode
= OP_SHL_IMM
;
2369 ins
->inst_imm
= imm8
;
2372 NEW_INS (cfg
, temp
, OP_ICONST
);
2373 temp
->inst_c0
= ins
->inst_imm
;
2374 temp
->dreg
= mono_alloc_ireg (cfg
);
2375 ins
->sreg2
= temp
->dreg
;
2376 ins
->opcode
= OP_IMUL
;
2382 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
2383 /* ARM sets the C flag to 1 if there was _no_ overflow */
2384 ins
->next
->opcode
= OP_COND_EXC_NC
;
2386 case OP_LOCALLOC_IMM
:
2387 NEW_INS (cfg
, temp
, OP_ICONST
);
2388 temp
->inst_c0
= ins
->inst_imm
;
2389 temp
->dreg
= mono_alloc_ireg (cfg
);
2390 ins
->sreg1
= temp
->dreg
;
2391 ins
->opcode
= OP_LOCALLOC
;
2393 case OP_LOAD_MEMBASE
:
2394 case OP_LOADI4_MEMBASE
:
2395 case OP_LOADU4_MEMBASE
:
2396 case OP_LOADU1_MEMBASE
:
2397 /* we can do two things: load the immed in a register
2398 * and use an indexed load, or see if the immed can be
2399 * represented as an ad_imm + a load with a smaller offset
2400 * that fits. We just do the first for now, optimize later.
2402 if (arm_is_imm12 (ins
->inst_offset
))
2404 NEW_INS (cfg
, temp
, OP_ICONST
);
2405 temp
->inst_c0
= ins
->inst_offset
;
2406 temp
->dreg
= mono_alloc_ireg (cfg
);
2407 ins
->sreg2
= temp
->dreg
;
2408 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2410 case OP_LOADI2_MEMBASE
:
2411 case OP_LOADU2_MEMBASE
:
2412 case OP_LOADI1_MEMBASE
:
2413 if (arm_is_imm8 (ins
->inst_offset
))
2415 NEW_INS (cfg
, temp
, OP_ICONST
);
2416 temp
->inst_c0
= ins
->inst_offset
;
2417 temp
->dreg
= mono_alloc_ireg (cfg
);
2418 ins
->sreg2
= temp
->dreg
;
2419 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2421 case OP_LOADR4_MEMBASE
:
2422 case OP_LOADR8_MEMBASE
:
2423 if (arm_is_fpimm8 (ins
->inst_offset
))
2425 low_imm
= ins
->inst_offset
& 0x1ff;
2426 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
2427 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2428 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2429 temp
->sreg1
= ins
->inst_basereg
;
2430 temp
->dreg
= mono_alloc_ireg (cfg
);
2431 ins
->inst_basereg
= temp
->dreg
;
2432 ins
->inst_offset
= low_imm
;
2435 /* VFP/FPA doesn't have indexed load instructions */
2436 g_assert_not_reached ();
2438 case OP_STORE_MEMBASE_REG
:
2439 case OP_STOREI4_MEMBASE_REG
:
2440 case OP_STOREI1_MEMBASE_REG
:
2441 if (arm_is_imm12 (ins
->inst_offset
))
2443 NEW_INS (cfg
, temp
, OP_ICONST
);
2444 temp
->inst_c0
= ins
->inst_offset
;
2445 temp
->dreg
= mono_alloc_ireg (cfg
);
2446 ins
->sreg2
= temp
->dreg
;
2447 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2449 case OP_STOREI2_MEMBASE_REG
:
2450 if (arm_is_imm8 (ins
->inst_offset
))
2452 NEW_INS (cfg
, temp
, OP_ICONST
);
2453 temp
->inst_c0
= ins
->inst_offset
;
2454 temp
->dreg
= mono_alloc_ireg (cfg
);
2455 ins
->sreg2
= temp
->dreg
;
2456 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2458 case OP_STORER4_MEMBASE_REG
:
2459 case OP_STORER8_MEMBASE_REG
:
2460 if (arm_is_fpimm8 (ins
->inst_offset
))
2462 low_imm
= ins
->inst_offset
& 0x1ff;
2463 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
2464 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2465 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2466 temp
->sreg1
= ins
->inst_destbasereg
;
2467 temp
->dreg
= mono_alloc_ireg (cfg
);
2468 ins
->inst_destbasereg
= temp
->dreg
;
2469 ins
->inst_offset
= low_imm
;
2472 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2473 /* VFP/FPA doesn't have indexed store instructions */
2474 g_assert_not_reached ();
2476 case OP_STORE_MEMBASE_IMM
:
2477 case OP_STOREI1_MEMBASE_IMM
:
2478 case OP_STOREI2_MEMBASE_IMM
:
2479 case OP_STOREI4_MEMBASE_IMM
:
2480 NEW_INS (cfg
, temp
, OP_ICONST
);
2481 temp
->inst_c0
= ins
->inst_imm
;
2482 temp
->dreg
= mono_alloc_ireg (cfg
);
2483 ins
->sreg1
= temp
->dreg
;
2484 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2486 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
2488 gboolean swap
= FALSE
;
2492 /* Optimized away */
2497 /* Some fp compares require swapped operands */
2498 switch (ins
->next
->opcode
) {
2500 ins
->next
->opcode
= OP_FBLT
;
2504 ins
->next
->opcode
= OP_FBLT_UN
;
2508 ins
->next
->opcode
= OP_FBGE
;
2512 ins
->next
->opcode
= OP_FBGE_UN
;
2520 ins
->sreg1
= ins
->sreg2
;
2529 bb
->last_ins
= last_ins
;
2530 bb
->max_vreg
= cfg
->next_vreg
;
2534 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
2538 if (long_ins
->opcode
== OP_LNEG
) {
2540 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, ins
->dreg
+ 1, ins
->sreg1
+ 1, 0);
2541 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, ins
->dreg
+ 2, ins
->sreg1
+ 2, 0);
2547 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
2549 /* sreg is a float, dreg is an integer reg */
2551 ARM_FIXZ (code
, dreg
, sreg
);
2552 #elif defined(ARM_FPU_VFP)
2554 ARM_TOSIZD (code
, ARM_VFP_F0
, sreg
);
2556 ARM_TOUIZD (code
, ARM_VFP_F0
, sreg
);
2557 ARM_FMRS (code
, dreg
, ARM_VFP_F0
);
2561 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
2562 else if (size
== 2) {
2563 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2564 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
2568 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
2569 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
2570 } else if (size
== 2) {
2571 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2572 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
2580 const guchar
*target
;
2585 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
2588 search_thunk_slot (void *data
, int csize
, int bsize
, void *user_data
) {
2589 PatchData
*pdata
= (PatchData
*)user_data
;
2590 guchar
*code
= data
;
2591 guint32
*thunks
= data
;
2592 guint32
*endthunks
= (guint32
*)(code
+ bsize
);
2594 int difflow
, diffhigh
;
2596 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2597 difflow
= (char*)pdata
->code
- (char*)thunks
;
2598 diffhigh
= (char*)pdata
->code
- (char*)endthunks
;
2599 if (!((is_call_imm (thunks
) && is_call_imm (endthunks
)) || (is_call_imm (difflow
) && is_call_imm (diffhigh
))))
2603 * The thunk is composed of 3 words:
2604 * load constant from thunks [2] into ARM_IP
2607 * Note that the LR register is already setup
2609 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2610 if ((pdata
->found
== 2) || (pdata
->code
>= code
&& pdata
->code
<= code
+ csize
)) {
2611 while (thunks
< endthunks
) {
2612 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2613 if (thunks
[2] == (guint32
)pdata
->target
) {
2614 arm_patch (pdata
->code
, (guchar
*)thunks
);
2615 mono_arch_flush_icache (pdata
->code
, 4);
2618 } else if ((thunks
[0] == 0) && (thunks
[1] == 0) && (thunks
[2] == 0)) {
2619 /* found a free slot instead: emit thunk */
2620 /* ARMREG_IP is fine to use since this can't be an IMT call
2623 code
= (guchar
*)thunks
;
2624 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
2625 if (thumb_supported
)
2626 ARM_BX (code
, ARMREG_IP
);
2628 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
2629 thunks
[2] = (guint32
)pdata
->target
;
2630 mono_arch_flush_icache ((guchar
*)thunks
, 12);
2632 arm_patch (pdata
->code
, (guchar
*)thunks
);
2633 mono_arch_flush_icache (pdata
->code
, 4);
2637 /* skip 12 bytes, the size of the thunk */
2641 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
2647 handle_thunk (MonoDomain
*domain
, int absolute
, guchar
*code
, const guchar
*target
)
2652 domain
= mono_domain_get ();
2655 pdata
.target
= target
;
2656 pdata
.absolute
= absolute
;
2659 mono_domain_lock (domain
);
2660 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2663 /* this uses the first available slot */
2665 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2667 mono_domain_unlock (domain
);
2669 if (pdata
.found
!= 1)
2670 g_print ("thunk failed for %p from %p\n", target
, code
);
2671 g_assert (pdata
.found
== 1);
2675 arm_patch_general (MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
2677 guint32
*code32
= (void*)code
;
2678 guint32 ins
= *code32
;
2679 guint32 prim
= (ins
>> 25) & 7;
2680 guint32 tval
= GPOINTER_TO_UINT (target
);
2682 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2683 if (prim
== 5) { /* 101b */
2684 /* the diff starts 8 bytes from the branch opcode */
2685 gint diff
= target
- code
- 8;
2687 gint tmask
= 0xffffffff;
2688 if (tval
& 1) { /* entering thumb mode */
2689 diff
= target
- 1 - code
- 8;
2690 g_assert (thumb_supported
);
2691 tbits
= 0xf << 28; /* bl->blx bit pattern */
2692 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
2693 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2697 tmask
= ~(1 << 24); /* clear the link bit */
2698 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2703 if (diff
<= 33554431) {
2705 ins
= (ins
& 0xff000000) | diff
;
2707 *code32
= ins
| tbits
;
2711 /* diff between 0 and -33554432 */
2712 if (diff
>= -33554432) {
2714 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
2716 *code32
= ins
| tbits
;
2721 handle_thunk (domain
, TRUE
, code
, target
);
2726 * The alternative call sequences looks like this:
2728 * ldr ip, [pc] // loads the address constant
2729 * b 1f // jumps around the constant
2730 * address constant embedded in the code
2735 * There are two cases for patching:
2736 * a) at the end of method emission: in this case code points to the start
2737 * of the call sequence
2738 * b) during runtime patching of the call site: in this case code points
2739 * to the mov pc, ip instruction
2741 * We have to handle also the thunk jump code sequence:
2745 * address constant // execution never reaches here
2747 if ((ins
& 0x0ffffff0) == 0x12fff10) {
2748 /* Branch and exchange: the address is constructed in a reg
2749 * We can patch BX when the code sequence is the following:
2750 * ldr ip, [pc, #0] ; 0x8
2757 guint8
*emit
= (guint8
*)ccode
;
2758 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2760 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2761 ARM_BX (emit
, ARMREG_IP
);
2763 /*patching from magic trampoline*/
2764 if (ins
== ccode
[3]) {
2765 g_assert (code32
[-4] == ccode
[0]);
2766 g_assert (code32
[-3] == ccode
[1]);
2767 g_assert (code32
[-1] == ccode
[2]);
2768 code32
[-2] = (guint32
)target
;
2771 /*patching from JIT*/
2772 if (ins
== ccode
[0]) {
2773 g_assert (code32
[1] == ccode
[1]);
2774 g_assert (code32
[3] == ccode
[2]);
2775 g_assert (code32
[4] == ccode
[3]);
2776 code32
[2] = (guint32
)target
;
2779 g_assert_not_reached ();
2780 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
2788 guint8
*emit
= (guint8
*)ccode
;
2789 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2791 ARM_BLX_REG (emit
, ARMREG_IP
);
2793 g_assert (code32
[-3] == ccode
[0]);
2794 g_assert (code32
[-2] == ccode
[1]);
2795 g_assert (code32
[0] == ccode
[2]);
2797 code32
[-1] = (guint32
)target
;
2800 guint32
*tmp
= ccode
;
2801 guint8
*emit
= (guint8
*)tmp
;
2802 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2803 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2804 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
2805 ARM_BX (emit
, ARMREG_IP
);
2806 if (ins
== ccode
[2]) {
2807 g_assert_not_reached (); // should be -2 ...
2808 code32
[-1] = (guint32
)target
;
2811 if (ins
== ccode
[0]) {
2812 /* handles both thunk jump code and the far call sequence */
2813 code32
[2] = (guint32
)target
;
2816 g_assert_not_reached ();
2818 // g_print ("patched with 0x%08x\n", ins);
2822 arm_patch (guchar
*code
, const guchar
*target
)
2824 arm_patch_general (NULL
, code
, target
);
/*
 * mono_arm_is_rotated_imm8:
 *
 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
 * (with the rotation amount in *rot_amount, already adjusted to be used with
 * the emit macros: val == imm8 rotated right by *rot_amount).
 * Return -1 otherwise.
 */
int
mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
{
	guint32 res, i;

	for (i = 0; i < 31; i += 2) {
		/*
		 * Rotate val right by i bits. The i == 0 case must be handled
		 * explicitly: the old expression (val << (32 - i)) shifted by the
		 * full type width, which is undefined behavior in C and only worked
		 * because common hardware takes shift counts modulo 32.
		 */
		res = i ? ((val << (32 - i)) | (val >> i)) : val;
		if (res & ~0xff)
			continue;
		*rot_amount = i ? 32 - i : 0;
		return res;
	}
	return -1;
}
2848 * Emits in code a sequence of instructions that load the value 'val'
2849 * into the dreg register. Uses at most 4 instructions.
2852 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
2854 int imm8
, rot_amount
;
2856 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
2857 /* skip the constant pool */
2863 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
2864 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
2865 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
2866 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
2869 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
2871 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
2875 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
2877 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
2879 if (val
& 0xFF0000) {
2880 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2882 if (val
& 0xFF000000) {
2883 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2885 } else if (val
& 0xFF00) {
2886 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
2887 if (val
& 0xFF0000) {
2888 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2890 if (val
& 0xFF000000) {
2891 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2893 } else if (val
& 0xFF0000) {
2894 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
2895 if (val
& 0xFF000000) {
2896 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2899 //g_assert_not_reached ();
2905 mono_arm_thumb_supported (void)
2907 return thumb_supported
;
2911 * emit_load_volatile_arguments:
2913 * Load volatile arguments from the stack to the original input registers.
2914 * Required before a tail call.
2917 emit_load_volatile_arguments (MonoCompile
*cfg
, guint8
*code
)
2919 MonoMethod
*method
= cfg
->method
;
2920 MonoMethodSignature
*sig
;
2925 /* FIXME: Generate intermediate code instead */
2927 sig
= mono_method_signature (method
);
2929 /* This is the opposite of the code in emit_prolog */
2933 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
2935 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2936 ArgInfo
*ainfo
= &cinfo
->ret
;
2937 inst
= cfg
->vret_addr
;
2938 g_assert (arm_is_imm12 (inst
->inst_offset
));
2939 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2941 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2942 ArgInfo
*ainfo
= cinfo
->args
+ i
;
2943 inst
= cfg
->args
[pos
];
2945 if (cfg
->verbose_level
> 2)
2946 g_print ("Loading argument %d (type: %d)\n", i
, ainfo
->storage
);
2947 if (inst
->opcode
== OP_REGVAR
) {
2948 if (ainfo
->storage
== RegTypeGeneral
)
2949 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
2950 else if (ainfo
->storage
== RegTypeFP
) {
2951 g_assert_not_reached ();
2952 } else if (ainfo
->storage
== RegTypeBase
) {
2956 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2957 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2959 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2960 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2964 g_assert_not_reached ();
2966 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
) {
2967 switch (ainfo
->size
) {
2974 g_assert (arm_is_imm12 (inst
->inst_offset
));
2975 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2976 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
2977 ARM_LDR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
2980 if (arm_is_imm12 (inst
->inst_offset
)) {
2981 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2983 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
2984 ARM_LDR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
2988 } else if (ainfo
->storage
== RegTypeBaseGen
) {
2991 } else if (ainfo
->storage
== RegTypeBase
) {
2993 } else if (ainfo
->storage
== RegTypeFP
) {
2994 g_assert_not_reached ();
2995 } else if (ainfo
->storage
== RegTypeStructByVal
) {
2996 int doffset
= inst
->inst_offset
;
3000 if (mono_class_from_mono_type (inst
->inst_vtype
))
3001 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), NULL
);
3002 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
3003 if (arm_is_imm12 (doffset
)) {
3004 ARM_LDR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
3006 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
3007 ARM_LDR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
3009 soffset
+= sizeof (gpointer
);
3010 doffset
+= sizeof (gpointer
);
3015 } else if (ainfo
->storage
== RegTypeStructByAddr
) {
3032 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3037 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3038 MonoInst
*last_ins
= NULL
;
3039 guint last_offset
= 0;
3041 int imm8
, rot_amount
;
3043 /* we don't align basic blocks of loops on arm */
3045 if (cfg
->verbose_level
> 2)
3046 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3048 cpos
= bb
->max_offset
;
3050 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
3051 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3052 //g_assert (!mono_compile_aot);
3055 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3056 /* this is not thread save, but good enough */
3057 /* fixme: howto handle overflows? */
3058 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3061 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
3062 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3063 (gpointer
)"mono_break");
3064 code
= emit_call_seq (cfg
, code
);
3067 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3068 offset
= code
- cfg
->native_code
;
3070 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
3072 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3073 cfg
->code_size
*= 2;
3074 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3075 code
= cfg
->native_code
+ offset
;
3077 // if (ins->cil_code)
3078 // g_print ("cil code\n");
3079 mono_debug_record_line_number (cfg
, ins
, offset
);
3081 switch (ins
->opcode
) {
3082 case OP_MEMORY_BARRIER
:
3085 #ifdef HAVE_AEABI_READ_TP
3086 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3087 (gpointer
)"__aeabi_read_tp");
3088 code
= emit_call_seq (cfg
, code
);
3090 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_R0
, ins
->inst_offset
);
3092 g_assert_not_reached ();
3096 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3097 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3100 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3101 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3103 case OP_STOREI1_MEMBASE_IMM
:
3104 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
3105 g_assert (arm_is_imm12 (ins
->inst_offset
));
3106 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3108 case OP_STOREI2_MEMBASE_IMM
:
3109 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
3110 g_assert (arm_is_imm8 (ins
->inst_offset
));
3111 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3113 case OP_STORE_MEMBASE_IMM
:
3114 case OP_STOREI4_MEMBASE_IMM
:
3115 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
3116 g_assert (arm_is_imm12 (ins
->inst_offset
));
3117 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3119 case OP_STOREI1_MEMBASE_REG
:
3120 g_assert (arm_is_imm12 (ins
->inst_offset
));
3121 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3123 case OP_STOREI2_MEMBASE_REG
:
3124 g_assert (arm_is_imm8 (ins
->inst_offset
));
3125 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3127 case OP_STORE_MEMBASE_REG
:
3128 case OP_STOREI4_MEMBASE_REG
:
3129 /* this case is special, since it happens for spill code after lowering has been called */
3130 if (arm_is_imm12 (ins
->inst_offset
)) {
3131 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3133 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3134 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
3137 case OP_STOREI1_MEMINDEX
:
3138 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3140 case OP_STOREI2_MEMINDEX
:
3141 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3143 case OP_STORE_MEMINDEX
:
3144 case OP_STOREI4_MEMINDEX
:
3145 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3148 g_assert_not_reached ();
3150 case OP_LOAD_MEMINDEX
:
3151 case OP_LOADI4_MEMINDEX
:
3152 case OP_LOADU4_MEMINDEX
:
3153 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3155 case OP_LOADI1_MEMINDEX
:
3156 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3158 case OP_LOADU1_MEMINDEX
:
3159 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3161 case OP_LOADI2_MEMINDEX
:
3162 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3164 case OP_LOADU2_MEMINDEX
:
3165 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3167 case OP_LOAD_MEMBASE
:
3168 case OP_LOADI4_MEMBASE
:
3169 case OP_LOADU4_MEMBASE
:
3170 /* this case is special, since it happens for spill code after lowering has been called */
3171 if (arm_is_imm12 (ins
->inst_offset
)) {
3172 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3174 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3175 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
3178 case OP_LOADI1_MEMBASE
:
3179 g_assert (arm_is_imm8 (ins
->inst_offset
));
3180 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3182 case OP_LOADU1_MEMBASE
:
3183 g_assert (arm_is_imm12 (ins
->inst_offset
));
3184 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3186 case OP_LOADU2_MEMBASE
:
3187 g_assert (arm_is_imm8 (ins
->inst_offset
));
3188 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3190 case OP_LOADI2_MEMBASE
:
3191 g_assert (arm_is_imm8 (ins
->inst_offset
));
3192 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3194 case OP_ICONV_TO_I1
:
3195 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
3196 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
3198 case OP_ICONV_TO_I2
:
3199 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3200 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3202 case OP_ICONV_TO_U1
:
3203 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
3205 case OP_ICONV_TO_U2
:
3206 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3207 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3211 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
3213 case OP_COMPARE_IMM
:
3214 case OP_ICOMPARE_IMM
:
3215 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3216 g_assert (imm8
>= 0);
3217 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
3221 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3222 * So instead of emitting a trap, we emit a call a C function and place a
3225 //*(int*)code = 0xef9f0001;
3228 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3229 (gpointer
)"mono_break");
3230 code
= emit_call_seq (cfg
, code
);
3232 case OP_RELAXED_NOP
:
3237 case OP_DUMMY_STORE
:
3238 case OP_NOT_REACHED
:
3241 case OP_SEQ_POINT
: {
3243 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
3244 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
3246 int dreg
= ARMREG_LR
;
3249 * For AOT, we use one got slot per method, which will point to a
3250 * SeqPointInfo structure, containing all the information required
3251 * by the code below.
3253 if (cfg
->compile_aot
) {
3254 g_assert (info_var
);
3255 g_assert (info_var
->opcode
== OP_REGOFFSET
);
3256 g_assert (arm_is_imm12 (info_var
->inst_offset
));
3260 * Read from the single stepping trigger page. This will cause a
3261 * SIGSEGV when single stepping is enabled.
3262 * We do this _before_ the breakpoint, so single stepping after
3263 * a breakpoint is hit will step to the next IL offset.
3265 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
3267 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3268 if (cfg
->compile_aot
) {
3269 /* Load the trigger page addr from the variable initialized in the prolog */
3270 var
= ss_trigger_page_var
;
3272 g_assert (var
->opcode
== OP_REGOFFSET
);
3273 g_assert (arm_is_imm12 (var
->inst_offset
));
3274 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
3276 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
3278 *(int*)code
= (int)ss_trigger_page
;
3281 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3284 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3286 if (cfg
->compile_aot
) {
3287 guint32 offset
= code
- cfg
->native_code
;
3290 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
3291 /* Add the offset */
3292 val
= ((offset
/ 4) * sizeof (guint8
*)) + G_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
3293 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
3295 * Have to emit nops to keep the difference between the offset
3296 * stored in seq_points and breakpoint instruction constant,
3297 * mono_arch_get_ip_for_breakpoint () depends on this.
3300 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
3304 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
3307 g_assert (!(val
& 0xFF000000));
3308 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3309 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3311 /* What is faster, a branch or a load ? */
3312 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
3313 /* The breakpoint instruction */
3314 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
3317 * A placeholder for a possible breakpoint inserted by
3318 * mono_arch_set_breakpoint ().
3320 for (i
= 0; i
< 4; ++i
)
3327 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3330 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3334 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3337 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3338 g_assert (imm8
>= 0);
3339 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3343 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3344 g_assert (imm8
>= 0);
3345 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3349 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3350 g_assert (imm8
>= 0);
3351 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3354 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3355 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3357 case OP_IADD_OVF_UN
:
3358 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3359 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3362 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3363 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3365 case OP_ISUB_OVF_UN
:
3366 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3367 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3369 case OP_ADD_OVF_CARRY
:
3370 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3371 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3373 case OP_ADD_OVF_UN_CARRY
:
3374 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3375 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3377 case OP_SUB_OVF_CARRY
:
3378 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3379 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3381 case OP_SUB_OVF_UN_CARRY
:
3382 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3383 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3387 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3390 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3391 g_assert (imm8
>= 0);
3392 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3395 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3399 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3403 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3404 g_assert (imm8
>= 0);
3405 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3409 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3410 g_assert (imm8
>= 0);
3411 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3413 case OP_ARM_RSBS_IMM
:
3414 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3415 g_assert (imm8
>= 0);
3416 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3418 case OP_ARM_RSC_IMM
:
3419 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3420 g_assert (imm8
>= 0);
3421 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3424 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3428 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3429 g_assert (imm8
>= 0);
3430 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3438 /* crappy ARM arch doesn't have a DIV instruction */
3439 g_assert_not_reached ();
3441 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3445 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3446 g_assert (imm8
>= 0);
3447 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3450 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3454 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3455 g_assert (imm8
>= 0);
3456 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3459 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3464 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3465 else if (ins
->dreg
!= ins
->sreg1
)
3466 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3469 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3474 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3475 else if (ins
->dreg
!= ins
->sreg1
)
3476 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3479 case OP_ISHR_UN_IMM
:
3481 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3482 else if (ins
->dreg
!= ins
->sreg1
)
3483 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3486 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3489 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3492 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
3495 if (ins
->dreg
== ins
->sreg2
)
3496 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3498 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3501 g_assert_not_reached ();
3504 /* FIXME: handle ovf/ sreg2 != dreg */
3505 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3506 /* FIXME: MUL doesn't set the C/O flags on ARM */
3508 case OP_IMUL_OVF_UN
:
3509 /* FIXME: handle ovf/ sreg2 != dreg */
3510 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3511 /* FIXME: MUL doesn't set the C/O flags on ARM */
3514 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
3517 /* Load the GOT offset */
3518 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3519 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
3521 *(gpointer
*)code
= NULL
;
3523 /* Load the value from the GOT */
3524 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
3526 case OP_ICONV_TO_I4
:
3527 case OP_ICONV_TO_U4
:
3529 if (ins
->dreg
!= ins
->sreg1
)
3530 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3533 int saved
= ins
->sreg2
;
3534 if (ins
->sreg2
== ARM_LSW_REG
) {
3535 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
3538 if (ins
->sreg1
!= ARM_LSW_REG
)
3539 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
3540 if (saved
!= ARM_MSW_REG
)
3541 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
3546 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
3547 #elif defined(ARM_FPU_VFP)
3548 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
3551 case OP_FCONV_TO_R4
:
3553 ARM_MVFS (code
, ins
->dreg
, ins
->sreg1
);
3554 #elif defined(ARM_FPU_VFP)
3555 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
3556 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3561 * Keep in sync with mono_arch_emit_epilog
3563 g_assert (!cfg
->method
->save_lmf
);
3565 code
= emit_load_volatile_arguments (cfg
, code
);
3567 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
3568 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
)) | ((1 << ARMREG_LR
)));
3569 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
3570 if (cfg
->compile_aot
) {
3571 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3573 *(gpointer
*)code
= NULL
;
3575 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
3581 /* ensure ins->sreg1 is not NULL */
3582 ARM_LDR_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
3585 g_assert (cfg
->sig_cookie
< 128);
3586 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
3587 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
3596 call
= (MonoCallInst
*)ins
;
3597 if (ins
->flags
& MONO_INST_HAS_METHOD
)
3598 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
3600 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
3601 code
= emit_call_seq (cfg
, code
);
3602 code
= emit_move_return_value (cfg
, ins
, code
);
3608 case OP_VOIDCALL_REG
:
3610 code
= emit_call_reg (code
, ins
->sreg1
);
3611 code
= emit_move_return_value (cfg
, ins
, code
);
3613 case OP_FCALL_MEMBASE
:
3614 case OP_LCALL_MEMBASE
:
3615 case OP_VCALL_MEMBASE
:
3616 case OP_VCALL2_MEMBASE
:
3617 case OP_VOIDCALL_MEMBASE
:
3618 case OP_CALL_MEMBASE
:
3619 g_assert (arm_is_imm12 (ins
->inst_offset
));
3620 g_assert (ins
->sreg1
!= ARMREG_LR
);
3621 call
= (MonoCallInst
*)ins
;
3622 if (call
->method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3623 ARM_ADD_REG_IMM8 (code
, ARMREG_LR
, ARMREG_PC
, 4);
3624 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3626 * We can't embed the method in the code stream in PIC code, or
3628 * Instead, we put it in V5 in code emitted by
3629 * mono_arch_emit_imt_argument (), and embed NULL here to
3630 * signal the IMT thunk that the value is in V5.
3632 if (call
->dynamic_imt_arg
)
3633 *((gpointer
*)code
) = NULL
;
3635 *((gpointer
*)code
) = (gpointer
)call
->method
;
3638 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3639 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3641 code
= emit_move_return_value (cfg
, ins
, code
);
3644 /* keep alignment */
3645 int alloca_waste
= cfg
->param_area
;
3648 /* round the size to 8 bytes */
3649 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 7);
3650 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 7);
3652 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, alloca_waste
);
3653 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
3654 /* memzero the area: dreg holds the size, sp is the pointer */
3655 if (ins
->flags
& MONO_INST_INIT
) {
3656 guint8
*start_loop
, *branch_to_cond
;
3657 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
3658 branch_to_cond
= code
;
3661 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
3662 arm_patch (branch_to_cond
, code
);
3663 /* decrement by 4 and set flags */
3664 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 4);
3665 ARM_B_COND (code
, ARMCOND_GE
, 0);
3666 arm_patch (code
- 4, start_loop
);
3668 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ARMREG_SP
, alloca_waste
);
3673 MonoInst
*var
= cfg
->dyn_call_var
;
3675 g_assert (var
->opcode
== OP_REGOFFSET
);
3676 g_assert (arm_is_imm12 (var
->inst_offset
));
3678 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3679 ARM_MOV_REG_REG( code
, ARMREG_LR
, ins
->sreg1
);
3681 ARM_MOV_REG_REG( code
, ARMREG_IP
, ins
->sreg2
);
3683 /* Save args buffer */
3684 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
3686 /* Set stack slots using R0 as scratch reg */
3687 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3688 for (i
= 0; i
< DYN_CALL_STACK_ARGS
; ++i
) {
3689 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, (PARAM_REGS
+ i
) * sizeof (gpointer
));
3690 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_SP
, i
* sizeof (gpointer
));
3693 /* Set argument registers */
3694 for (i
= 0; i
< PARAM_REGS
; ++i
)
3695 ARM_LDR_IMM (code
, i
, ARMREG_LR
, i
* sizeof (gpointer
));
3698 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3699 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3702 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
3703 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res
));
3704 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res2
));
3708 if (ins
->sreg1
!= ARMREG_R0
)
3709 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3710 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3711 (gpointer
)"mono_arch_throw_exception");
3712 code
= emit_call_seq (cfg
, code
);
3716 if (ins
->sreg1
!= ARMREG_R0
)
3717 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3718 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3719 (gpointer
)"mono_arch_rethrow_exception");
3720 code
= emit_call_seq (cfg
, code
);
3723 case OP_START_HANDLER
: {
3724 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3726 if (arm_is_imm12 (spvar
->inst_offset
)) {
3727 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
3729 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3730 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
3734 case OP_ENDFILTER
: {
3735 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3737 if (ins
->sreg1
!= ARMREG_R0
)
3738 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3739 if (arm_is_imm12 (spvar
->inst_offset
)) {
3740 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3742 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3743 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3744 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3746 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3749 case OP_ENDFINALLY
: {
3750 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3752 if (arm_is_imm12 (spvar
->inst_offset
)) {
3753 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3755 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3756 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3757 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3759 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3762 case OP_CALL_HANDLER
:
3763 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3767 ins
->inst_c0
= code
- cfg
->native_code
;
3770 /*if (ins->inst_target_bb->native_offset) {
3772 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3774 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3779 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
3783 * In the normal case we have:
3784 * ldr pc, [pc, ins->sreg1 << 2]
3787 * ldr lr, [pc, ins->sreg1 << 2]
3789 * After follows the data.
3790 * FIXME: add aot support.
3792 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
3793 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3794 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3795 cfg
->code_size
+= max_len
;
3796 cfg
->code_size
*= 2;
3797 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3798 code
= cfg
->native_code
+ offset
;
3800 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
3802 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3806 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
3807 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
3811 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3812 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
3816 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3817 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
3821 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3822 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
3826 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3827 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
3829 case OP_COND_EXC_EQ
:
3830 case OP_COND_EXC_NE_UN
:
3831 case OP_COND_EXC_LT
:
3832 case OP_COND_EXC_LT_UN
:
3833 case OP_COND_EXC_GT
:
3834 case OP_COND_EXC_GT_UN
:
3835 case OP_COND_EXC_GE
:
3836 case OP_COND_EXC_GE_UN
:
3837 case OP_COND_EXC_LE
:
3838 case OP_COND_EXC_LE_UN
:
3839 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
3841 case OP_COND_EXC_IEQ
:
3842 case OP_COND_EXC_INE_UN
:
3843 case OP_COND_EXC_ILT
:
3844 case OP_COND_EXC_ILT_UN
:
3845 case OP_COND_EXC_IGT
:
3846 case OP_COND_EXC_IGT_UN
:
3847 case OP_COND_EXC_IGE
:
3848 case OP_COND_EXC_IGE_UN
:
3849 case OP_COND_EXC_ILE
:
3850 case OP_COND_EXC_ILE_UN
:
3851 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
3854 case OP_COND_EXC_IC
:
3855 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
3857 case OP_COND_EXC_OV
:
3858 case OP_COND_EXC_IOV
:
3859 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
3861 case OP_COND_EXC_NC
:
3862 case OP_COND_EXC_INC
:
3863 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
3865 case OP_COND_EXC_NO
:
3866 case OP_COND_EXC_INO
:
3867 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
3879 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
3882 /* floating point opcodes */
3885 if (cfg
->compile_aot
) {
3886 ARM_LDFD (code
, ins
->dreg
, ARMREG_PC
, 0);
3888 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3890 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3893 /* FIXME: we can optimize the imm load by dealing with part of
3894 * the displacement in LDFD (aligning to 512).
3896 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3897 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3901 if (cfg
->compile_aot
) {
3902 ARM_LDFS (code
, ins
->dreg
, ARMREG_PC
, 0);
3904 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3907 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3908 ARM_LDFS (code
, ins
->dreg
, ARMREG_LR
, 0);
3911 case OP_STORER8_MEMBASE_REG
:
3912 /* This is generated by the local regalloc pass which runs after the lowering pass */
3913 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3914 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3915 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
3916 ARM_STFD (code
, ins
->sreg1
, ARMREG_LR
, 0);
3918 ARM_STFD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3921 case OP_LOADR8_MEMBASE
:
3922 /* This is generated by the local regalloc pass which runs after the lowering pass */
3923 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3924 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3925 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
3926 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3928 ARM_LDFD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3931 case OP_STORER4_MEMBASE_REG
:
3932 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3933 ARM_STFS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3935 case OP_LOADR4_MEMBASE
:
3936 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3937 ARM_LDFS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3939 case OP_ICONV_TO_R_UN
: {
3941 tmpreg
= ins
->dreg
== 0? 1: 0;
3942 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
3943 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3944 ARM_B_COND (code
, ARMCOND_GE
, 8);
3945 /* save the temp register */
3946 ARM_SUB_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3947 ARM_STFD (code
, tmpreg
, ARMREG_SP
, 0);
3948 ARM_LDFD (code
, tmpreg
, ARMREG_PC
, 12);
3949 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->dreg
, tmpreg
);
3950 ARM_LDFD (code
, tmpreg
, ARMREG_SP
, 0);
3951 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3952 /* skip the constant pool */
3955 *(int*)code
= 0x41f00000;
3960 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3961 * adfltd fdest, fdest, ftemp
3965 case OP_ICONV_TO_R4
:
3966 ARM_FLTS (code
, ins
->dreg
, ins
->sreg1
);
3968 case OP_ICONV_TO_R8
:
3969 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3972 #elif defined(ARM_FPU_VFP)
3975 if (cfg
->compile_aot
) {
3976 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
3978 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3980 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3983 /* FIXME: we can optimize the imm load by dealing with part of
3984 * the displacement in LDFD (aligning to 512).
3986 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3987 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
3991 if (cfg
->compile_aot
) {
3992 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
3994 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3996 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3998 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3999 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4000 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4003 case OP_STORER8_MEMBASE_REG
:
4004 /* This is generated by the local regalloc pass which runs after the lowering pass */
4005 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
4006 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4007 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
4008 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4010 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4013 case OP_LOADR8_MEMBASE
:
4014 /* This is generated by the local regalloc pass which runs after the lowering pass */
4015 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
4016 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4017 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
4018 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4020 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4023 case OP_STORER4_MEMBASE_REG
:
4024 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
4025 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
4026 ARM_FSTS (code
, ARM_VFP_F0
, ins
->inst_destbasereg
, ins
->inst_offset
);
4028 case OP_LOADR4_MEMBASE
:
4029 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
4030 ARM_FLDS (code
, ARM_VFP_F0
, ins
->inst_basereg
, ins
->inst_offset
);
4031 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4033 case OP_ICONV_TO_R_UN
: {
4034 g_assert_not_reached ();
4037 case OP_ICONV_TO_R4
:
4038 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
4039 ARM_FSITOS (code
, ARM_VFP_F0
, ARM_VFP_F0
);
4040 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4042 case OP_ICONV_TO_R8
:
4043 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
4044 ARM_FSITOD (code
, ins
->dreg
, ARM_VFP_F0
);
4048 if (mono_method_signature (cfg
->method
)->ret
->type
== MONO_TYPE_R4
) {
4049 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
4050 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
4052 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
4058 case OP_FCONV_TO_I1
:
4059 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
4061 case OP_FCONV_TO_U1
:
4062 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
4064 case OP_FCONV_TO_I2
:
4065 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
4067 case OP_FCONV_TO_U2
:
4068 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
4070 case OP_FCONV_TO_I4
:
4072 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
4074 case OP_FCONV_TO_U4
:
4076 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
4078 case OP_FCONV_TO_I8
:
4079 case OP_FCONV_TO_U8
:
4080 g_assert_not_reached ();
4081 /* Implemented as helper calls */
4083 case OP_LCONV_TO_R_UN
:
4084 g_assert_not_reached ();
4085 /* Implemented as helper calls */
4087 case OP_LCONV_TO_OVF_I4_2
: {
4088 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
4090 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4093 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
4094 high_bit_not_set
= code
;
4095 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
4097 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4098 valid_negative
= code
;
4099 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4100 invalid_negative
= code
;
4101 ARM_B_COND (code
, ARMCOND_AL
, 0);
4103 arm_patch (high_bit_not_set
, code
);
4105 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
4106 valid_positive
= code
;
4107 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4109 arm_patch (invalid_negative
, code
);
4110 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
4112 arm_patch (valid_negative
, code
);
4113 arm_patch (valid_positive
, code
);
4115 if (ins
->dreg
!= ins
->sreg1
)
4116 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4121 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4124 ARM_FPA_SUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4127 ARM_FPA_MUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4130 ARM_FPA_DVFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4133 ARM_MNFD (code
, ins
->dreg
, ins
->sreg1
);
4135 #elif defined(ARM_FPU_VFP)
4137 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4140 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4143 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4146 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4149 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
4154 g_assert_not_reached ();
4158 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4159 #elif defined(ARM_FPU_VFP)
4160 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4166 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4167 #elif defined(ARM_FPU_VFP)
4168 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4171 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
4172 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
4176 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4177 #elif defined(ARM_FPU_VFP)
4178 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4181 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4182 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4186 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4187 #elif defined(ARM_FPU_VFP)
4188 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4191 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4192 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4193 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4198 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4199 #elif defined(ARM_FPU_VFP)
4200 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4203 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4204 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4209 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4210 #elif defined(ARM_FPU_VFP)
4211 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4214 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4215 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4216 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4218 /* ARM FPA flags table:
4219 * N Less than ARMCOND_MI
4220 * Z Equal ARMCOND_EQ
4221 * C Greater Than or Equal ARMCOND_CS
4222 * V Unordered ARMCOND_VS
4225 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
4228 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
4231 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4234 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4235 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4241 g_assert_not_reached ();
4245 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4247 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4248 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
4249 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
4253 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4254 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4259 if (ins
->dreg
!= ins
->sreg1
)
4260 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
4261 #elif defined(ARM_FPU_VFP)
4262 ARM_ABSD (code
, ARM_VFP_D1
, ins
->sreg1
);
4263 ARM_FLDD (code
, ARM_VFP_D0
, ARMREG_PC
, 0);
4265 *(guint32
*)code
= 0xffffffff;
4267 *(guint32
*)code
= 0x7fefffff;
4269 ARM_CMPD (code
, ARM_VFP_D1
, ARM_VFP_D0
);
4271 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "ArithmeticException");
4272 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
4274 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "ArithmeticException");
4276 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4281 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4282 g_assert_not_reached ();
4285 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4286 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4287 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
4288 g_assert_not_reached ();
4294 last_offset
= offset
;
4297 cfg
->code_len
= code
- cfg
->native_code
;
4300 #endif /* DISABLE_JIT */
4302 #ifdef HAVE_AEABI_READ_TP
4303 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers with the JIT icall table so
 * generated code can be patched to call them by name
 * (MONO_PATCH_INFO_INTERNAL_METHOD).  Each is registered with a dummy "void"
 * signature because, as the original comment notes, the signature is not used
 * for these raw helpers.  __aeabi_read_tp is only registered when
 * HAVE_AEABI_READ_TP is defined (EABI Linux, non-Android — see file head).
 *
 * NOTE(review): this region is extraction-mangled — statements are split
 * across lines, original line numbers are fused into the text, and the
 * function's braces were dropped; code text left byte-identical.
 */
4307 mono_arch_register_lowlevel_calls (void)
4309 /* The signature doesn't matter */
4310 mono_register_jit_icall (mono_arm_throw_exception
, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE
);
4311 mono_register_jit_icall (mono_arm_throw_exception_by_token
, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE
);
4313 #ifdef HAVE_AEABI_READ_TP
4314 mono_register_jit_icall (__aeabi_read_tp
, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE
);
/*
 * patch_lis_ori:
 *
 *   Write the high and low 16-bit halves of VAL into halfword slots 1 and 3
 * of the instruction pair at IP (the immediate fields of a lis/ori-style
 * two-instruction constant load).  NOTE(review): lis/ori are PowerPC
 * mnemonics, and every use visible in mono_arch_patch_code below sits behind
 * g_assert_not_reached () — this looks like a leftover from the PPC backend,
 * kept only for those dead branches; confirm before relying on it.
 * NOTE(review): the closing "} while (0)" line of this macro was dropped by
 * the extraction; code text left byte-identical.
 */
4318 #define patch_lis_ori(ip,val) do {\
4319 guint16 *__lis_ori = (guint16*)(ip); \
4320 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4321 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo list JI and apply each patch to the native code
 * starting at CODE.  run_cctors == FALSE signals AOT compilation
 * (compile_aot below).  MONO_PATCH_INFO_SWITCH is handled specially in JIT
 * mode by filling the inlined jump table located 8 bytes (2 instructions)
 * after the patch site; everything else resolves a target via
 * mono_resolve_patch_target () and hands it to arm_patch_general ().
 *
 * NOTE(review): extraction-mangled region — statements are split across
 * lines, original line numbers are fused into the text, and braces /
 * break / continue lines were dropped; code text left byte-identical.
 */
4325 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
4327 MonoJumpInfo
*patch_info
;
/* AOT mode is indicated by run_cctors being FALSE */
4328 gboolean compile_aot
= !run_cctors
;
4330 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
/* ip = absolute address of the patch site inside the method body */
4331 unsigned char *ip
= patch_info
->ip
.i
+ code
;
4332 const unsigned char *target
;
4334 if (patch_info
->type
== MONO_PATCH_INFO_SWITCH
&& !compile_aot
) {
4335 gpointer
*jt
= (gpointer
*)(ip
+ 8);
4337 /* jt is the inlined jump table, 2 instructions after ip
4338 * In the normal case we store the absolute addresses,
4339 * otherwise the displacements.
4341 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++)
4342 jt
[i
] = code
+ (int)patch_info
->data
.table
->table
[i
];
4345 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
4348 switch (patch_info
->type
) {
4349 case MONO_PATCH_INFO_BB
:
4350 case MONO_PATCH_INFO_LABEL
:
4353 /* No need to patch these */
/* AOT-only switch: every case below starts with g_assert_not_reached (),
 * so these patch_lis_ori / raw-store paths are dead code on ARM —
 * NOTE(review): they appear to be PPC-backend leftovers; confirm. */
4358 switch (patch_info
->type
) {
4359 case MONO_PATCH_INFO_IP
:
4360 g_assert_not_reached ();
4361 patch_lis_ori (ip
, ip
);
4363 case MONO_PATCH_INFO_METHOD_REL
:
4364 g_assert_not_reached ();
4365 *((gpointer
*)(ip
)) = code
+ patch_info
->data
.offset
;
4367 case MONO_PATCH_INFO_METHODCONST
:
4368 case MONO_PATCH_INFO_CLASS
:
4369 case MONO_PATCH_INFO_IMAGE
:
4370 case MONO_PATCH_INFO_FIELD
:
4371 case MONO_PATCH_INFO_VTABLE
:
4372 case MONO_PATCH_INFO_IID
:
4373 case MONO_PATCH_INFO_SFLDA
:
4374 case MONO_PATCH_INFO_LDSTR
:
4375 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4376 case MONO_PATCH_INFO_LDTOKEN
:
4377 g_assert_not_reached ();
4378 /* from OP_AOTCONST : lis + ori */
4379 patch_lis_ori (ip
, target
);
4381 case MONO_PATCH_INFO_R4
:
4382 case MONO_PATCH_INFO_R8
:
4383 g_assert_not_reached ();
4384 *((gconstpointer
*)(ip
+ 2)) = patch_info
->data
.target
;
4386 case MONO_PATCH_INFO_EXC_NAME
:
4387 g_assert_not_reached ();
4388 *((gconstpointer
*)(ip
+ 1)) = patch_info
->data
.name
;
4390 case MONO_PATCH_INFO_NONE
:
4391 case MONO_PATCH_INFO_BB_OVF
:
4392 case MONO_PATCH_INFO_EXC_OVF
:
4393 /* everything is dealt with at epilog output time */
/* Default path: let the generic ARM patcher rewrite the branch/load at ip */
4398 arm_patch_general (domain
, ip
, target
);
4403 * Stack frame layout:
4405 * ------------------- fp
4406 * MonoLMF structure or saved registers
4407 * -------------------
4409 * -------------------
4411 * -------------------
4412 * optional 8 bytes for tracing
4413 * -------------------
4414 * param area size is cfg->param_area
4415 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the ARM method prologue for CFG: allocate the native code buffer,
 * push the callee-saved registers (or the full r4-r12+lr set when an LMF is
 * needed), allocate the aligned stack frame, store register-passed arguments
 * into their stack slots, optionally attach the thread for
 * native-to-managed wrappers, link a MonoLMF frame, and initialize the
 * sequence-point variables.  See the stack frame layout comment above.
 *
 * NOTE(review): extraction-mangled region — statements are split across
 * lines, original line numbers are fused into the text, and many structural
 * lines (braces, case labels, else branches, the final return) were dropped;
 * code text left byte-identical.
 */
4418 mono_arch_emit_prolog (MonoCompile
*cfg
)
4420 MonoMethod
*method
= cfg
->method
;
4422 MonoMethodSignature
*sig
;
4424 int alloc_size
, pos
, max_offset
, i
, rot_amount
;
4429 int prev_sp_offset
, reg_offset
;
4431 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
/* Initial code buffer: sized from the parameter count, grown later if needed */
4434 sig
= mono_method_signature (method
);
4435 cfg
->code_size
= 256 + sig
->param_count
* 20;
4436 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4438 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
4440 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
4442 alloc_size
= cfg
->stack_offset
;
/* Register save: without an LMF push only the used regs plus ip/lr;
 * with an LMF push r4-r12 and lr (mask 0x5ff0, "all but r0-r3, sp and pc"),
 * emitting unwind info for each saved register either way. */
4445 if (!method
->save_lmf
) {
4446 /* We save SP by storing it into IP and saving IP */
4447 ARM_PUSH (code
, (cfg
->used_int_regs
| (1 << ARMREG_IP
) | (1 << ARMREG_LR
)));
4448 prev_sp_offset
= 8; /* ip and lr */
4449 for (i
= 0; i
< 16; ++i
) {
4450 if (cfg
->used_int_regs
& (1 << i
))
4451 prev_sp_offset
+= 4;
4453 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4455 for (i
= 0; i
< 16; ++i
) {
4456 if ((cfg
->used_int_regs
& (1 << i
)) || (i
== ARMREG_IP
) || (i
== ARMREG_LR
)) {
4457 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4462 ARM_PUSH (code
, 0x5ff0);
4463 prev_sp_offset
= 4 * 10; /* all but r0-r3, sp and pc */
4464 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4466 for (i
= 0; i
< 16; ++i
) {
4467 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
4468 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4472 pos
+= sizeof (MonoLMF
) - prev_sp_offset
;
4476 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4477 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4478 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4479 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4482 /* the stack used in the pushed regs */
4483 if (prev_sp_offset
& 4)
4485 cfg
->stack_usage
= alloc_size
;
/* sp -= alloc_size: use an immediate SUB when the size fits a rotated imm8,
 * otherwise load the size into ip first */
4487 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
4488 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
4490 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
4491 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4493 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
4495 if (cfg
->frame_reg
!= ARMREG_SP
) {
4496 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
4497 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4499 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4500 prev_sp_offset
+= alloc_size
;
4502 /* compute max_offset in order to use short forward jumps
4503 * we could skip do it on arm because the immediate displacement
4504 * for jumps is large enough, it may be useful later for constant pools
4507 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4508 MonoInst
*ins
= bb
->code
;
4509 bb
->max_offset
= max_offset
;
4511 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
4514 MONO_BB_FOR_EACH_INS (bb
, ins
)
4515 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
4518 /* store runtime generic context */
4519 if (cfg
->rgctx_var
) {
4520 MonoInst
*ins
= cfg
->rgctx_var
;
4522 g_assert (ins
->opcode
== OP_REGOFFSET
);
4524 if (arm_is_imm12 (ins
->inst_offset
)) {
4525 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4527 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4528 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
4532 /* load arguments allocated to register from the stack */
4535 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
/* For struct returns passed by hidden pointer, spill the vret address */
4537 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
!= RegTypeStructByVal
) {
4538 ArgInfo
*ainfo
= &cinfo
->ret
;
4539 inst
= cfg
->vret_addr
;
4540 g_assert (arm_is_imm12 (inst
->inst_offset
));
4541 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4544 if (sig
->call_convention
== MONO_CALL_VARARG
) {
4545 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
4547 /* Save the sig cookie address */
4548 g_assert (cookie
->storage
== RegTypeBase
);
4550 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
4551 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
4552 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
4553 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
/* Per-argument loop: move each incoming argument (register, caller stack
 * slot, register pair, or by-value struct) into its home location, picking
 * immediate vs register-offset addressing based on arm_is_imm12/imm8 */
4556 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4557 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4558 inst
= cfg
->args
[pos
];
4560 if (cfg
->verbose_level
> 2)
4561 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
4562 if (inst
->opcode
== OP_REGVAR
) {
4563 if (ainfo
->storage
== RegTypeGeneral
)
4564 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
4565 else if (ainfo
->storage
== RegTypeFP
) {
4566 g_assert_not_reached ();
4567 } else if (ainfo
->storage
== RegTypeBase
) {
4568 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4569 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4571 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4572 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
4575 g_assert_not_reached ();
4577 if (cfg
->verbose_level
> 2)
4578 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4580 /* the argument should be put on the stack: FIXME handle size != word */
4581 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
) {
4582 switch (ainfo
->size
) {
4584 if (arm_is_imm12 (inst
->inst_offset
))
4585 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4587 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4588 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4592 if (arm_is_imm8 (inst
->inst_offset
)) {
4593 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4595 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4596 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4600 g_assert (arm_is_imm12 (inst
->inst_offset
));
4601 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4602 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
4603 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4606 if (arm_is_imm12 (inst
->inst_offset
)) {
4607 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4609 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4610 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4614 } else if (ainfo
->storage
== RegTypeBaseGen
) {
4615 g_assert (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
));
4616 g_assert (arm_is_imm12 (inst
->inst_offset
));
4617 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4618 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4619 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
4620 } else if (ainfo
->storage
== RegTypeBase
) {
4621 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4622 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4624 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
4625 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4628 switch (ainfo
->size
) {
4630 if (arm_is_imm8 (inst
->inst_offset
)) {
4631 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4633 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4634 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4638 if (arm_is_imm8 (inst
->inst_offset
)) {
4639 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4641 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4642 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4646 if (arm_is_imm12 (inst
->inst_offset
)) {
4647 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4649 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4650 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4652 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
4653 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
4655 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
4656 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4658 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
4659 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4661 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
4662 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4666 if (arm_is_imm12 (inst
->inst_offset
)) {
4667 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4669 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4670 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4674 } else if (ainfo
->storage
== RegTypeFP
) {
4675 g_assert_not_reached ();
4676 } else if (ainfo
->storage
== RegTypeStructByVal
) {
4677 int doffset
= inst
->inst_offset
;
4681 size
= mini_type_stack_size_full (cfg
->generic_sharing_context
, inst
->inst_vtype
, NULL
, sig
->pinvoke
);
4682 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
4683 if (arm_is_imm12 (doffset
)) {
4684 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
4686 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
4687 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
4689 soffset
+= sizeof (gpointer
);
4690 doffset
+= sizeof (gpointer
);
4692 if (ainfo
->vtsize
) {
4693 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4694 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4695 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
4697 } else if (ainfo
->storage
== RegTypeStructByAddr
) {
4698 g_assert_not_reached ();
4699 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4700 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, inst
->inst_offset
, ainfo
->reg
, 0);
4702 g_assert_not_reached ();
/* Native-to-managed wrappers: attach the thread to the runtime, passing the
 * domain in r0 (NULL under AOT — AOT code runs only in the root domain) */
4707 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
4708 if (cfg
->compile_aot
)
4709 /* AOT code is only used in the root domain */
4710 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, 0);
4712 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->domain
);
4713 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4714 (gpointer
)"mono_jit_thread_attach");
4715 code
= emit_call_seq (cfg
, code
);
/* LMF setup: obtain the LMF address (fast TLS path via __aeabi_read_tp when
 * available, otherwise a call to mono_get_lmf_addr), then link the on-stack
 * MonoLMF into the LMF chain using r1 (new lmf) and r2 (scratch) */
4718 if (method
->save_lmf
) {
4719 gboolean get_lmf_fast
= FALSE
;
4721 #ifdef HAVE_AEABI_READ_TP
4722 gint32 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
4724 if (lmf_addr_tls_offset
!= -1) {
4725 get_lmf_fast
= TRUE
;
4727 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4728 (gpointer
)"__aeabi_read_tp");
4729 code
= emit_call_seq (cfg
, code
);
4731 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, lmf_addr_tls_offset
);
4732 get_lmf_fast
= TRUE
;
4735 if (!get_lmf_fast
) {
4736 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4737 (gpointer
)"mono_get_lmf_addr");
4738 code
= emit_call_seq (cfg
, code
);
4740 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4741 /* lmf_offset is the offset from the previous stack pointer,
4742 * alloc_size is the total stack space allocated, so the offset
4743 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4744 * The pointer to the struct is put in r1 (new_lmf).
4745 * r2 is used as scratch
4746 * The callee-saved registers are already in the MonoLMF structure
4748 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, alloc_size
- lmf_offset
);
4749 /* r0 is the result from mono_get_lmf_addr () */
4750 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4751 /* new_lmf->previous_lmf = *lmf_addr */
4752 ARM_LDR_IMM (code
, ARMREG_R2
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4753 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4754 /* *(lmf_addr) = r1 */
4755 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4756 /* Skip method (only needed for trampoline LMF frames) */
4757 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, ebp
));
4758 /* save the current IP */
4759 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
4760 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, eip
));
4764 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
/* Sequence points: load the SeqPointInfo pointer (from an inline slot
 * patched as MONO_PATCH_INFO_SEQ_POINT_INFO) into its stack variable */
4766 if (cfg
->arch
.seq_point_info_var
) {
4767 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
4769 /* Initialize the variable from a GOT slot */
4770 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
4771 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
4773 *(gpointer
*)code
= NULL
;
4775 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
4777 g_assert (ins
->opcode
== OP_REGOFFSET
);
4779 if (arm_is_imm12 (ins
->inst_offset
)) {
4780 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
4782 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4783 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
4787 /* Initialize ss_trigger_page_var */
4789 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4790 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4791 int dreg
= ARMREG_LR
;
4794 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4795 g_assert (arm_is_imm12 (info_var
->inst_offset
));
4797 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
4798 /* Load the trigger page addr */
4799 ARM_LDR_IMM (code
, dreg
, dreg
, G_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
4800 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
/* Record the prologue length and verify it fits the allocated buffer */
4804 cfg
->code_len
= code
- cfg
->native_code
;
4805 g_assert (cfg
->code_len
< cfg
->code_size
);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional leave-tracing, moving a returned
 * vtype back into r0, unlinking the MonoLMF frame when save_lmf is set,
 * unwinding the stack and the final register-restoring pop that also
 * returns to the caller.
 * NOTE(review): this extraction is missing several physical lines
 * (return type, braces, the declarations of 'code' and 'cinfo');
 * the code tokens below are kept exactly as found.
 */
mono_arch_emit_epilog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	/* worst-case epilogue size, used to grow the code buffer below */
	int max_epilog_size = 16 + 20*4;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	/* double the buffer until the worst-case epilogue is guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;

	/*
	 * Keep in sync with OP_JMP
	 */
	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == RegTypeStructByVal) {
		MonoInst *ins = cfg->ret;

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			/* offset too large for an ldr immediate: materialize it in lr first */
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);

	if (method->save_lmf) {
		/* all but r0-r3, sp and pc */
		pos += sizeof (MonoLMF) - (4 * 10);

		/* r2 contains the pointer to the current LMF */
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
		/* ip = previous_lmf */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* lr = lmf_addr */
		ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* *(lmf_addr) = previous_lmf */
		ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* FIXME: speedup: there is no actual need to restore the registers if
		 * we didn't actually change them (idea from Zoltan).
		 */
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
		ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
		/* non-LMF path: pop the frame and return through the saved regs */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
			/* stack_usage not encodable as a rotated imm8: load it into ip */
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		/* FIXME: add v4 thumb interworking support */
		ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
4895 /* remove once throw_exception_by_name is eliminated */
4897 exception_id_by_name (const char *name
)
4899 if (strcmp (name
, "IndexOutOfRangeException") == 0)
4900 return MONO_EXC_INDEX_OUT_OF_RANGE
;
4901 if (strcmp (name
, "OverflowException") == 0)
4902 return MONO_EXC_OVERFLOW
;
4903 if (strcmp (name
, "ArithmeticException") == 0)
4904 return MONO_EXC_ARITHMETIC
;
4905 if (strcmp (name
, "DivideByZeroException") == 0)
4906 return MONO_EXC_DIVIDE_BY_ZERO
;
4907 if (strcmp (name
, "InvalidCastException") == 0)
4908 return MONO_EXC_INVALID_CAST
;
4909 if (strcmp (name
, "NullReferenceException") == 0)
4910 return MONO_EXC_NULL_REF
;
4911 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
4912 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
4913 g_error ("Unknown intrinsic exception %s\n", name
);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-throwing code after the method
 * body: one throw site per distinct intrinsic exception, retargeted to
 * call mono_arch_throw_corlib_exception with the exception type token
 * embedded in the code stream.
 * NOTE(review): several physical lines (braces, the declarations of
 * 'code' and 'i', part of the emitted call sequence) are missing from
 * this extraction; code tokens kept exactly as found.
 */
mono_arch_emit_exceptions (MonoCompile *cfg)
	MonoJumpInfo *patch_info;
	/* first emitted throw site, per intrinsic exception id */
	const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				/* reserve room for one throw sequence per distinct exception */
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* a throw site for this exception already exists: branch to it */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			exc_throw_pos [i] = code;

			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* r1 <- caller ip, r0 <- type token (loaded from the code stream) */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* retarget this patch entry at the corlib-exception throw helper */
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			*(guint32 *)(gpointer)code = exc_class->type_token;

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
/* one-time guard for caching the LMF TLS offsets below */
static gboolean tls_offset_inited = FALSE;

/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Cache the TLS offsets of the LMF and LMF-address slots the first
 * time a JIT thread is set up; later prologues can then access them
 * directly. NOTE(review): braces and the return type are on lines
 * missing from this extraction.
 */
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
	if (!tls_offset_inited) {
		tls_offset_inited = TRUE;

		lmf_tls_offset = mono_get_lmf_tls_offset ();
		lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/*
 * The following are small mono_arch_* entry points; their bodies (mostly
 * empty or a single return) are largely on lines missing from this
 * extraction — only the visible tokens are kept.
 */

/* Per-thread JIT TLS teardown hook; nothing to free on ARM. */
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)

/* Arch-specific intrinsic expansion hook (no visible expansions here). */
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)

/* Debug pretty-printer hook for the old tree-based IR. */
mono_arch_print_tree (MonoInst *tree, int arity)

/* Return an inst that loads the current MonoDomain, if supported. */
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
	return mono_get_domain_intrinsic (cfg);

/* Offset of the patchable word inside a call/branch sequence at CODE. */
mono_arch_get_patch_offset (guint8 *code)

/* No register windows on ARM; nothing to flush. */
mono_arch_flush_register_windows (void)
5041 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument to be passed to an interface
 * call in V5: under AOT the method is loaded via an OP_AOTCONST, with
 * a generic context a register is always used, otherwise the method
 * pointer is materialized with OP_PCONST.
 * NOTE(review): braces, 'else' branches and the declaration of 'ins'
 * are on lines missing from this extraction.
 */
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	if (cfg->compile_aot) {
		int method_reg = mono_alloc_ireg (cfg);

		call->dynamic_imt_arg = TRUE;

		/* imt_arg present: pass it directly in V5 */
		mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		/* otherwise load the method through an AOT constant slot */
		MONO_INST_NEW (cfg, ins, OP_AOTCONST);
		ins->dreg = method_reg;
		ins->inst_p0 = call->method;
		ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);

	} else if (cfg->generic_context) {

		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		/* non-AOT: the method pointer is a plain pointer constant */
		int method_reg = mono_alloc_preg (cfg);

		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call: it is stored in the
 * code stream in the word following the "ldr pc" of the call site, or
 * in V5 when that word is 0 (AOT case).
 * NOTE(review): braces are on lines missing from this extraction.
 */
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
	guint32 *code_ptr = (guint32*)code;

	/* The IMT value is stored in the code stream right after the LDC instruction. */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));

	if (code_ptr [1] == 0)
		/* This is AOTed code, the IMT method is in V5 */
		return (MonoMethod*)regs [ARMREG_V5];

	return (MonoMethod*) code_ptr [1];
/*
 * mono_arch_find_static_call_vtable:
 *
 *   The vtable for a static rgctx call is passed in the dedicated
 * RGCTX register; just read it back out of the saved register state.
 */
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Set to 1 to emit an assert when an IMT thunk is entered with a wrong method. */
#define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes (instruction count * 4) of the code chunks emitted by
 * mono_arch_build_imt_thunk, used to pre-compute the thunk size. */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (5 * 4)
/* Signed byte distance from A to B. */
#define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit a literal value into the code stream and patch the earlier
 * "ldr reg, [pc, #0]" at TARGET so its 12-bit immediate offset points
 * at the just-emitted word.
 * NOTE(review): the store of VALUE at CODE and the return statement
 * are on lines missing from this extraction.
 */
arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
	guint32 delta = DISTANCE (target, code);
	/* NOTE(review): delta is unsigned, so "delta >= 0" is always true;
	 * only the 12-bit ldr-offset upper bound is actually checked here. */
	g_assert (delta >= 0 && delta <= 0xFFF);
	*target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a sequence of compare/branch
 * chunks (binary-search shaped) that dispatches on the IMT method in r0
 * (or V5 for AOT callers) and jumps through the proper vtable slot.
 * Sizes are pre-computed per entry with the *_SIZE macros above;
 * literal values are placed in per-entry constant pools patched with
 * arm_emit_value_and_patch_ldr.
 * NOTE(review): many physical lines (braces, else branches, the BASE_SIZE
 * accumulation, the emission of the imt_method ldr, the debug #ifdefs and
 * the final return) are missing from this extraction; tokens kept as-is.
 */
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
	int size, i, extra_space = 0;
	arminstr_t *code, *start, *vtable_target = NULL;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;

	constant_pool_starts = g_new0 (guint32*, count);

	/*
	 * We might be called with a fail_tramp from the IMT builder code even if
	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
	 */
	//g_assert (!fail_tramp);

	/* first pass: compute each entry's chunk size */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			/* slot too far for an imm12 ldr: needs the long-branch sequence */
			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable [item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;

			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;

#if ENABLE_WRONG_METHOD_CHECK
			item->chunk_size += WMC_SIZE;
			item->chunk_size += CALL_SIZE;
			/* bsearch (non-equals) node */
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		size += item->chunk_size;

	size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	start = code = mono_domain_code_reserve (domain, size);

	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);

	/* thunk prologue: save scratch regs (long form also saves ip/pc) */
	ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	/* ip <- vtable pointer, literal patched in later */
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);

	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* second pass: emit the per-entry compare/dispatch chunks */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					/* r1 <- expected IMT method (literal patched later), compare */
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
				/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_NE, 1);

			vtable_offset = DISTANCE (vtable, &vtable->vtable [item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
				/* short form: restore r0/r1, drop the long-form slots, branch */
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);

			code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
				code += extra_space;
			/* bsearch node: compare key and branch right on >= */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_GE, 0);

	/* third pass: patch branches and fill in the bsearch constant pools */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		if (i > 0 && item->is_equals) {
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);

	char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
	mono_disassemble_code (NULL, (guint8*)start, size, buff);

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Read integer register REG out of a saved MonoContext; SP is stored
 * separately from the general register array.
 * NOTE(review): braces/'else' are on lines missing from this extraction.
 */
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
	if (reg == ARMREG_SP)
		return (gpointer)ctx->esp;
		return (gpointer)ctx->regs [reg];
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 * Soft-debug AOT path: record the bp trigger page in the per-method
 * bp_addrs table; JIT path: patch in a load from the trigger page (or,
 * in the fallback branch, an SWI that raises SIGILL under qemu/linux).
 * NOTE(review): the #ifdef structure, braces and the declaration of
 * 'code' are on lines missing from this extraction.
 */
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
	guint32 native_offset = ip - (guint8*)ji->code_start;

	SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

	g_assert (native_offset % 4 == 0);
	g_assert (info->bp_addrs [native_offset / 4] == 0);
	/* a non-zero slot makes the OP_SEQ_POINT code read the trigger page */
	info->bp_addrs [native_offset / 4] = bp_trigger_page;
	int dreg = ARMREG_LR;

	/* Read from another trigger page */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	*(int*)code = (int)bp_trigger_page;
	ARM_LDR_IMM (code, dreg, dreg, 0);

	mono_arch_flush_icache (code - 16, 16);

	/* This is currently implemented by emitting an SWI instruction, which
	 * qemu/linux seems to convert to a SIGILL.
	 */
	*(int*)code = (0xef << 24) | 8;
	mono_arch_flush_icache (code - 4, 4);
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP: reset the bp_addrs slot in the AOT/soft
 * path; in the JIT path the loop presumably rewrites the patched words
 * back to NOPs (its body is on missing lines) before flushing icache.
 * NOTE(review): the #ifdef structure, braces and declarations of
 * 'code'/'i' are on lines missing from this extraction.
 */
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
	guint32 native_offset = ip - (guint8*)ji->code_start;
	SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

	g_assert (native_offset % 4 == 0);
	g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
	info->bp_addrs [native_offset / 4] = 0;

	for (i = 0; i < 4; ++i)

	mono_arch_flush_icache (ip, code - ip);
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping by revoking all access to the single-step
 * trigger page: the read emitted by OP_SEQ_POINT then faults.
 */
mono_arch_start_single_stepping (void)
	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);

/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping by making the trigger page readable again.
 */
mono_arch_stop_single_stepping (void)
	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised by a trigger-page fault; platform-dependent (the
 * surrounding #ifdef/#else/#endif lines are missing from this extraction). */
#define DBG_SIGNAL SIGBUS
#define DBG_SIGNAL SIGSEGV

/*
 * mono_arch_is_single_step_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a single
 * step event: the faulting address lies in (or just past) the ss trigger page.
 * NOTE(review): braces and return statements are on missing lines.
 */
mono_arch_is_single_step_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;

	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a breakpoint event:
 * DBG_SIGNAL with a faulting address in (or just past) the bp trigger page.
 * NOTE(review): braces and return statements are on missing lines.
 */
mono_arch_is_breakpoint_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;

	if (sinfo->si_signo == DBG_SIGNAL) {
		/* Sometimes the address is off by 4 */
		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the faulting context IP back to the IL-visible breakpoint location.
 * NOTE(review): the adjustment logic and return are on missing lines. */
mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);

/* Map the faulting context IP back to the single-step location.
 * NOTE(review): the adjustment logic and return are on missing lines. */
mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/*
 * mono_arch_skip_breakpoint:
 *
 * See mini-amd64.c for docs.
 * Advance the context IP past the 4-byte breakpoint instruction.
 */
mono_arch_skip_breakpoint (MonoContext *ctx)
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);

/*
 * mono_arch_skip_single_step:
 *
 * See mini-amd64.c for docs.
 * Advance the context IP past the 4-byte trigger-page load.
 */
mono_arch_skip_single_step (MonoContext *ctx)
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5499 * mono_arch_get_seq_point_info:
5501 * See mini-amd64.c for docs.
5504 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5509 // FIXME: Add a free function
5511 mono_domain_lock (domain
);
5512 info
= g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
5514 mono_domain_unlock (domain
);
5517 ji
= mono_jit_info_table_find (domain
, (char*)code
);
5520 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
5522 info
->ss_trigger_page
= ss_trigger_page
;
5523 info
->bp_trigger_page
= bp_trigger_page
;
5525 mono_domain_lock (domain
);
5526 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
5528 mono_domain_unlock (domain
);