/*
 * mini-arm.c: ARM backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 */
#include <mono/metadata/appdomain.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/utils/mono-mmap.h>

#ifdef ARM_FPU_FPA
#include "mono/arch/arm/arm-fpa-codegen.h"
#elif defined(ARM_FPU_VFP)
#include "mono/arch/arm/arm-vfp-codegen.h"
#endif

#if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
#define HAVE_AEABI_READ_TP 1
#endif
31 static gint lmf_tls_offset
= -1;
32 static gint lmf_addr_tls_offset
= -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex
;
39 static int v5_supported
= 0;
40 static int v7_supported
= 0;
41 static int thumb_supported
= 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page
;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page
;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page
;
55 gpointer bp_trigger_page
;
56 guint8
* bp_addrs
[MONO_ZERO_LEN_ARRAY
];
/*
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which binary incompat with the other.
 * 1) FPA: old and ugly, but unfortunately what current distros use
 *    the double binary format has the two words swapped. 8 double registers.
 *    Implemented usually by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
 *
 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
 */
int mono_exc_esp_offset = 0;
/* Ranges encodable as ARM immediate load/store offsets. */
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)

/* Masks/values used to recognize "ldr pc, [rX + #off]" style call sequences. */
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)

/* Encodings of "add lr, pc, #4" and "mov lr, pc", used to classify call sites. */
#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* A variant of ARM_STR_IMM which can handle large offsets */
#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)
/*
 * mono_arch_regname:
 *
 *   Return a human readable name for integer register REG, for
 * disassembly/debug output. Out of range values map to "unknown".
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human readable name for floating point register REG.
 * Out of range values map to "unknown".
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
142 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
144 int imm8
, rot_amount
;
145 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
146 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
149 g_assert (dreg
!= sreg
);
150 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
151 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
156 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size
> sizeof (gpointer
) * 4) {
161 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
162 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
163 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
164 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
165 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
166 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
167 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
168 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
169 ARM_B_COND (code
, ARMCOND_NE
, 0);
170 arm_patch (code
- 4, start_loop
);
173 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
174 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
176 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
177 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
183 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
184 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
185 doffset
= soffset
= 0;
187 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
188 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
194 g_assert (size
== 0);
199 emit_call_reg (guint8
*code
, int reg
)
202 ARM_BLX_REG (code
, reg
);
204 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
208 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
214 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
216 if (cfg
->method
->dynamic
) {
217 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
219 *(gpointer
*)code
= NULL
;
221 code
= emit_call_reg (code
, ARMREG_IP
);
229 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
231 switch (ins
->opcode
) {
234 case OP_FCALL_MEMBASE
:
236 if (ins
->dreg
!= ARM_FPA_F0
)
237 ARM_MVFD (code
, ins
->dreg
, ARM_FPA_F0
);
238 #elif defined(ARM_FPU_VFP)
239 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
240 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
241 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
243 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
252 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_get_argument_info:
 * @csig:  a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enought to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
266 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
268 int k
, frame_size
= 0;
269 guint32 size
, align
, pad
;
272 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
273 frame_size
+= sizeof (gpointer
);
277 arg_info
[0].offset
= offset
;
280 frame_size
+= sizeof (gpointer
);
284 arg_info
[0].size
= frame_size
;
286 for (k
= 0; k
< param_count
; k
++) {
287 size
= mini_type_stack_size_full (NULL
, csig
->params
[k
], &align
, csig
->pinvoke
);
289 /* ignore alignment for now */
292 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
293 arg_info
[k
].pad
= pad
;
295 arg_info
[k
+ 1].pad
= 0;
296 arg_info
[k
+ 1].size
= size
;
298 arg_info
[k
+ 1].offset
= offset
;
302 align
= MONO_ARCH_FRAME_ALIGNMENT
;
303 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
304 arg_info
[k
].pad
= pad
;
310 decode_vcall_slot_from_ldr (guint32 ldr
, mgreg_t
*regs
, int *displacement
)
314 reg
= (ldr
>> 16 ) & 0xf;
315 offset
= ldr
& 0xfff;
316 if (((ldr
>> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
319 o
= (gpointer
)regs
[reg
];
321 *displacement
= offset
;
326 mono_arch_get_vcall_slot (guint8
*code_ptr
, mgreg_t
*regs
, int *displacement
)
328 guint32
* code
= (guint32
*)code_ptr
;
330 /* Locate the address of the method-specific trampoline. The call using
331 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
332 looks something like this:
341 The call sequence could be also:
344 function pointer literal
348 Note that on ARM5+ we can use one instruction instead of the last two.
349 Therefore, we need to locate the 'ldr rA' instruction to know which
350 register was used to hold the method addrs.
353 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
356 /* Three possible code sequences can happen here:
360 * ldr pc, [rX - #offset]
366 * ldr pc, [rX - #offset]
368 * direct branch with bl:
372 * direct branch with mov:
376 * We only need to identify interface and virtual calls, the others can be ignored.
379 if (IS_LDR_PC (code
[-1]) && code
[-2] == ADD_LR_PC_4
)
380 return decode_vcall_slot_from_ldr (code
[-1], regs
, displacement
);
382 if (IS_LDR_PC (code
[0]) && code
[-1] == MOV_LR_PC
)
383 return decode_vcall_slot_from_ldr (code
[0], regs
, displacement
);
#define MAX_ARCH_DELEGATE_PARAMS 3
391 get_delegate_invoke_impl (gboolean has_target
, gboolean param_count
, guint32
*code_size
)
393 guint8
*code
, *start
;
396 start
= code
= mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
400 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, target
));
401 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
403 g_assert ((code
- start
) <= 12);
405 mono_arch_flush_icache (start
, 12);
409 size
= 8 + param_count
* 4;
410 start
= code
= mono_global_codeman_reserve (size
);
412 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
413 /* slide down the arguments */
414 for (i
= 0; i
< param_count
; ++i
) {
415 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
417 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
419 g_assert ((code
- start
) <= size
);
421 mono_arch_flush_icache (start
, size
);
425 *code_size
= code
- start
;
431 * mono_arch_get_delegate_invoke_impls:
433 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
437 mono_arch_get_delegate_invoke_impls (void)
444 code
= get_delegate_invoke_impl (TRUE
, 0, &code_len
);
445 res
= g_slist_prepend (res
, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code
, code_len
, NULL
, NULL
));
447 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
448 code
= get_delegate_invoke_impl (FALSE
, i
, &code_len
);
449 res
= g_slist_prepend (res
, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i
), code
, code_len
, NULL
, NULL
));
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
458 guint8
*code
, *start
;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
465 static guint8
* cached
= NULL
;
466 mono_mini_arch_lock ();
468 mono_mini_arch_unlock ();
473 start
= mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
475 start
= get_delegate_invoke_impl (TRUE
, 0, NULL
);
477 mono_mini_arch_unlock ();
480 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
483 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
485 for (i
= 0; i
< sig
->param_count
; ++i
)
486 if (!mono_is_regsize_var (sig
->params
[i
]))
489 mono_mini_arch_lock ();
490 code
= cache
[sig
->param_count
];
492 mono_mini_arch_unlock ();
497 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
498 start
= mono_aot_get_trampoline (name
);
501 start
= get_delegate_invoke_impl (FALSE
, sig
->param_count
, NULL
);
503 cache
[sig
->param_count
] = start
;
504 mono_mini_arch_unlock ();
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, mgreg_t
*regs
, guint8
*code
)
514 /* FIXME: handle returning a struct */
515 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
516 return (gpointer
)regs
[ARMREG_R1
];
517 return (gpointer
)regs
[ARMREG_R0
];
/*
 * Initialize the cpu to execute managed code.
 */
void
mono_arch_cpu_init (void)
{
	/* Nothing to do on ARM. */
}
529 * Initialize architecture specific code.
532 mono_arch_init (void)
534 InitializeCriticalSection (&mini_arch_mutex
);
536 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
537 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
538 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
540 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception
);
541 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token
);
/*
 * Cleanup architecture specific code.
 */
void
mono_arch_cleanup (void)
{
	/* Nothing to do on ARM. */
}
553 * This function returns the optimizations supported on this cpu.
556 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
559 const char *cpu_arch
= getenv ("MONO_CPU_ARCH");
560 if (cpu_arch
!= NULL
) {
561 thumb_supported
= strstr (cpu_arch
, "thumb") != NULL
;
562 if (strncmp (cpu_arch
, "armv", 4) == 0) {
563 v5_supported
= cpu_arch
[4] >= '5';
564 v7_supported
= cpu_arch
[4] >= '7';
568 thumb_supported
= TRUE
;
573 FILE *file
= fopen ("/proc/cpuinfo", "r");
575 while ((line
= fgets (buf
, 512, file
))) {
576 if (strncmp (line
, "Processor", 9) == 0) {
577 char *ver
= strstr (line
, "(v");
578 if (ver
&& (ver
[2] == '5' || ver
[2] == '6' || ver
[2] == '7'))
580 if (ver
&& (ver
[2] == '7'))
584 if (strncmp (line
, "Features", 8) == 0) {
585 char *th
= strstr (line
, "thumb");
587 thumb_supported
= TRUE
;
595 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
600 /* no arm-specific optimizations yet */
608 is_regsize_var (MonoType
*t
) {
611 t
= mini_type_get_underlying_type (NULL
, t
);
618 case MONO_TYPE_FNPTR
:
620 case MONO_TYPE_OBJECT
:
621 case MONO_TYPE_STRING
:
622 case MONO_TYPE_CLASS
:
623 case MONO_TYPE_SZARRAY
:
624 case MONO_TYPE_ARRAY
:
626 case MONO_TYPE_GENERICINST
:
627 if (!mono_type_generic_inst_is_valuetype (t
))
630 case MONO_TYPE_VALUETYPE
:
637 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
642 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
643 MonoInst
*ins
= cfg
->varinfo
[i
];
644 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
647 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
650 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
653 /* we can only allocate 32 bit values */
654 if (is_regsize_var (ins
->inst_vtype
)) {
655 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
656 g_assert (i
== vmv
->idx
);
657 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
#define USE_EXTRA_TEMPS 0
667 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
672 * FIXME: Interface calls might go through a static rgctx trampoline which
673 * sets V5, but it doesn't save it, so we need to save it ourselves, and
676 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
677 cfg
->uses_rgctx_reg
= TRUE
;
679 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
680 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
681 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
682 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
683 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
))
684 /* V5 is reserved for passing the vtable/rgctx/IMT method */
685 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
686 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
687 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
693 * mono_arch_regalloc_cost:
695 * Return the cost, in number of memory references, of the action of
696 * allocating the variable VMV into a register during global register
700 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
706 #endif /* #ifndef DISABLE_JIT */
708 #ifndef __GNUC_PREREQ
709 #define __GNUC_PREREQ(maj, min) (0)
713 mono_arch_flush_icache (guint8
*code
, gint size
)
716 sys_icache_invalidate (code
, size
);
717 #elif __GNUC_PREREQ(4, 1)
718 __clear_cache (code
, code
+ size
);
719 #elif defined(PLATFORM_ANDROID)
720 const int syscall
= 0xf0002;
728 : "r" (code
), "r" (code
+ size
), "r" (syscall
)
729 : "r0", "r1", "r7", "r2"
732 __asm
__volatile ("mov r0, %0\n"
735 "swi 0x9f0002 @ sys_cacheflush"
737 : "r" (code
), "r" (code
+ size
), "r" (0)
738 : "r0", "r1", "r3" );
755 guint16 vtsize
; /* in param area */
758 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
765 gboolean vtype_retaddr
;
#ifndef __GNUC__
/*#define __alignof__(a) sizeof(a)*/
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
#endif
781 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
784 if (*gr
> ARMREG_R3
) {
785 ainfo
->offset
= *stack_size
;
786 ainfo
->reg
= ARMREG_SP
; /* in the caller */
787 ainfo
->storage
= RegTypeBase
;
790 ainfo
->storage
= RegTypeGeneral
;
794 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
797 int i8_align
= __alignof__ (gint64
);
801 gboolean split
= i8_align
== 4;
803 gboolean split
= TRUE
;
806 if (*gr
== ARMREG_R3
&& split
) {
807 /* first word in r3 and the second on the stack */
808 ainfo
->offset
= *stack_size
;
809 ainfo
->reg
= ARMREG_SP
; /* in the caller */
810 ainfo
->storage
= RegTypeBaseGen
;
812 } else if (*gr
>= ARMREG_R3
) {
814 /* darwin aligns longs to 4 byte only */
820 ainfo
->offset
= *stack_size
;
821 ainfo
->reg
= ARMREG_SP
; /* in the caller */
822 ainfo
->storage
= RegTypeBase
;
826 if (i8_align
== 8 && ((*gr
) & 1))
829 ainfo
->storage
= RegTypeIRegPair
;
838 get_call_info (MonoMemPool
*mp
, MonoMethodSignature
*sig
, gboolean is_pinvoke
)
841 int n
= sig
->hasthis
+ sig
->param_count
;
842 MonoType
*simpletype
;
843 guint32 stack_size
= 0;
847 cinfo
= mono_mempool_alloc0 (mp
, sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
849 cinfo
= g_malloc0 (sizeof (CallInfo
) + (sizeof (ArgInfo
) * n
));
854 /* FIXME: handle returning a struct */
855 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
858 if (is_pinvoke
&& mono_class_native_size (mono_class_from_mono_type (sig
->ret
), &align
) <= sizeof (gpointer
)) {
859 cinfo
->ret
.storage
= RegTypeStructByVal
;
861 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
862 cinfo
->struct_ret
= ARMREG_R0
;
863 cinfo
->vtype_retaddr
= TRUE
;
869 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
872 DEBUG(printf("params: %d\n", sig
->param_count
));
873 for (i
= 0; i
< sig
->param_count
; ++i
) {
874 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
875 /* Prevent implicit arguments and sig_cookie from
876 being passed in registers */
878 /* Emit the signature cookie just before the implicit arguments */
879 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
881 DEBUG(printf("param %d: ", i
));
882 if (sig
->params
[i
]->byref
) {
883 DEBUG(printf("byref\n"));
884 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
888 simpletype
= mini_type_get_underlying_type (NULL
, sig
->params
[i
]);
889 switch (simpletype
->type
) {
890 case MONO_TYPE_BOOLEAN
:
893 cinfo
->args
[n
].size
= 1;
894 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
900 cinfo
->args
[n
].size
= 2;
901 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
906 cinfo
->args
[n
].size
= 4;
907 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
913 case MONO_TYPE_FNPTR
:
914 case MONO_TYPE_CLASS
:
915 case MONO_TYPE_OBJECT
:
916 case MONO_TYPE_STRING
:
917 case MONO_TYPE_SZARRAY
:
918 case MONO_TYPE_ARRAY
:
920 cinfo
->args
[n
].size
= sizeof (gpointer
);
921 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
924 case MONO_TYPE_GENERICINST
:
925 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
926 cinfo
->args
[n
].size
= sizeof (gpointer
);
927 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
932 case MONO_TYPE_TYPEDBYREF
:
933 case MONO_TYPE_VALUETYPE
: {
939 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
) {
940 size
= sizeof (MonoTypedRef
);
941 align
= sizeof (gpointer
);
943 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
945 size
= mono_class_native_size (klass
, &align
);
947 size
= mono_class_value_size (klass
, &align
);
949 DEBUG(printf ("load %d bytes struct\n",
950 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
953 align_size
+= (sizeof (gpointer
) - 1);
954 align_size
&= ~(sizeof (gpointer
) - 1);
955 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
956 cinfo
->args
[n
].storage
= RegTypeStructByVal
;
957 /* FIXME: align stack_size if needed */
959 if (align
>= 8 && (gr
& 1))
962 if (gr
> ARMREG_R3
) {
963 cinfo
->args
[n
].size
= 0;
964 cinfo
->args
[n
].vtsize
= nwords
;
966 int rest
= ARMREG_R3
- gr
+ 1;
967 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
969 cinfo
->args
[n
].size
= n_in_regs
;
970 cinfo
->args
[n
].vtsize
= nwords
- n_in_regs
;
971 cinfo
->args
[n
].reg
= gr
;
975 cinfo
->args
[n
].offset
= stack_size
;
976 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
977 stack_size
+= nwords
* sizeof (gpointer
);
984 cinfo
->args
[n
].size
= 8;
985 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, FALSE
);
989 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
993 /* Handle the case where there are no implicit arguments */
994 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
995 /* Prevent implicit arguments and sig_cookie from
996 being passed in registers */
998 /* Emit the signature cookie just before the implicit arguments */
999 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
1003 simpletype
= mini_type_get_underlying_type (NULL
, sig
->ret
);
1004 switch (simpletype
->type
) {
1005 case MONO_TYPE_BOOLEAN
:
1010 case MONO_TYPE_CHAR
:
1016 case MONO_TYPE_FNPTR
:
1017 case MONO_TYPE_CLASS
:
1018 case MONO_TYPE_OBJECT
:
1019 case MONO_TYPE_SZARRAY
:
1020 case MONO_TYPE_ARRAY
:
1021 case MONO_TYPE_STRING
:
1022 cinfo
->ret
.storage
= RegTypeGeneral
;
1023 cinfo
->ret
.reg
= ARMREG_R0
;
1027 cinfo
->ret
.storage
= RegTypeIRegPair
;
1028 cinfo
->ret
.reg
= ARMREG_R0
;
1032 cinfo
->ret
.storage
= RegTypeFP
;
1033 cinfo
->ret
.reg
= ARMREG_R0
;
1034 /* FIXME: cinfo->ret.reg = ???;
1035 cinfo->ret.storage = RegTypeFP;*/
1037 case MONO_TYPE_GENERICINST
:
1038 if (!mono_type_generic_inst_is_valuetype (simpletype
)) {
1039 cinfo
->ret
.storage
= RegTypeGeneral
;
1040 cinfo
->ret
.reg
= ARMREG_R0
;
1044 case MONO_TYPE_VALUETYPE
:
1045 case MONO_TYPE_TYPEDBYREF
:
1046 if (cinfo
->ret
.storage
!= RegTypeStructByVal
)
1047 cinfo
->ret
.storage
= RegTypeStructByAddr
;
1049 case MONO_TYPE_VOID
:
1052 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
1056 /* align stack size to 8 */
1057 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
1058 stack_size
= (stack_size
+ 7) & ~7;
1060 cinfo
->stack_usage
= stack_size
;
1067 * Set var information according to the calling convention. arm version.
1068 * The locals var stuff should most likely be split in another method.
1071 mono_arch_allocate_vars (MonoCompile
*cfg
)
1073 MonoMethodSignature
*sig
;
1074 MonoMethodHeader
*header
;
1076 int i
, offset
, size
, align
, curinst
;
1077 int frame_reg
= ARMREG_FP
;
1081 sig
= mono_method_signature (cfg
->method
);
1083 if (!cfg
->arch
.cinfo
)
1084 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1085 cinfo
= cfg
->arch
.cinfo
;
1087 /* FIXME: this will change when we use FP as gcc does */
1088 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1090 /* allow room for the vararg method args: void* and long/double */
1091 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1092 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
1094 header
= cfg
->header
;
1097 * We use the frame register also for any method that has
1098 * exception clauses. This way, when the handlers are called,
1099 * the code will reference local variables using the frame reg instead of
1100 * the stack pointer: if we had to restore the stack pointer, we'd
1101 * corrupt the method frames that are already on the stack (since
1102 * filters get called before stack unwinding happens) when the filter
1103 * code would call any method (this also applies to finally etc.).
1105 if ((cfg
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1106 frame_reg
= ARMREG_FP
;
1107 cfg
->frame_reg
= frame_reg
;
1108 if (frame_reg
!= ARMREG_SP
) {
1109 cfg
->used_int_regs
|= 1 << frame_reg
;
1112 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
)
1113 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1114 cfg
->used_int_regs
|= (1 << ARMREG_V5
);
1118 if (!MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1119 switch (mini_type_get_underlying_type (NULL
, sig
->ret
)->type
) {
1120 case MONO_TYPE_VOID
:
1123 cfg
->ret
->opcode
= OP_REGVAR
;
1124 cfg
->ret
->inst_c0
= ARMREG_R0
;
1128 /* local vars are at a positive offset from the stack pointer */
1130 * also note that if the function uses alloca, we use FP
1131 * to point at the local variables.
1133 offset
= 0; /* linkage area */
1134 /* align the offset to 16 bytes: not sure this is needed here */
1136 //offset &= ~(8 - 1);
1138 /* add parameter area size for called functions */
1139 offset
+= cfg
->param_area
;
1142 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1145 /* allow room to save the return value */
1146 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1149 /* the MonoLMF structure is stored just below the stack pointer */
1150 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1151 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
1152 cfg
->ret
->opcode
= OP_REGOFFSET
;
1153 cfg
->ret
->inst_basereg
= cfg
->frame_reg
;
1154 offset
+= sizeof (gpointer
) - 1;
1155 offset
&= ~(sizeof (gpointer
) - 1);
1156 cfg
->ret
->inst_offset
= - offset
;
1158 ins
= cfg
->vret_addr
;
1159 offset
+= sizeof(gpointer
) - 1;
1160 offset
&= ~(sizeof(gpointer
) - 1);
1161 ins
->inst_offset
= offset
;
1162 ins
->opcode
= OP_REGOFFSET
;
1163 ins
->inst_basereg
= frame_reg
;
1164 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1165 printf ("vret_addr =");
1166 mono_print_ins (cfg
->vret_addr
);
1169 offset
+= sizeof(gpointer
);
1172 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1173 if (cfg
->arch
.seq_point_info_var
) {
1176 ins
= cfg
->arch
.seq_point_info_var
;
1180 offset
+= align
- 1;
1181 offset
&= ~(align
- 1);
1182 ins
->opcode
= OP_REGOFFSET
;
1183 ins
->inst_basereg
= frame_reg
;
1184 ins
->inst_offset
= offset
;
1187 ins
= cfg
->arch
.ss_trigger_page_var
;
1190 offset
+= align
- 1;
1191 offset
&= ~(align
- 1);
1192 ins
->opcode
= OP_REGOFFSET
;
1193 ins
->inst_basereg
= frame_reg
;
1194 ins
->inst_offset
= offset
;
1198 curinst
= cfg
->locals_start
;
1199 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
1200 ins
= cfg
->varinfo
[i
];
1201 if ((ins
->flags
& MONO_INST_IS_DEAD
) || ins
->opcode
== OP_REGVAR
|| ins
->opcode
== OP_REGOFFSET
)
1204 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1205 * pinvoke wrappers when they call functions returning structure */
1206 if (ins
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (ins
->inst_vtype
) && ins
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
) {
1207 size
= mono_class_native_size (mono_class_from_mono_type (ins
->inst_vtype
), &ualign
);
1211 size
= mono_type_size (ins
->inst_vtype
, &align
);
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
1216 if (align
< 4 && size
>= 4)
1218 offset
+= align
- 1;
1219 offset
&= ~(align
- 1);
1220 ins
->opcode
= OP_REGOFFSET
;
1221 ins
->inst_offset
= offset
;
1222 ins
->inst_basereg
= frame_reg
;
1224 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1229 ins
= cfg
->args
[curinst
];
1230 if (ins
->opcode
!= OP_REGVAR
) {
1231 ins
->opcode
= OP_REGOFFSET
;
1232 ins
->inst_basereg
= frame_reg
;
1233 offset
+= sizeof (gpointer
) - 1;
1234 offset
&= ~(sizeof (gpointer
) - 1);
1235 ins
->inst_offset
= offset
;
1236 offset
+= sizeof (gpointer
);
1241 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1245 /* Allocate a local slot to hold the sig cookie address */
1246 offset
+= align
- 1;
1247 offset
&= ~(align
- 1);
1248 cfg
->sig_cookie
= offset
;
1252 for (i
= 0; i
< sig
->param_count
; ++i
) {
1253 ins
= cfg
->args
[curinst
];
1255 if (ins
->opcode
!= OP_REGVAR
) {
1256 ins
->opcode
= OP_REGOFFSET
;
1257 ins
->inst_basereg
= frame_reg
;
1258 size
= mini_type_stack_size_full (NULL
, sig
->params
[i
], &ualign
, sig
->pinvoke
);
1260 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1261 * since it loads/stores misaligned words, which don't do the right thing.
1263 if (align
< 4 && size
>= 4)
1265 /* The code in the prolog () stores words when storing vtypes received in a register */
1266 if (MONO_TYPE_ISSTRUCT (sig
->params
[i
]))
1268 offset
+= align
- 1;
1269 offset
&= ~(align
- 1);
1270 ins
->inst_offset
= offset
;
1276 /* align the offset to 8 bytes */
1281 cfg
->stack_offset
= offset
;
1285 mono_arch_create_vars (MonoCompile
*cfg
)
1287 MonoMethodSignature
*sig
;
1290 sig
= mono_method_signature (cfg
->method
);
1292 if (!cfg
->arch
.cinfo
)
1293 cfg
->arch
.cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1294 cinfo
= cfg
->arch
.cinfo
;
1296 if (cinfo
->ret
.storage
== RegTypeStructByVal
)
1297 cfg
->ret_var_is_local
= TRUE
;
1299 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
!= RegTypeStructByVal
) {
1300 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1301 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1302 printf ("vret_addr = ");
1303 mono_print_ins (cfg
->vret_addr
);
1307 if (cfg
->gen_seq_points
&& cfg
->compile_aot
) {
1308 MonoInst
*ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1309 ins
->flags
|= MONO_INST_VOLATILE
;
1310 cfg
->arch
.seq_point_info_var
= ins
;
1312 /* Allocate a separate variable for this to save 1 load per seq point */
1313 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1314 ins
->flags
|= MONO_INST_VOLATILE
;
1315 cfg
->arch
.ss_trigger_page_var
= ins
;
1320 emit_sig_cookie (MonoCompile
*cfg
, MonoCallInst
*call
, CallInfo
*cinfo
)
1322 MonoMethodSignature
*tmp_sig
;
1325 if (call
->tail_call
)
1328 /* FIXME: Add support for signature tokens to AOT */
1329 cfg
->disable_aot
= TRUE
;
1331 g_assert (cinfo
->sig_cookie
.storage
== RegTypeBase
);
1334 * mono_ArgIterator_Setup assumes the signature cookie is
1335 * passed first and all the arguments which were before it are
1336 * passed on the stack after the signature. So compensate by
1337 * passing a different signature.
1339 tmp_sig
= mono_metadata_signature_dup (call
->signature
);
1340 tmp_sig
->param_count
-= call
->signature
->sentinelpos
;
1341 tmp_sig
->sentinelpos
= 0;
1342 memcpy (tmp_sig
->params
, call
->signature
->params
+ call
->signature
->sentinelpos
, tmp_sig
->param_count
* sizeof (MonoType
*));
1344 MONO_INST_NEW (cfg
, sig_arg
, OP_ICONST
);
1345 sig_arg
->dreg
= mono_alloc_ireg (cfg
);
1346 sig_arg
->inst_p0
= tmp_sig
;
1347 MONO_ADD_INS (cfg
->cbb
, sig_arg
);
1349 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, cinfo
->sig_cookie
.offset
, sig_arg
->dreg
);
1354 mono_arch_get_llvm_call_info (MonoCompile
*cfg
, MonoMethodSignature
*sig
)
1359 LLVMCallInfo
*linfo
;
1361 n
= sig
->param_count
+ sig
->hasthis
;
1363 cinfo
= get_call_info (cfg
->mempool
, sig
, sig
->pinvoke
);
1365 linfo
= mono_mempool_alloc0 (cfg
->mempool
, sizeof (LLVMCallInfo
) + (sizeof (LLVMArgInfo
) * n
));
1368 * LLVM always uses the native ABI while we use our own ABI, the
1369 * only difference is the handling of vtypes:
1370 * - we only pass/receive them in registers in some cases, and only
1371 * in 1 or 2 integer registers.
1373 if (cinfo
->ret
.storage
!= RegTypeGeneral
&& cinfo
->ret
.storage
!= RegTypeNone
&& cinfo
->ret
.storage
!= RegTypeFP
&& cinfo
->ret
.storage
!= RegTypeIRegPair
) {
1374 cfg
->exception_message
= g_strdup ("unknown ret conv");
1375 cfg
->disable_llvm
= TRUE
;
1379 for (i
= 0; i
< n
; ++i
) {
1380 ainfo
= cinfo
->args
+ i
;
1382 linfo
->args
[i
].storage
= LLVMArgNone
;
1384 switch (ainfo
->storage
) {
1385 case RegTypeGeneral
:
1386 case RegTypeIRegPair
:
1388 linfo
->args
[i
].storage
= LLVMArgInIReg
;
1391 cfg
->exception_message
= g_strdup_printf ("ainfo->storage (%d)", ainfo
->storage
);
1392 cfg
->disable_llvm
= TRUE
;
1402 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1405 MonoMethodSignature
*sig
;
1409 sig
= call
->signature
;
1410 n
= sig
->param_count
+ sig
->hasthis
;
1412 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
1414 for (i
= 0; i
< n
; ++i
) {
1415 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1418 if (i
>= sig
->hasthis
)
1419 t
= sig
->params
[i
- sig
->hasthis
];
1421 t
= &mono_defaults
.int_class
->byval_arg
;
1422 t
= mini_type_get_underlying_type (NULL
, t
);
1424 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1425 /* Emit the signature cookie just before the implicit arguments */
1426 emit_sig_cookie (cfg
, call
, cinfo
);
1429 in
= call
->args
[i
];
1431 switch (ainfo
->storage
) {
1432 case RegTypeGeneral
:
1433 case RegTypeIRegPair
:
1434 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1435 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1436 ins
->dreg
= mono_alloc_ireg (cfg
);
1437 ins
->sreg1
= in
->dreg
+ 1;
1438 MONO_ADD_INS (cfg
->cbb
, ins
);
1439 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1441 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1442 ins
->dreg
= mono_alloc_ireg (cfg
);
1443 ins
->sreg1
= in
->dreg
+ 2;
1444 MONO_ADD_INS (cfg
->cbb
, ins
);
1445 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1446 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
1447 #ifndef MONO_ARCH_SOFT_FLOAT
1451 if (ainfo
->size
== 4) {
1452 #ifdef MONO_ARCH_SOFT_FLOAT
1453 /* mono_emit_call_args () have already done the r8->r4 conversion */
1454 /* The converted value is in an int vreg */
1455 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1456 ins
->dreg
= mono_alloc_ireg (cfg
);
1457 ins
->sreg1
= in
->dreg
;
1458 MONO_ADD_INS (cfg
->cbb
, ins
);
1459 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1461 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1462 creg
= mono_alloc_ireg (cfg
);
1463 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1464 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1467 #ifdef MONO_ARCH_SOFT_FLOAT
1468 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
1469 ins
->dreg
= mono_alloc_ireg (cfg
);
1470 ins
->sreg1
= in
->dreg
;
1471 MONO_ADD_INS (cfg
->cbb
, ins
);
1472 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1474 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
1475 ins
->dreg
= mono_alloc_ireg (cfg
);
1476 ins
->sreg1
= in
->dreg
;
1477 MONO_ADD_INS (cfg
->cbb
, ins
);
1478 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1480 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1481 creg
= mono_alloc_ireg (cfg
);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1483 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1484 creg
= mono_alloc_ireg (cfg
);
1485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
1486 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
1489 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1491 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1492 ins
->dreg
= mono_alloc_ireg (cfg
);
1493 ins
->sreg1
= in
->dreg
;
1494 MONO_ADD_INS (cfg
->cbb
, ins
);
1496 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1499 case RegTypeStructByAddr
:
1502 /* FIXME: where si the data allocated? */
1503 arg
->backend
.reg3
= ainfo
->reg
;
1504 call
->used_iregs
|= 1 << ainfo
->reg
;
1505 g_assert_not_reached ();
1508 case RegTypeStructByVal
:
1509 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1510 ins
->opcode
= OP_OUTARG_VT
;
1511 ins
->sreg1
= in
->dreg
;
1512 ins
->klass
= in
->klass
;
1513 ins
->inst_p0
= call
;
1514 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1515 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1516 MONO_ADD_INS (cfg
->cbb
, ins
);
1519 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1520 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1521 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1522 if (t
->type
== MONO_TYPE_R8
) {
1523 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1525 #ifdef MONO_ARCH_SOFT_FLOAT
1526 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1528 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1532 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1535 case RegTypeBaseGen
:
1536 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1537 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? in
->dreg
+ 1 : in
->dreg
+ 2);
1538 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1539 ins
->dreg
= mono_alloc_ireg (cfg
);
1540 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? in
->dreg
+ 2 : in
->dreg
+ 1;
1541 MONO_ADD_INS (cfg
->cbb
, ins
);
1542 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
1543 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
1546 #ifdef MONO_ARCH_SOFT_FLOAT
1547 g_assert_not_reached ();
1550 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1551 creg
= mono_alloc_ireg (cfg
);
1552 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1554 creg
= mono_alloc_ireg (cfg
);
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
1556 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
1557 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1559 g_assert_not_reached ();
1566 arg
->backend
.reg3
= ainfo
->reg
;
1567 /* FP args are passed in int regs */
1568 call
->used_iregs
|= 1 << ainfo
->reg
;
1569 if (ainfo
->size
== 8) {
1570 arg
->opcode
= OP_OUTARG_R8
;
1571 call
->used_iregs
|= 1 << (ainfo
->reg
+ 1);
1573 arg
->opcode
= OP_OUTARG_R4
;
1576 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1580 g_assert_not_reached ();
1584 /* Handle the case where there are no implicit arguments */
1585 if (!sig
->pinvoke
&& (sig
->call_convention
== MONO_CALL_VARARG
) && (n
== sig
->sentinelpos
))
1586 emit_sig_cookie (cfg
, call
, cinfo
);
1588 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1591 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
1592 /* The JIT will transform this into a normal call */
1593 call
->vret_in_reg
= TRUE
;
1595 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1596 vtarg
->sreg1
= call
->vret_var
->dreg
;
1597 vtarg
->dreg
= mono_alloc_preg (cfg
);
1598 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1600 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1604 call
->stack_usage
= cinfo
->stack_usage
;
1610 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1612 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1613 ArgInfo
*ainfo
= ins
->inst_p1
;
1614 int ovf_size
= ainfo
->vtsize
;
1615 int doffset
= ainfo
->offset
;
1616 int i
, soffset
, dreg
;
1619 for (i
= 0; i
< ainfo
->size
; ++i
) {
1620 dreg
= mono_alloc_ireg (cfg
);
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1622 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1623 soffset
+= sizeof (gpointer
);
1625 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1627 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, ovf_size
* sizeof (gpointer
), 0);
1631 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1633 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1636 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1639 if (COMPILE_LLVM (cfg
)) {
1640 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1642 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1643 ins
->sreg1
= val
->dreg
+ 1;
1644 ins
->sreg2
= val
->dreg
+ 2;
1645 MONO_ADD_INS (cfg
->cbb
, ins
);
1649 #ifdef MONO_ARCH_SOFT_FLOAT
1650 if (ret
->type
== MONO_TYPE_R8
) {
1653 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1654 ins
->dreg
= cfg
->ret
->dreg
;
1655 ins
->sreg1
= val
->dreg
;
1656 MONO_ADD_INS (cfg
->cbb
, ins
);
1659 if (ret
->type
== MONO_TYPE_R4
) {
1660 /* Already converted to an int in method_to_ir () */
1661 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1664 #elif defined(ARM_FPU_VFP)
1665 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1668 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1669 ins
->dreg
= cfg
->ret
->dreg
;
1670 ins
->sreg1
= val
->dreg
;
1671 MONO_ADD_INS (cfg
->cbb
, ins
);
1675 if (ret
->type
== MONO_TYPE_R4
|| ret
->type
== MONO_TYPE_R8
) {
1676 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1683 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1686 #endif /* #ifndef DISABLE_JIT */
1689 mono_arch_is_inst_imm (gint64 imm
)
1694 #define DYN_CALL_STACK_ARGS 6
1697 MonoMethodSignature
*sig
;
1702 mgreg_t regs
[PARAM_REGS
+ DYN_CALL_STACK_ARGS
];
1708 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
1712 if (sig
->hasthis
+ sig
->param_count
> PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1715 switch (cinfo
->ret
.storage
) {
1717 case RegTypeGeneral
:
1718 case RegTypeIRegPair
:
1719 case RegTypeStructByAddr
:
1724 #elif defined(ARM_FPU_VFP)
1733 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
1734 switch (cinfo
->args
[i
].storage
) {
1735 case RegTypeGeneral
:
1737 case RegTypeIRegPair
:
1740 if (cinfo
->args
[i
].offset
>= (DYN_CALL_STACK_ARGS
* sizeof (gpointer
)))
1743 case RegTypeStructByVal
:
1744 if (cinfo
->args
[i
].reg
+ cinfo
->args
[i
].vtsize
>= PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1752 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1753 for (i
= 0; i
< sig
->param_count
; ++i
) {
1754 MonoType
*t
= sig
->params
[i
];
1762 #ifdef MONO_ARCH_SOFT_FLOAT
1781 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
1783 ArchDynCallInfo
*info
;
1786 cinfo
= get_call_info (NULL
, sig
, FALSE
);
1788 if (!dyn_call_supported (cinfo
, sig
)) {
1793 info
= g_new0 (ArchDynCallInfo
, 1);
1794 // FIXME: Preprocess the info to speed up start_dyn_call ()
1796 info
->cinfo
= cinfo
;
1798 return (MonoDynCallInfo
*)info
;
1802 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
1804 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1806 g_free (ainfo
->cinfo
);
1811 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
, int buf_len
)
1813 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
1814 DynCallArgs
*p
= (DynCallArgs
*)buf
;
1815 int arg_index
, greg
, i
, j
;
1816 MonoMethodSignature
*sig
= dinfo
->sig
;
1818 g_assert (buf_len
>= sizeof (DynCallArgs
));
1826 if (dinfo
->cinfo
->vtype_retaddr
)
1827 p
->regs
[greg
++] = (mgreg_t
)ret
;
1830 p
->regs
[greg
++] = (mgreg_t
)*(args
[arg_index
++]);
1832 for (i
= 0; i
< sig
->param_count
; i
++) {
1833 MonoType
*t
= mono_type_get_underlying_type (sig
->params
[i
]);
1834 gpointer
*arg
= args
[arg_index
++];
1835 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
1838 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
|| ainfo
->storage
== RegTypeStructByVal
)
1840 else if (ainfo
->storage
== RegTypeBase
)
1841 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1843 g_assert_not_reached ();
1846 p
->regs
[slot
] = (mgreg_t
)*arg
;
1851 case MONO_TYPE_STRING
:
1852 case MONO_TYPE_CLASS
:
1853 case MONO_TYPE_ARRAY
:
1854 case MONO_TYPE_SZARRAY
:
1855 case MONO_TYPE_OBJECT
:
1859 p
->regs
[slot
] = (mgreg_t
)*arg
;
1861 case MONO_TYPE_BOOLEAN
:
1863 p
->regs
[slot
] = *(guint8
*)arg
;
1866 p
->regs
[slot
] = *(gint8
*)arg
;
1869 p
->regs
[slot
] = *(gint16
*)arg
;
1872 case MONO_TYPE_CHAR
:
1873 p
->regs
[slot
] = *(guint16
*)arg
;
1876 p
->regs
[slot
] = *(gint32
*)arg
;
1879 p
->regs
[slot
] = *(guint32
*)arg
;
1883 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1884 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1887 p
->regs
[slot
] = *(mgreg_t
*)arg
;
1890 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1891 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1893 case MONO_TYPE_GENERICINST
:
1894 if (MONO_TYPE_IS_REFERENCE (t
)) {
1895 p
->regs
[slot
] = (mgreg_t
)*arg
;
1900 case MONO_TYPE_VALUETYPE
:
1901 g_assert (ainfo
->storage
== RegTypeStructByVal
);
1903 if (ainfo
->size
== 0)
1904 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1908 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
1909 p
->regs
[slot
++] = ((mgreg_t
*)arg
) [j
];
1912 g_assert_not_reached ();
1918 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
1920 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1921 MonoMethodSignature
*sig
= ((ArchDynCallInfo
*)info
)->sig
;
1922 guint8
*ret
= ((DynCallArgs
*)buf
)->ret
;
1923 mgreg_t res
= ((DynCallArgs
*)buf
)->res
;
1924 mgreg_t res2
= ((DynCallArgs
*)buf
)->res2
;
1926 switch (mono_type_get_underlying_type (sig
->ret
)->type
) {
1927 case MONO_TYPE_VOID
:
1928 *(gpointer
*)ret
= NULL
;
1930 case MONO_TYPE_STRING
:
1931 case MONO_TYPE_CLASS
:
1932 case MONO_TYPE_ARRAY
:
1933 case MONO_TYPE_SZARRAY
:
1934 case MONO_TYPE_OBJECT
:
1938 *(gpointer
*)ret
= (gpointer
)res
;
1944 case MONO_TYPE_BOOLEAN
:
1945 *(guint8
*)ret
= res
;
1948 *(gint16
*)ret
= res
;
1951 case MONO_TYPE_CHAR
:
1952 *(guint16
*)ret
= res
;
1955 *(gint32
*)ret
= res
;
1958 *(guint32
*)ret
= res
;
1962 /* This handles endianness as well */
1963 ((gint32
*)ret
) [0] = res
;
1964 ((gint32
*)ret
) [1] = res2
;
1966 case MONO_TYPE_GENERICINST
:
1967 if (MONO_TYPE_IS_REFERENCE (sig
->ret
)) {
1968 *(gpointer
*)ret
= (gpointer
)res
;
1973 case MONO_TYPE_VALUETYPE
:
1974 g_assert (ainfo
->cinfo
->vtype_retaddr
);
1977 #if defined(ARM_FPU_VFP)
1979 *(float*)ret
= *(float*)&res
;
1981 case MONO_TYPE_R8
: {
1987 *(double*)ret
= *(double*)®s
;
1992 g_assert_not_reached ();
1999 * Allow tracing to work with this interface (with an optional argument)
2003 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
2007 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
2008 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
2009 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
2010 code
= emit_call_reg (code
, ARMREG_R2
);
2023 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
2026 int save_mode
= SAVE_NONE
;
2028 MonoMethod
*method
= cfg
->method
;
2029 int rtype
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
)->type
;
2030 int save_offset
= cfg
->param_area
;
2034 offset
= code
- cfg
->native_code
;
2035 /* we need about 16 instructions */
2036 if (offset
> (cfg
->code_size
- 16 * 4)) {
2037 cfg
->code_size
*= 2;
2038 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2039 code
= cfg
->native_code
+ offset
;
2042 case MONO_TYPE_VOID
:
2043 /* special case string .ctor icall */
2044 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
2045 save_mode
= SAVE_ONE
;
2047 save_mode
= SAVE_NONE
;
2051 save_mode
= SAVE_TWO
;
2055 save_mode
= SAVE_FP
;
2057 case MONO_TYPE_VALUETYPE
:
2058 save_mode
= SAVE_STRUCT
;
2061 save_mode
= SAVE_ONE
;
2065 switch (save_mode
) {
2067 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2068 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
2069 if (enable_arguments
) {
2070 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
2071 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2075 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2076 if (enable_arguments
) {
2077 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2081 /* FIXME: what reg? */
2082 if (enable_arguments
) {
2083 /* FIXME: what reg? */
2087 if (enable_arguments
) {
2088 /* FIXME: get the actual address */
2089 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
2097 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
2098 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
2099 code
= emit_call_reg (code
, ARMREG_IP
);
2101 switch (save_mode
) {
2103 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
2104 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
2107 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
/*
 * The immediate field for cond branches is big enough for all reasonable
 * methods; emit a patchable conditional branch to a basic block.
 * NOTE(review): reconstructed from an extraction-mangled copy; the else-arm
 * braces of EMIT_COND_BRANCH_FLAGS were restored from context.
 */
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
	ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	ARM_B_COND (code, (condcode), 0); \
}

#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])

/* emit an exception if condition is fail
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
	do { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				    MONO_PATCH_INFO_EXC, exc_name); \
		ARM_BL_COND (code, (condcode), 0); \
	} while (0)

#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2148 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2153 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2155 MonoInst
*ins
, *n
, *last_ins
= NULL
;
2157 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
2158 switch (ins
->opcode
) {
2161 /* Already done by an arch-independent pass */
2163 case OP_LOAD_MEMBASE
:
2164 case OP_LOADI4_MEMBASE
:
2166 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2167 * OP_LOAD_MEMBASE offset(basereg), reg
2169 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
2170 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
2171 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2172 ins
->inst_offset
== last_ins
->inst_offset
) {
2173 if (ins
->dreg
== last_ins
->sreg1
) {
2174 MONO_DELETE_INS (bb
, ins
);
2177 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2178 ins
->opcode
= OP_MOVE
;
2179 ins
->sreg1
= last_ins
->sreg1
;
2183 * Note: reg1 must be different from the basereg in the second load
2184 * OP_LOAD_MEMBASE offset(basereg), reg1
2185 * OP_LOAD_MEMBASE offset(basereg), reg2
2187 * OP_LOAD_MEMBASE offset(basereg), reg1
2188 * OP_MOVE reg1, reg2
2190 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
2191 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
2192 ins
->inst_basereg
!= last_ins
->dreg
&&
2193 ins
->inst_basereg
== last_ins
->inst_basereg
&&
2194 ins
->inst_offset
== last_ins
->inst_offset
) {
2196 if (ins
->dreg
== last_ins
->dreg
) {
2197 MONO_DELETE_INS (bb
, ins
);
2200 ins
->opcode
= OP_MOVE
;
2201 ins
->sreg1
= last_ins
->dreg
;
2204 //g_assert_not_reached ();
2208 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2209 * OP_LOAD_MEMBASE offset(basereg), reg
2211 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2212 * OP_ICONST reg, imm
2214 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
2215 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
2216 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2217 ins
->inst_offset
== last_ins
->inst_offset
) {
2218 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2219 ins
->opcode
= OP_ICONST
;
2220 ins
->inst_c0
= last_ins
->inst_imm
;
2221 g_assert_not_reached (); // check this rule
2225 case OP_LOADU1_MEMBASE
:
2226 case OP_LOADI1_MEMBASE
:
2227 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2228 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2229 ins
->inst_offset
== last_ins
->inst_offset
) {
2230 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2231 ins
->sreg1
= last_ins
->sreg1
;
2234 case OP_LOADU2_MEMBASE
:
2235 case OP_LOADI2_MEMBASE
:
2236 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2237 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2238 ins
->inst_offset
== last_ins
->inst_offset
) {
2239 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2240 ins
->sreg1
= last_ins
->sreg1
;
2244 ins
->opcode
= OP_MOVE
;
2248 if (ins
->dreg
== ins
->sreg1
) {
2249 MONO_DELETE_INS (bb
, ins
);
2253 * OP_MOVE sreg, dreg
2254 * OP_MOVE dreg, sreg
2256 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2257 ins
->sreg1
== last_ins
->dreg
&&
2258 ins
->dreg
== last_ins
->sreg1
) {
2259 MONO_DELETE_INS (bb
, ins
);
2267 bb
->last_ins
= last_ins
;
2271 * the branch_cc_table should maintain the order of these
2285 branch_cc_table
[] = {
/* Create a new instruction and insert it before INS in the current bb
 * (used by the lowering pass below, which relies on `bb` and `ins` being
 * in scope at the expansion site). */
#define NEW_INS(cfg,dest,op) do { \
	MONO_INST_NEW ((cfg), (dest), (op)); \
	mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
2305 map_to_reg_reg_op (int op
)
2314 case OP_COMPARE_IMM
:
2316 case OP_ICOMPARE_IMM
:
2330 case OP_LOAD_MEMBASE
:
2331 return OP_LOAD_MEMINDEX
;
2332 case OP_LOADI4_MEMBASE
:
2333 return OP_LOADI4_MEMINDEX
;
2334 case OP_LOADU4_MEMBASE
:
2335 return OP_LOADU4_MEMINDEX
;
2336 case OP_LOADU1_MEMBASE
:
2337 return OP_LOADU1_MEMINDEX
;
2338 case OP_LOADI2_MEMBASE
:
2339 return OP_LOADI2_MEMINDEX
;
2340 case OP_LOADU2_MEMBASE
:
2341 return OP_LOADU2_MEMINDEX
;
2342 case OP_LOADI1_MEMBASE
:
2343 return OP_LOADI1_MEMINDEX
;
2344 case OP_STOREI1_MEMBASE_REG
:
2345 return OP_STOREI1_MEMINDEX
;
2346 case OP_STOREI2_MEMBASE_REG
:
2347 return OP_STOREI2_MEMINDEX
;
2348 case OP_STOREI4_MEMBASE_REG
:
2349 return OP_STOREI4_MEMINDEX
;
2350 case OP_STORE_MEMBASE_REG
:
2351 return OP_STORE_MEMINDEX
;
2352 case OP_STORER4_MEMBASE_REG
:
2353 return OP_STORER4_MEMINDEX
;
2354 case OP_STORER8_MEMBASE_REG
:
2355 return OP_STORER8_MEMINDEX
;
2356 case OP_STORE_MEMBASE_IMM
:
2357 return OP_STORE_MEMBASE_REG
;
2358 case OP_STOREI1_MEMBASE_IMM
:
2359 return OP_STOREI1_MEMBASE_REG
;
2360 case OP_STOREI2_MEMBASE_IMM
:
2361 return OP_STOREI2_MEMBASE_REG
;
2362 case OP_STOREI4_MEMBASE_IMM
:
2363 return OP_STOREI4_MEMBASE_REG
;
2365 g_assert_not_reached ();
2369 * Remove from the instruction list the instructions that can't be
2370 * represented with very simple instructions with no register
2374 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2376 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
2377 int rot_amount
, imm8
, low_imm
;
2379 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2381 switch (ins
->opcode
) {
2385 case OP_COMPARE_IMM
:
2386 case OP_ICOMPARE_IMM
:
2400 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
2401 NEW_INS (cfg
, temp
, OP_ICONST
);
2402 temp
->inst_c0
= ins
->inst_imm
;
2403 temp
->dreg
= mono_alloc_ireg (cfg
);
2404 ins
->sreg2
= temp
->dreg
;
2405 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
2407 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
2413 if (ins
->inst_imm
== 1) {
2414 ins
->opcode
= OP_MOVE
;
2417 if (ins
->inst_imm
== 0) {
2418 ins
->opcode
= OP_ICONST
;
2422 imm8
= mono_is_power_of_two (ins
->inst_imm
);
2424 ins
->opcode
= OP_SHL_IMM
;
2425 ins
->inst_imm
= imm8
;
2428 NEW_INS (cfg
, temp
, OP_ICONST
);
2429 temp
->inst_c0
= ins
->inst_imm
;
2430 temp
->dreg
= mono_alloc_ireg (cfg
);
2431 ins
->sreg2
= temp
->dreg
;
2432 ins
->opcode
= OP_IMUL
;
2438 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
2439 /* ARM sets the C flag to 1 if there was _no_ overflow */
2440 ins
->next
->opcode
= OP_COND_EXC_NC
;
2442 case OP_LOCALLOC_IMM
:
2443 NEW_INS (cfg
, temp
, OP_ICONST
);
2444 temp
->inst_c0
= ins
->inst_imm
;
2445 temp
->dreg
= mono_alloc_ireg (cfg
);
2446 ins
->sreg1
= temp
->dreg
;
2447 ins
->opcode
= OP_LOCALLOC
;
2449 case OP_LOAD_MEMBASE
:
2450 case OP_LOADI4_MEMBASE
:
2451 case OP_LOADU4_MEMBASE
:
2452 case OP_LOADU1_MEMBASE
:
2453 /* we can do two things: load the immed in a register
2454 * and use an indexed load, or see if the immed can be
2455 * represented as an ad_imm + a load with a smaller offset
2456 * that fits. We just do the first for now, optimize later.
2458 if (arm_is_imm12 (ins
->inst_offset
))
2460 NEW_INS (cfg
, temp
, OP_ICONST
);
2461 temp
->inst_c0
= ins
->inst_offset
;
2462 temp
->dreg
= mono_alloc_ireg (cfg
);
2463 ins
->sreg2
= temp
->dreg
;
2464 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2466 case OP_LOADI2_MEMBASE
:
2467 case OP_LOADU2_MEMBASE
:
2468 case OP_LOADI1_MEMBASE
:
2469 if (arm_is_imm8 (ins
->inst_offset
))
2471 NEW_INS (cfg
, temp
, OP_ICONST
);
2472 temp
->inst_c0
= ins
->inst_offset
;
2473 temp
->dreg
= mono_alloc_ireg (cfg
);
2474 ins
->sreg2
= temp
->dreg
;
2475 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2477 case OP_LOADR4_MEMBASE
:
2478 case OP_LOADR8_MEMBASE
:
2479 if (arm_is_fpimm8 (ins
->inst_offset
))
2481 low_imm
= ins
->inst_offset
& 0x1ff;
2482 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
2483 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2484 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2485 temp
->sreg1
= ins
->inst_basereg
;
2486 temp
->dreg
= mono_alloc_ireg (cfg
);
2487 ins
->inst_basereg
= temp
->dreg
;
2488 ins
->inst_offset
= low_imm
;
2491 /* VFP/FPA doesn't have indexed load instructions */
2492 g_assert_not_reached ();
2494 case OP_STORE_MEMBASE_REG
:
2495 case OP_STOREI4_MEMBASE_REG
:
2496 case OP_STOREI1_MEMBASE_REG
:
2497 if (arm_is_imm12 (ins
->inst_offset
))
2499 NEW_INS (cfg
, temp
, OP_ICONST
);
2500 temp
->inst_c0
= ins
->inst_offset
;
2501 temp
->dreg
= mono_alloc_ireg (cfg
);
2502 ins
->sreg2
= temp
->dreg
;
2503 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2505 case OP_STOREI2_MEMBASE_REG
:
2506 if (arm_is_imm8 (ins
->inst_offset
))
2508 NEW_INS (cfg
, temp
, OP_ICONST
);
2509 temp
->inst_c0
= ins
->inst_offset
;
2510 temp
->dreg
= mono_alloc_ireg (cfg
);
2511 ins
->sreg2
= temp
->dreg
;
2512 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2514 case OP_STORER4_MEMBASE_REG
:
2515 case OP_STORER8_MEMBASE_REG
:
2516 if (arm_is_fpimm8 (ins
->inst_offset
))
2518 low_imm
= ins
->inst_offset
& 0x1ff;
2519 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
2520 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2521 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2522 temp
->sreg1
= ins
->inst_destbasereg
;
2523 temp
->dreg
= mono_alloc_ireg (cfg
);
2524 ins
->inst_destbasereg
= temp
->dreg
;
2525 ins
->inst_offset
= low_imm
;
2528 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2529 /* VFP/FPA doesn't have indexed store instructions */
2530 g_assert_not_reached ();
2532 case OP_STORE_MEMBASE_IMM
:
2533 case OP_STOREI1_MEMBASE_IMM
:
2534 case OP_STOREI2_MEMBASE_IMM
:
2535 case OP_STOREI4_MEMBASE_IMM
:
2536 NEW_INS (cfg
, temp
, OP_ICONST
);
2537 temp
->inst_c0
= ins
->inst_imm
;
2538 temp
->dreg
= mono_alloc_ireg (cfg
);
2539 ins
->sreg1
= temp
->dreg
;
2540 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2542 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
2544 gboolean swap
= FALSE
;
2548 /* Optimized away */
2553 /* Some fp compares require swapped operands */
2554 switch (ins
->next
->opcode
) {
2556 ins
->next
->opcode
= OP_FBLT
;
2560 ins
->next
->opcode
= OP_FBLT_UN
;
2564 ins
->next
->opcode
= OP_FBGE
;
2568 ins
->next
->opcode
= OP_FBGE_UN
;
2576 ins
->sreg1
= ins
->sreg2
;
2585 bb
->last_ins
= last_ins
;
2586 bb
->max_vreg
= cfg
->next_vreg
;
2590 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
2594 if (long_ins
->opcode
== OP_LNEG
) {
2596 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, ins
->dreg
+ 1, ins
->sreg1
+ 1, 0);
2597 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, ins
->dreg
+ 2, ins
->sreg1
+ 2, 0);
2603 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
2605 /* sreg is a float, dreg is an integer reg */
2607 ARM_FIXZ (code
, dreg
, sreg
);
2608 #elif defined(ARM_FPU_VFP)
2610 ARM_TOSIZD (code
, ARM_VFP_F0
, sreg
);
2612 ARM_TOUIZD (code
, ARM_VFP_F0
, sreg
);
2613 ARM_FMRS (code
, dreg
, ARM_VFP_F0
);
2617 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
2618 else if (size
== 2) {
2619 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2620 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
2624 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
2625 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
2626 } else if (size
== 2) {
2627 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2628 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
2634 #endif /* #ifndef DISABLE_JIT */
2638 const guchar
*target
;
2643 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
2646 search_thunk_slot (void *data
, int csize
, int bsize
, void *user_data
) {
2647 PatchData
*pdata
= (PatchData
*)user_data
;
2648 guchar
*code
= data
;
2649 guint32
*thunks
= data
;
2650 guint32
*endthunks
= (guint32
*)(code
+ bsize
);
2652 int difflow
, diffhigh
;
2654 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2655 difflow
= (char*)pdata
->code
- (char*)thunks
;
2656 diffhigh
= (char*)pdata
->code
- (char*)endthunks
;
2657 if (!((is_call_imm (thunks
) && is_call_imm (endthunks
)) || (is_call_imm (difflow
) && is_call_imm (diffhigh
))))
2661 * The thunk is composed of 3 words:
2662 * load constant from thunks [2] into ARM_IP
2665 * Note that the LR register is already setup
2667 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2668 if ((pdata
->found
== 2) || (pdata
->code
>= code
&& pdata
->code
<= code
+ csize
)) {
2669 while (thunks
< endthunks
) {
2670 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2671 if (thunks
[2] == (guint32
)pdata
->target
) {
2672 arm_patch (pdata
->code
, (guchar
*)thunks
);
2673 mono_arch_flush_icache (pdata
->code
, 4);
2676 } else if ((thunks
[0] == 0) && (thunks
[1] == 0) && (thunks
[2] == 0)) {
2677 /* found a free slot instead: emit thunk */
2678 /* ARMREG_IP is fine to use since this can't be an IMT call
2681 code
= (guchar
*)thunks
;
2682 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
2683 if (thumb_supported
)
2684 ARM_BX (code
, ARMREG_IP
);
2686 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
2687 thunks
[2] = (guint32
)pdata
->target
;
2688 mono_arch_flush_icache ((guchar
*)thunks
, 12);
2690 arm_patch (pdata
->code
, (guchar
*)thunks
);
2691 mono_arch_flush_icache (pdata
->code
, 4);
2695 /* skip 12 bytes, the size of the thunk */
2699 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
2705 handle_thunk (MonoDomain
*domain
, int absolute
, guchar
*code
, const guchar
*target
)
2710 domain
= mono_domain_get ();
2713 pdata
.target
= target
;
2714 pdata
.absolute
= absolute
;
2717 mono_domain_lock (domain
);
2718 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2721 /* this uses the first available slot */
2723 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2725 mono_domain_unlock (domain
);
2727 if (pdata
.found
!= 1)
2728 g_print ("thunk failed for %p from %p\n", target
, code
);
2729 g_assert (pdata
.found
== 1);
2733 arm_patch_general (MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
2735 guint32
*code32
= (void*)code
;
2736 guint32 ins
= *code32
;
2737 guint32 prim
= (ins
>> 25) & 7;
2738 guint32 tval
= GPOINTER_TO_UINT (target
);
2740 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2741 if (prim
== 5) { /* 101b */
2742 /* the diff starts 8 bytes from the branch opcode */
2743 gint diff
= target
- code
- 8;
2745 gint tmask
= 0xffffffff;
2746 if (tval
& 1) { /* entering thumb mode */
2747 diff
= target
- 1 - code
- 8;
2748 g_assert (thumb_supported
);
2749 tbits
= 0xf << 28; /* bl->blx bit pattern */
2750 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
2751 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2755 tmask
= ~(1 << 24); /* clear the link bit */
2756 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2761 if (diff
<= 33554431) {
2763 ins
= (ins
& 0xff000000) | diff
;
2765 *code32
= ins
| tbits
;
2769 /* diff between 0 and -33554432 */
2770 if (diff
>= -33554432) {
2772 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
2774 *code32
= ins
| tbits
;
2779 handle_thunk (domain
, TRUE
, code
, target
);
2784 * The alternative call sequences looks like this:
2786 * ldr ip, [pc] // loads the address constant
2787 * b 1f // jumps around the constant
2788 * address constant embedded in the code
2793 * There are two cases for patching:
2794 * a) at the end of method emission: in this case code points to the start
2795 * of the call sequence
2796 * b) during runtime patching of the call site: in this case code points
2797 * to the mov pc, ip instruction
2799 * We have to handle also the thunk jump code sequence:
2803 * address constant // execution never reaches here
2805 if ((ins
& 0x0ffffff0) == 0x12fff10) {
2806 /* Branch and exchange: the address is constructed in a reg
2807 * We can patch BX when the code sequence is the following:
2808 * ldr ip, [pc, #0] ; 0x8
2815 guint8
*emit
= (guint8
*)ccode
;
2816 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2818 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2819 ARM_BX (emit
, ARMREG_IP
);
2821 /*patching from magic trampoline*/
2822 if (ins
== ccode
[3]) {
2823 g_assert (code32
[-4] == ccode
[0]);
2824 g_assert (code32
[-3] == ccode
[1]);
2825 g_assert (code32
[-1] == ccode
[2]);
2826 code32
[-2] = (guint32
)target
;
2829 /*patching from JIT*/
2830 if (ins
== ccode
[0]) {
2831 g_assert (code32
[1] == ccode
[1]);
2832 g_assert (code32
[3] == ccode
[2]);
2833 g_assert (code32
[4] == ccode
[3]);
2834 code32
[2] = (guint32
)target
;
2837 g_assert_not_reached ();
2838 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
2846 guint8
*emit
= (guint8
*)ccode
;
2847 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2849 ARM_BLX_REG (emit
, ARMREG_IP
);
2851 g_assert (code32
[-3] == ccode
[0]);
2852 g_assert (code32
[-2] == ccode
[1]);
2853 g_assert (code32
[0] == ccode
[2]);
2855 code32
[-1] = (guint32
)target
;
2858 guint32
*tmp
= ccode
;
2859 guint8
*emit
= (guint8
*)tmp
;
2860 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2861 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2862 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
2863 ARM_BX (emit
, ARMREG_IP
);
2864 if (ins
== ccode
[2]) {
2865 g_assert_not_reached (); // should be -2 ...
2866 code32
[-1] = (guint32
)target
;
2869 if (ins
== ccode
[0]) {
2870 /* handles both thunk jump code and the far call sequence */
2871 code32
[2] = (guint32
)target
;
2874 g_assert_not_reached ();
2876 // g_print ("patched with 0x%08x\n", ins);
2880 arm_patch (guchar
*code
, const guchar
*target
)
2882 arm_patch_general (NULL
, code
, target
);
2886 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2887 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2888 * to be used with the emit macros.
2889 * Return -1 otherwise.
2892 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
2895 for (i
= 0; i
< 31; i
+= 2) {
2896 res
= (val
<< (32 - i
)) | (val
>> i
);
2899 *rot_amount
= i
? 32 - i
: 0;
2906 * Emits in code a sequence of instructions that load the value 'val'
2907 * into the dreg register. Uses at most 4 instructions.
2910 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
2912 int imm8
, rot_amount
;
2914 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
2915 /* skip the constant pool */
2921 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
2922 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
2923 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
2924 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
2927 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
2929 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
2933 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
2935 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
2937 if (val
& 0xFF0000) {
2938 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2940 if (val
& 0xFF000000) {
2941 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2943 } else if (val
& 0xFF00) {
2944 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
2945 if (val
& 0xFF0000) {
2946 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2948 if (val
& 0xFF000000) {
2949 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2951 } else if (val
& 0xFF0000) {
2952 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
2953 if (val
& 0xFF000000) {
2954 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2957 //g_assert_not_reached ();
2963 mono_arm_thumb_supported (void)
2965 return thumb_supported
;
2971 * emit_load_volatile_arguments:
2973 * Load volatile arguments from the stack to the original input registers.
2974 * Required before a tail call.
2977 emit_load_volatile_arguments (MonoCompile
*cfg
, guint8
*code
)
2979 MonoMethod
*method
= cfg
->method
;
2980 MonoMethodSignature
*sig
;
2985 /* FIXME: Generate intermediate code instead */
2987 sig
= mono_method_signature (method
);
2989 /* This is the opposite of the code in emit_prolog */
2993 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
2995 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2996 ArgInfo
*ainfo
= &cinfo
->ret
;
2997 inst
= cfg
->vret_addr
;
2998 g_assert (arm_is_imm12 (inst
->inst_offset
));
2999 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3001 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
3002 ArgInfo
*ainfo
= cinfo
->args
+ i
;
3003 inst
= cfg
->args
[pos
];
3005 if (cfg
->verbose_level
> 2)
3006 g_print ("Loading argument %d (type: %d)\n", i
, ainfo
->storage
);
3007 if (inst
->opcode
== OP_REGVAR
) {
3008 if (ainfo
->storage
== RegTypeGeneral
)
3009 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
3010 else if (ainfo
->storage
== RegTypeFP
) {
3011 g_assert_not_reached ();
3012 } else if (ainfo
->storage
== RegTypeBase
) {
3016 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3017 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3019 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3020 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3024 g_assert_not_reached ();
3026 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
) {
3027 switch (ainfo
->size
) {
3034 g_assert (arm_is_imm12 (inst
->inst_offset
));
3035 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3036 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
3037 ARM_LDR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
3040 if (arm_is_imm12 (inst
->inst_offset
)) {
3041 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
3043 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
3044 ARM_LDR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
3048 } else if (ainfo
->storage
== RegTypeBaseGen
) {
3051 } else if (ainfo
->storage
== RegTypeBase
) {
3053 } else if (ainfo
->storage
== RegTypeFP
) {
3054 g_assert_not_reached ();
3055 } else if (ainfo
->storage
== RegTypeStructByVal
) {
3056 int doffset
= inst
->inst_offset
;
3060 if (mono_class_from_mono_type (inst
->inst_vtype
))
3061 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), NULL
);
3062 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
3063 if (arm_is_imm12 (doffset
)) {
3064 ARM_LDR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
3066 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
3067 ARM_LDR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
3069 soffset
+= sizeof (gpointer
);
3070 doffset
+= sizeof (gpointer
);
3075 } else if (ainfo
->storage
== RegTypeStructByAddr
) {
3090 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
3095 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
3096 MonoInst
*last_ins
= NULL
;
3097 guint last_offset
= 0;
3099 int imm8
, rot_amount
;
3101 /* we don't align basic blocks of loops on arm */
3103 if (cfg
->verbose_level
> 2)
3104 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
3106 cpos
= bb
->max_offset
;
3108 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
3109 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3110 //g_assert (!mono_compile_aot);
3113 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3114 /* this is not thread save, but good enough */
3115 /* fixme: howto handle overflows? */
3116 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3119 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
3120 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3121 (gpointer
)"mono_break");
3122 code
= emit_call_seq (cfg
, code
);
3125 MONO_BB_FOR_EACH_INS (bb
, ins
) {
3126 offset
= code
- cfg
->native_code
;
3128 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
3130 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3131 cfg
->code_size
*= 2;
3132 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3133 code
= cfg
->native_code
+ offset
;
3135 // if (ins->cil_code)
3136 // g_print ("cil code\n");
3137 mono_debug_record_line_number (cfg
, ins
, offset
);
3139 switch (ins
->opcode
) {
3140 case OP_MEMORY_BARRIER
:
3143 #ifdef HAVE_AEABI_READ_TP
3144 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3145 (gpointer
)"__aeabi_read_tp");
3146 code
= emit_call_seq (cfg
, code
);
3148 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_R0
, ins
->inst_offset
);
3150 g_assert_not_reached ();
3154 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3155 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3158 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3159 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3161 case OP_STOREI1_MEMBASE_IMM
:
3162 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
3163 g_assert (arm_is_imm12 (ins
->inst_offset
));
3164 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3166 case OP_STOREI2_MEMBASE_IMM
:
3167 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
3168 g_assert (arm_is_imm8 (ins
->inst_offset
));
3169 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3171 case OP_STORE_MEMBASE_IMM
:
3172 case OP_STOREI4_MEMBASE_IMM
:
3173 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
3174 g_assert (arm_is_imm12 (ins
->inst_offset
));
3175 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
3177 case OP_STOREI1_MEMBASE_REG
:
3178 g_assert (arm_is_imm12 (ins
->inst_offset
));
3179 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3181 case OP_STOREI2_MEMBASE_REG
:
3182 g_assert (arm_is_imm8 (ins
->inst_offset
));
3183 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3185 case OP_STORE_MEMBASE_REG
:
3186 case OP_STOREI4_MEMBASE_REG
:
3187 /* this case is special, since it happens for spill code after lowering has been called */
3188 if (arm_is_imm12 (ins
->inst_offset
)) {
3189 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3191 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3192 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
3195 case OP_STOREI1_MEMINDEX
:
3196 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3198 case OP_STOREI2_MEMINDEX
:
3199 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3201 case OP_STORE_MEMINDEX
:
3202 case OP_STOREI4_MEMINDEX
:
3203 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
3206 g_assert_not_reached ();
3208 case OP_LOAD_MEMINDEX
:
3209 case OP_LOADI4_MEMINDEX
:
3210 case OP_LOADU4_MEMINDEX
:
3211 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3213 case OP_LOADI1_MEMINDEX
:
3214 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3216 case OP_LOADU1_MEMINDEX
:
3217 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3219 case OP_LOADI2_MEMINDEX
:
3220 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3222 case OP_LOADU2_MEMINDEX
:
3223 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3225 case OP_LOAD_MEMBASE
:
3226 case OP_LOADI4_MEMBASE
:
3227 case OP_LOADU4_MEMBASE
:
3228 /* this case is special, since it happens for spill code after lowering has been called */
3229 if (arm_is_imm12 (ins
->inst_offset
)) {
3230 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3232 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3233 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
3236 case OP_LOADI1_MEMBASE
:
3237 g_assert (arm_is_imm8 (ins
->inst_offset
));
3238 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3240 case OP_LOADU1_MEMBASE
:
3241 g_assert (arm_is_imm12 (ins
->inst_offset
));
3242 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3244 case OP_LOADU2_MEMBASE
:
3245 g_assert (arm_is_imm8 (ins
->inst_offset
));
3246 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3248 case OP_LOADI2_MEMBASE
:
3249 g_assert (arm_is_imm8 (ins
->inst_offset
));
3250 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3252 case OP_ICONV_TO_I1
:
3253 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
3254 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
3256 case OP_ICONV_TO_I2
:
3257 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3258 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3260 case OP_ICONV_TO_U1
:
3261 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
3263 case OP_ICONV_TO_U2
:
3264 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3265 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3269 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
3271 case OP_COMPARE_IMM
:
3272 case OP_ICOMPARE_IMM
:
3273 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3274 g_assert (imm8
>= 0);
3275 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
3279 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3280 * So instead of emitting a trap, we emit a call a C function and place a
3283 //*(int*)code = 0xef9f0001;
3286 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3287 (gpointer
)"mono_break");
3288 code
= emit_call_seq (cfg
, code
);
3290 case OP_RELAXED_NOP
:
3295 case OP_DUMMY_STORE
:
3296 case OP_NOT_REACHED
:
3299 case OP_SEQ_POINT
: {
3301 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
3302 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
3304 int dreg
= ARMREG_LR
;
3307 * For AOT, we use one got slot per method, which will point to a
3308 * SeqPointInfo structure, containing all the information required
3309 * by the code below.
3311 if (cfg
->compile_aot
) {
3312 g_assert (info_var
);
3313 g_assert (info_var
->opcode
== OP_REGOFFSET
);
3314 g_assert (arm_is_imm12 (info_var
->inst_offset
));
3318 * Read from the single stepping trigger page. This will cause a
3319 * SIGSEGV when single stepping is enabled.
3320 * We do this _before_ the breakpoint, so single stepping after
3321 * a breakpoint is hit will step to the next IL offset.
3323 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
3325 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3326 if (cfg
->compile_aot
) {
3327 /* Load the trigger page addr from the variable initialized in the prolog */
3328 var
= ss_trigger_page_var
;
3330 g_assert (var
->opcode
== OP_REGOFFSET
);
3331 g_assert (arm_is_imm12 (var
->inst_offset
));
3332 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
3334 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
3336 *(int*)code
= (int)ss_trigger_page
;
3339 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3342 mono_add_seq_point (cfg
, bb
, ins
, code
- cfg
->native_code
);
3344 if (cfg
->compile_aot
) {
3345 guint32 offset
= code
- cfg
->native_code
;
3348 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
3349 /* Add the offset */
3350 val
= ((offset
/ 4) * sizeof (guint8
*)) + G_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
3351 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
3353 * Have to emit nops to keep the difference between the offset
3354 * stored in seq_points and breakpoint instruction constant,
3355 * mono_arch_get_ip_for_breakpoint () depends on this.
3358 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
3362 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
3365 g_assert (!(val
& 0xFF000000));
3366 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3367 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3369 /* What is faster, a branch or a load ? */
3370 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
3371 /* The breakpoint instruction */
3372 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
3375 * A placeholder for a possible breakpoint inserted by
3376 * mono_arch_set_breakpoint ().
3378 for (i
= 0; i
< 4; ++i
)
3385 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3388 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3392 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3395 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3396 g_assert (imm8
>= 0);
3397 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3401 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3402 g_assert (imm8
>= 0);
3403 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3407 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3408 g_assert (imm8
>= 0);
3409 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3412 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3413 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3415 case OP_IADD_OVF_UN
:
3416 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3417 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3420 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3421 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3423 case OP_ISUB_OVF_UN
:
3424 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3425 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3427 case OP_ADD_OVF_CARRY
:
3428 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3429 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3431 case OP_ADD_OVF_UN_CARRY
:
3432 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3433 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3435 case OP_SUB_OVF_CARRY
:
3436 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3437 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3439 case OP_SUB_OVF_UN_CARRY
:
3440 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3441 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3445 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3448 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3449 g_assert (imm8
>= 0);
3450 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3453 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3457 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3461 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3462 g_assert (imm8
>= 0);
3463 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3467 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3468 g_assert (imm8
>= 0);
3469 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3471 case OP_ARM_RSBS_IMM
:
3472 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3473 g_assert (imm8
>= 0);
3474 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3476 case OP_ARM_RSC_IMM
:
3477 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3478 g_assert (imm8
>= 0);
3479 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3482 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3486 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3487 g_assert (imm8
>= 0);
3488 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3496 /* crappy ARM arch doesn't have a DIV instruction */
3497 g_assert_not_reached ();
3499 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3503 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3504 g_assert (imm8
>= 0);
3505 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3508 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3512 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3513 g_assert (imm8
>= 0);
3514 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3517 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3522 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3523 else if (ins
->dreg
!= ins
->sreg1
)
3524 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3527 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3532 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3533 else if (ins
->dreg
!= ins
->sreg1
)
3534 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3537 case OP_ISHR_UN_IMM
:
3539 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3540 else if (ins
->dreg
!= ins
->sreg1
)
3541 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3544 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3547 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3550 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
3553 if (ins
->dreg
== ins
->sreg2
)
3554 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3556 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3559 g_assert_not_reached ();
3562 /* FIXME: handle ovf/ sreg2 != dreg */
3563 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3564 /* FIXME: MUL doesn't set the C/O flags on ARM */
3566 case OP_IMUL_OVF_UN
:
3567 /* FIXME: handle ovf/ sreg2 != dreg */
3568 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3569 /* FIXME: MUL doesn't set the C/O flags on ARM */
3572 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
3575 /* Load the GOT offset */
3576 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3577 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
3579 *(gpointer
*)code
= NULL
;
3581 /* Load the value from the GOT */
3582 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
3584 case OP_ICONV_TO_I4
:
3585 case OP_ICONV_TO_U4
:
3587 if (ins
->dreg
!= ins
->sreg1
)
3588 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3591 int saved
= ins
->sreg2
;
3592 if (ins
->sreg2
== ARM_LSW_REG
) {
3593 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
3596 if (ins
->sreg1
!= ARM_LSW_REG
)
3597 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
3598 if (saved
!= ARM_MSW_REG
)
3599 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
3604 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
3605 #elif defined(ARM_FPU_VFP)
3606 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
3609 case OP_FCONV_TO_R4
:
3611 ARM_MVFS (code
, ins
->dreg
, ins
->sreg1
);
3612 #elif defined(ARM_FPU_VFP)
3613 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
3614 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3619 * Keep in sync with mono_arch_emit_epilog
3621 g_assert (!cfg
->method
->save_lmf
);
3623 code
= emit_load_volatile_arguments (cfg
, code
);
3625 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
3626 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
)) | ((1 << ARMREG_LR
)));
3627 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
3628 if (cfg
->compile_aot
) {
3629 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3631 *(gpointer
*)code
= NULL
;
3633 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
3639 /* ensure ins->sreg1 is not NULL */
3640 ARM_LDR_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
3643 g_assert (cfg
->sig_cookie
< 128);
3644 ARM_LDR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
3645 ARM_STR_IMM (code
, ARMREG_IP
, ins
->sreg1
, 0);
3654 call
= (MonoCallInst
*)ins
;
3655 if (ins
->flags
& MONO_INST_HAS_METHOD
)
3656 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
3658 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
3659 code
= emit_call_seq (cfg
, code
);
3660 code
= emit_move_return_value (cfg
, ins
, code
);
3666 case OP_VOIDCALL_REG
:
3668 code
= emit_call_reg (code
, ins
->sreg1
);
3669 code
= emit_move_return_value (cfg
, ins
, code
);
3671 case OP_FCALL_MEMBASE
:
3672 case OP_LCALL_MEMBASE
:
3673 case OP_VCALL_MEMBASE
:
3674 case OP_VCALL2_MEMBASE
:
3675 case OP_VOIDCALL_MEMBASE
:
3676 case OP_CALL_MEMBASE
:
3677 g_assert (arm_is_imm12 (ins
->inst_offset
));
3678 g_assert (ins
->sreg1
!= ARMREG_LR
);
3679 call
= (MonoCallInst
*)ins
;
3680 if (call
->dynamic_imt_arg
|| call
->method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3681 ARM_ADD_REG_IMM8 (code
, ARMREG_LR
, ARMREG_PC
, 4);
3682 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3684 * We can't embed the method in the code stream in PIC code, or
3686 * Instead, we put it in V5 in code emitted by
3687 * mono_arch_emit_imt_argument (), and embed NULL here to
3688 * signal the IMT thunk that the value is in V5.
3690 if (call
->dynamic_imt_arg
)
3691 *((gpointer
*)code
) = NULL
;
3693 *((gpointer
*)code
) = (gpointer
)call
->method
;
3696 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3697 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3699 code
= emit_move_return_value (cfg
, ins
, code
);
3702 /* keep alignment */
3703 int alloca_waste
= cfg
->param_area
;
3706 /* round the size to 8 bytes */
3707 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 7);
3708 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 7);
3710 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, alloca_waste
);
3711 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
3712 /* memzero the area: dreg holds the size, sp is the pointer */
3713 if (ins
->flags
& MONO_INST_INIT
) {
3714 guint8
*start_loop
, *branch_to_cond
;
3715 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
3716 branch_to_cond
= code
;
3719 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
3720 arm_patch (branch_to_cond
, code
);
3721 /* decrement by 4 and set flags */
3722 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 4);
3723 ARM_B_COND (code
, ARMCOND_GE
, 0);
3724 arm_patch (code
- 4, start_loop
);
3726 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ARMREG_SP
, alloca_waste
);
3731 MonoInst
*var
= cfg
->dyn_call_var
;
3733 g_assert (var
->opcode
== OP_REGOFFSET
);
3734 g_assert (arm_is_imm12 (var
->inst_offset
));
3736 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3737 ARM_MOV_REG_REG( code
, ARMREG_LR
, ins
->sreg1
);
3739 ARM_MOV_REG_REG( code
, ARMREG_IP
, ins
->sreg2
);
3741 /* Save args buffer */
3742 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
3744 /* Set stack slots using R0 as scratch reg */
3745 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3746 for (i
= 0; i
< DYN_CALL_STACK_ARGS
; ++i
) {
3747 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, (PARAM_REGS
+ i
) * sizeof (gpointer
));
3748 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_SP
, i
* sizeof (gpointer
));
3751 /* Set argument registers */
3752 for (i
= 0; i
< PARAM_REGS
; ++i
)
3753 ARM_LDR_IMM (code
, i
, ARMREG_LR
, i
* sizeof (gpointer
));
3756 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3757 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3760 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
3761 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res
));
3762 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res2
));
3766 if (ins
->sreg1
!= ARMREG_R0
)
3767 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3768 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3769 (gpointer
)"mono_arch_throw_exception");
3770 code
= emit_call_seq (cfg
, code
);
3774 if (ins
->sreg1
!= ARMREG_R0
)
3775 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3776 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3777 (gpointer
)"mono_arch_rethrow_exception");
3778 code
= emit_call_seq (cfg
, code
);
3781 case OP_START_HANDLER
: {
3782 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3784 if (arm_is_imm12 (spvar
->inst_offset
)) {
3785 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
3787 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3788 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
3792 case OP_ENDFILTER
: {
3793 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3795 if (ins
->sreg1
!= ARMREG_R0
)
3796 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3797 if (arm_is_imm12 (spvar
->inst_offset
)) {
3798 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3800 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3801 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3802 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3804 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3807 case OP_ENDFINALLY
: {
3808 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3810 if (arm_is_imm12 (spvar
->inst_offset
)) {
3811 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3813 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3814 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3815 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3817 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3820 case OP_CALL_HANDLER
:
3821 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3823 mono_cfg_add_try_hole (cfg
, ins
->inst_eh_block
, code
, bb
);
3826 ins
->inst_c0
= code
- cfg
->native_code
;
3829 /*if (ins->inst_target_bb->native_offset) {
3831 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3833 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3838 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
3842 * In the normal case we have:
3843 * ldr pc, [pc, ins->sreg1 << 2]
3846 * ldr lr, [pc, ins->sreg1 << 2]
3848 * After follows the data.
3849 * FIXME: add aot support.
3851 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
3852 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3853 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3854 cfg
->code_size
+= max_len
;
3855 cfg
->code_size
*= 2;
3856 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3857 code
= cfg
->native_code
+ offset
;
3859 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
3861 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3865 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
3866 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
3870 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3871 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
3875 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3876 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
3880 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3881 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
3885 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3886 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
3888 case OP_COND_EXC_EQ
:
3889 case OP_COND_EXC_NE_UN
:
3890 case OP_COND_EXC_LT
:
3891 case OP_COND_EXC_LT_UN
:
3892 case OP_COND_EXC_GT
:
3893 case OP_COND_EXC_GT_UN
:
3894 case OP_COND_EXC_GE
:
3895 case OP_COND_EXC_GE_UN
:
3896 case OP_COND_EXC_LE
:
3897 case OP_COND_EXC_LE_UN
:
3898 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
3900 case OP_COND_EXC_IEQ
:
3901 case OP_COND_EXC_INE_UN
:
3902 case OP_COND_EXC_ILT
:
3903 case OP_COND_EXC_ILT_UN
:
3904 case OP_COND_EXC_IGT
:
3905 case OP_COND_EXC_IGT_UN
:
3906 case OP_COND_EXC_IGE
:
3907 case OP_COND_EXC_IGE_UN
:
3908 case OP_COND_EXC_ILE
:
3909 case OP_COND_EXC_ILE_UN
:
3910 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
3913 case OP_COND_EXC_IC
:
3914 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
3916 case OP_COND_EXC_OV
:
3917 case OP_COND_EXC_IOV
:
3918 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
3920 case OP_COND_EXC_NC
:
3921 case OP_COND_EXC_INC
:
3922 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
3924 case OP_COND_EXC_NO
:
3925 case OP_COND_EXC_INO
:
3926 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
3938 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
3941 /* floating point opcodes */
3944 if (cfg
->compile_aot
) {
3945 ARM_LDFD (code
, ins
->dreg
, ARMREG_PC
, 0);
3947 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3949 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3952 /* FIXME: we can optimize the imm load by dealing with part of
3953 * the displacement in LDFD (aligning to 512).
3955 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3956 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3960 if (cfg
->compile_aot
) {
3961 ARM_LDFS (code
, ins
->dreg
, ARMREG_PC
, 0);
3963 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3966 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3967 ARM_LDFS (code
, ins
->dreg
, ARMREG_LR
, 0);
3970 case OP_STORER8_MEMBASE_REG
:
3971 /* This is generated by the local regalloc pass which runs after the lowering pass */
3972 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3973 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3974 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
3975 ARM_STFD (code
, ins
->sreg1
, ARMREG_LR
, 0);
3977 ARM_STFD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3980 case OP_LOADR8_MEMBASE
:
3981 /* This is generated by the local regalloc pass which runs after the lowering pass */
3982 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3983 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3984 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
3985 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3987 ARM_LDFD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3990 case OP_STORER4_MEMBASE_REG
:
3991 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3992 ARM_STFS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3994 case OP_LOADR4_MEMBASE
:
3995 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3996 ARM_LDFS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3998 case OP_ICONV_TO_R_UN
: {
4000 tmpreg
= ins
->dreg
== 0? 1: 0;
4001 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
4002 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
4003 ARM_B_COND (code
, ARMCOND_GE
, 8);
4004 /* save the temp register */
4005 ARM_SUB_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
4006 ARM_STFD (code
, tmpreg
, ARMREG_SP
, 0);
4007 ARM_LDFD (code
, tmpreg
, ARMREG_PC
, 12);
4008 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->dreg
, tmpreg
);
4009 ARM_LDFD (code
, tmpreg
, ARMREG_SP
, 0);
4010 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
4011 /* skip the constant pool */
4014 *(int*)code
= 0x41f00000;
4019 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4020 * adfltd fdest, fdest, ftemp
4024 case OP_ICONV_TO_R4
:
4025 ARM_FLTS (code
, ins
->dreg
, ins
->sreg1
);
4027 case OP_ICONV_TO_R8
:
4028 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
4031 #elif defined(ARM_FPU_VFP)
4034 if (cfg
->compile_aot
) {
4035 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
4037 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
4039 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
4042 /* FIXME: we can optimize the imm load by dealing with part of
4043 * the displacement in LDFD (aligning to 512).
4045 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
4046 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4050 if (cfg
->compile_aot
) {
4051 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
4053 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
4055 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4057 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
4058 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
4059 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
4062 case OP_STORER8_MEMBASE_REG
:
4063 /* This is generated by the local regalloc pass which runs after the lowering pass */
4064 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
4065 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4066 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
4067 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
4069 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
4072 case OP_LOADR8_MEMBASE
:
4073 /* This is generated by the local regalloc pass which runs after the lowering pass */
4074 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
4075 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4076 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
4077 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
4079 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
4082 case OP_STORER4_MEMBASE_REG
:
4083 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
4084 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
4085 ARM_FSTS (code
, ARM_VFP_F0
, ins
->inst_destbasereg
, ins
->inst_offset
);
4087 case OP_LOADR4_MEMBASE
:
4088 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
4089 ARM_FLDS (code
, ARM_VFP_F0
, ins
->inst_basereg
, ins
->inst_offset
);
4090 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4092 case OP_ICONV_TO_R_UN
: {
4093 g_assert_not_reached ();
4096 case OP_ICONV_TO_R4
:
4097 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
4098 ARM_FSITOS (code
, ARM_VFP_F0
, ARM_VFP_F0
);
4099 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
4101 case OP_ICONV_TO_R8
:
4102 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
4103 ARM_FSITOD (code
, ins
->dreg
, ARM_VFP_F0
);
4107 if (mono_method_signature (cfg
->method
)->ret
->type
== MONO_TYPE_R4
) {
4108 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
4109 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
4111 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
4117 case OP_FCONV_TO_I1
:
4118 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
4120 case OP_FCONV_TO_U1
:
4121 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
4123 case OP_FCONV_TO_I2
:
4124 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
4126 case OP_FCONV_TO_U2
:
4127 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
4129 case OP_FCONV_TO_I4
:
4131 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
4133 case OP_FCONV_TO_U4
:
4135 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
4137 case OP_FCONV_TO_I8
:
4138 case OP_FCONV_TO_U8
:
4139 g_assert_not_reached ();
4140 /* Implemented as helper calls */
4142 case OP_LCONV_TO_R_UN
:
4143 g_assert_not_reached ();
4144 /* Implemented as helper calls */
4146 case OP_LCONV_TO_OVF_I4_2
: {
4147 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
4149 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
4152 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
4153 high_bit_not_set
= code
;
4154 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
4156 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4157 valid_negative
= code
;
4158 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4159 invalid_negative
= code
;
4160 ARM_B_COND (code
, ARMCOND_AL
, 0);
4162 arm_patch (high_bit_not_set
, code
);
4164 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
4165 valid_positive
= code
;
4166 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4168 arm_patch (invalid_negative
, code
);
4169 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
4171 arm_patch (valid_negative
, code
);
4172 arm_patch (valid_positive
, code
);
4174 if (ins
->dreg
!= ins
->sreg1
)
4175 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
4180 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4183 ARM_FPA_SUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4186 ARM_FPA_MUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4189 ARM_FPA_DVFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4192 ARM_MNFD (code
, ins
->dreg
, ins
->sreg1
);
4194 #elif defined(ARM_FPU_VFP)
4196 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4199 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4202 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4205 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
4208 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
4213 g_assert_not_reached ();
4217 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4218 #elif defined(ARM_FPU_VFP)
4219 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4225 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4226 #elif defined(ARM_FPU_VFP)
4227 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4230 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
4231 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
4235 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4236 #elif defined(ARM_FPU_VFP)
4237 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4240 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4241 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4245 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4246 #elif defined(ARM_FPU_VFP)
4247 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4250 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4251 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4252 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4257 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4258 #elif defined(ARM_FPU_VFP)
4259 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4262 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4263 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4268 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4269 #elif defined(ARM_FPU_VFP)
4270 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4273 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4274 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4275 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4277 /* ARM FPA flags table:
4278 * N Less than ARMCOND_MI
4279 * Z Equal ARMCOND_EQ
4280 * C Greater Than or Equal ARMCOND_CS
4281 * V Unordered ARMCOND_VS
4284 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
4287 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
4290 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4293 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4294 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4300 g_assert_not_reached ();
4304 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4306 /* FPA requires EQ even though the docs suggest that just CS is enough */
4307 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
4308 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
4312 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4313 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4318 if (ins
->dreg
!= ins
->sreg1
)
4319 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
4320 #elif defined(ARM_FPU_VFP)
4321 ARM_ABSD (code
, ARM_VFP_D1
, ins
->sreg1
);
4322 ARM_FLDD (code
, ARM_VFP_D0
, ARMREG_PC
, 0);
4324 *(guint32
*)code
= 0xffffffff;
4326 *(guint32
*)code
= 0x7fefffff;
4328 ARM_CMPD (code
, ARM_VFP_D1
, ARM_VFP_D0
);
4330 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "ArithmeticException");
4331 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
4333 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "ArithmeticException");
4335 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4340 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4341 g_assert_not_reached ();
4344 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4345 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4346 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
4347 g_assert_not_reached ();
4353 last_offset
= offset
;
4356 cfg
->code_len
= code
- cfg
->native_code
;
4359 #endif /* DISABLE_JIT */
4361 #ifdef HAVE_AEABI_READ_TP
4362 void __aeabi_read_tp (void);
4366 mono_arch_register_lowlevel_calls (void)
4368 /* The signature doesn't matter */
4369 mono_register_jit_icall (mono_arm_throw_exception
, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE
);
4370 mono_register_jit_icall (mono_arm_throw_exception_by_token
, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE
);
4372 #ifndef MONO_CROSS_COMPILE
4373 #ifdef HAVE_AEABI_READ_TP
4374 mono_register_jit_icall (__aeabi_read_tp
, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE
);
4379 #define patch_lis_ori(ip,val) do {\
4380 guint16 *__lis_ori = (guint16*)(ip); \
4381 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4382 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
4386 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
4388 MonoJumpInfo
*patch_info
;
4389 gboolean compile_aot
= !run_cctors
;
4391 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
4392 unsigned char *ip
= patch_info
->ip
.i
+ code
;
4393 const unsigned char *target
;
4395 if (patch_info
->type
== MONO_PATCH_INFO_SWITCH
&& !compile_aot
) {
4396 gpointer
*jt
= (gpointer
*)(ip
+ 8);
4398 /* jt is the inlined jump table, 2 instructions after ip
4399 * In the normal case we store the absolute addresses,
4400 * otherwise the displacements.
4402 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++)
4403 jt
[i
] = code
+ (int)patch_info
->data
.table
->table
[i
];
4406 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
4409 switch (patch_info
->type
) {
4410 case MONO_PATCH_INFO_BB
:
4411 case MONO_PATCH_INFO_LABEL
:
4414 /* No need to patch these */
4419 switch (patch_info
->type
) {
4420 case MONO_PATCH_INFO_IP
:
4421 g_assert_not_reached ();
4422 patch_lis_ori (ip
, ip
);
4424 case MONO_PATCH_INFO_METHOD_REL
:
4425 g_assert_not_reached ();
4426 *((gpointer
*)(ip
)) = code
+ patch_info
->data
.offset
;
4428 case MONO_PATCH_INFO_METHODCONST
:
4429 case MONO_PATCH_INFO_CLASS
:
4430 case MONO_PATCH_INFO_IMAGE
:
4431 case MONO_PATCH_INFO_FIELD
:
4432 case MONO_PATCH_INFO_VTABLE
:
4433 case MONO_PATCH_INFO_IID
:
4434 case MONO_PATCH_INFO_SFLDA
:
4435 case MONO_PATCH_INFO_LDSTR
:
4436 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4437 case MONO_PATCH_INFO_LDTOKEN
:
4438 g_assert_not_reached ();
4439 /* from OP_AOTCONST : lis + ori */
4440 patch_lis_ori (ip
, target
);
4442 case MONO_PATCH_INFO_R4
:
4443 case MONO_PATCH_INFO_R8
:
4444 g_assert_not_reached ();
4445 *((gconstpointer
*)(ip
+ 2)) = patch_info
->data
.target
;
4447 case MONO_PATCH_INFO_EXC_NAME
:
4448 g_assert_not_reached ();
4449 *((gconstpointer
*)(ip
+ 1)) = patch_info
->data
.name
;
4451 case MONO_PATCH_INFO_NONE
:
4452 case MONO_PATCH_INFO_BB_OVF
:
4453 case MONO_PATCH_INFO_EXC_OVF
:
4454 /* everything is dealt with at epilog output time */
4459 arm_patch_general (domain
, ip
, target
);
4466 * Stack frame layout:
4468 * ------------------- fp
4469 * MonoLMF structure or saved registers
4470 * -------------------
4472 * -------------------
4474 * -------------------
4475 * optional 8 bytes for tracing
4476 * -------------------
4477 * param area size is cfg->param_area
4478 * ------------------- sp
4481 mono_arch_emit_prolog (MonoCompile
*cfg
)
4483 MonoMethod
*method
= cfg
->method
;
4485 MonoMethodSignature
*sig
;
4487 int alloc_size
, pos
, max_offset
, i
, rot_amount
;
4492 int prev_sp_offset
, reg_offset
;
4494 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4497 sig
= mono_method_signature (method
);
4498 cfg
->code_size
= 256 + sig
->param_count
* 20;
4499 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4501 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
4503 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
4505 alloc_size
= cfg
->stack_offset
;
4508 if (!method
->save_lmf
) {
4509 /* We save SP by storing it into IP and saving IP */
4510 ARM_PUSH (code
, (cfg
->used_int_regs
| (1 << ARMREG_IP
) | (1 << ARMREG_LR
)));
4511 prev_sp_offset
= 8; /* ip and lr */
4512 for (i
= 0; i
< 16; ++i
) {
4513 if (cfg
->used_int_regs
& (1 << i
))
4514 prev_sp_offset
+= 4;
4516 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4518 for (i
= 0; i
< 16; ++i
) {
4519 if ((cfg
->used_int_regs
& (1 << i
)) || (i
== ARMREG_IP
) || (i
== ARMREG_LR
)) {
4520 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4525 ARM_PUSH (code
, 0x5ff0);
4526 prev_sp_offset
= 4 * 10; /* all but r0-r3, sp and pc */
4527 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4529 for (i
= 0; i
< 16; ++i
) {
4530 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
4531 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4535 pos
+= sizeof (MonoLMF
) - prev_sp_offset
;
4539 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4540 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4541 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4542 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4545 /* the stack used in the pushed regs */
4546 if (prev_sp_offset
& 4)
4548 cfg
->stack_usage
= alloc_size
;
4550 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
4551 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
4553 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
4554 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4556 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
4558 if (cfg
->frame_reg
!= ARMREG_SP
) {
4559 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
4560 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4562 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4563 prev_sp_offset
+= alloc_size
;
4565 /* compute max_offset in order to use short forward jumps
4566 * we could skip doing it on ARM because the immediate displacement
4567 * for jumps is large enough, it may be useful later for constant pools
4570 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4571 MonoInst
*ins
= bb
->code
;
4572 bb
->max_offset
= max_offset
;
4574 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
4577 MONO_BB_FOR_EACH_INS (bb
, ins
)
4578 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
4581 /* store runtime generic context */
4582 if (cfg
->rgctx_var
) {
4583 MonoInst
*ins
= cfg
->rgctx_var
;
4585 g_assert (ins
->opcode
== OP_REGOFFSET
);
4587 if (arm_is_imm12 (ins
->inst_offset
)) {
4588 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4590 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4591 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
4595 /* load arguments allocated to register from the stack */
4598 cinfo
= get_call_info (NULL
, sig
, sig
->pinvoke
);
4600 if (MONO_TYPE_ISSTRUCT (sig
->ret
) && cinfo
->ret
.storage
!= RegTypeStructByVal
) {
4601 ArgInfo
*ainfo
= &cinfo
->ret
;
4602 inst
= cfg
->vret_addr
;
4603 g_assert (arm_is_imm12 (inst
->inst_offset
));
4604 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4607 if (sig
->call_convention
== MONO_CALL_VARARG
) {
4608 ArgInfo
*cookie
= &cinfo
->sig_cookie
;
4610 /* Save the sig cookie address */
4611 g_assert (cookie
->storage
== RegTypeBase
);
4613 g_assert (arm_is_imm12 (prev_sp_offset
+ cookie
->offset
));
4614 g_assert (arm_is_imm12 (cfg
->sig_cookie
));
4615 ARM_ADD_REG_IMM8 (code
, ARMREG_IP
, cfg
->frame_reg
, prev_sp_offset
+ cookie
->offset
);
4616 ARM_STR_IMM (code
, ARMREG_IP
, cfg
->frame_reg
, cfg
->sig_cookie
);
4619 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4620 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4621 inst
= cfg
->args
[pos
];
4623 if (cfg
->verbose_level
> 2)
4624 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->storage
);
4625 if (inst
->opcode
== OP_REGVAR
) {
4626 if (ainfo
->storage
== RegTypeGeneral
)
4627 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
4628 else if (ainfo
->storage
== RegTypeFP
) {
4629 g_assert_not_reached ();
4630 } else if (ainfo
->storage
== RegTypeBase
) {
4631 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4632 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4634 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4635 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
4638 g_assert_not_reached ();
4640 if (cfg
->verbose_level
> 2)
4641 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4643 /* the argument should be put on the stack: FIXME handle size != word */
4644 if (ainfo
->storage
== RegTypeGeneral
|| ainfo
->storage
== RegTypeIRegPair
) {
4645 switch (ainfo
->size
) {
4647 if (arm_is_imm12 (inst
->inst_offset
))
4648 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4650 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4651 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4655 if (arm_is_imm8 (inst
->inst_offset
)) {
4656 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4658 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4659 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4663 g_assert (arm_is_imm12 (inst
->inst_offset
));
4664 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4665 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
4666 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4669 if (arm_is_imm12 (inst
->inst_offset
)) {
4670 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4672 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4673 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4677 } else if (ainfo
->storage
== RegTypeBaseGen
) {
4678 g_assert (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
));
4679 g_assert (arm_is_imm12 (inst
->inst_offset
));
4680 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4681 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4682 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
4683 } else if (ainfo
->storage
== RegTypeBase
) {
4684 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4685 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4687 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
4688 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4691 switch (ainfo
->size
) {
4693 if (arm_is_imm8 (inst
->inst_offset
)) {
4694 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4696 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4697 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4701 if (arm_is_imm8 (inst
->inst_offset
)) {
4702 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4704 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4705 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4709 if (arm_is_imm12 (inst
->inst_offset
)) {
4710 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4712 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4713 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4715 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
4716 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
4718 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
4719 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4721 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
4722 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4724 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
4725 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4729 if (arm_is_imm12 (inst
->inst_offset
)) {
4730 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4732 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4733 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4737 } else if (ainfo
->storage
== RegTypeFP
) {
4738 g_assert_not_reached ();
4739 } else if (ainfo
->storage
== RegTypeStructByVal
) {
4740 int doffset
= inst
->inst_offset
;
4744 size
= mini_type_stack_size_full (cfg
->generic_sharing_context
, inst
->inst_vtype
, NULL
, sig
->pinvoke
);
4745 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
4746 if (arm_is_imm12 (doffset
)) {
4747 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
4749 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
4750 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
4752 soffset
+= sizeof (gpointer
);
4753 doffset
+= sizeof (gpointer
);
4755 if (ainfo
->vtsize
) {
4756 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4757 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4758 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
4760 } else if (ainfo
->storage
== RegTypeStructByAddr
) {
4761 g_assert_not_reached ();
4762 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4763 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, inst
->inst_offset
, ainfo
->reg
, 0);
4765 g_assert_not_reached ();
4770 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
4771 if (cfg
->compile_aot
)
4772 /* AOT code is only used in the root domain */
4773 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, 0);
4775 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->domain
);
4776 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4777 (gpointer
)"mono_jit_thread_attach");
4778 code
= emit_call_seq (cfg
, code
);
4781 if (method
->save_lmf
) {
4782 gboolean get_lmf_fast
= FALSE
;
4784 #ifdef HAVE_AEABI_READ_TP
4785 gint32 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
4787 if (lmf_addr_tls_offset
!= -1) {
4788 get_lmf_fast
= TRUE
;
4790 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4791 (gpointer
)"__aeabi_read_tp");
4792 code
= emit_call_seq (cfg
, code
);
4794 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, lmf_addr_tls_offset
);
4795 get_lmf_fast
= TRUE
;
4798 if (!get_lmf_fast
) {
4799 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4800 (gpointer
)"mono_get_lmf_addr");
4801 code
= emit_call_seq (cfg
, code
);
4803 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4804 /* lmf_offset is the offset from the previous stack pointer,
4805 * alloc_size is the total stack space allocated, so the offset
4806 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4807 * The pointer to the struct is put in r1 (new_lmf).
4808 * r2 is used as scratch
4809 * The callee-saved registers are already in the MonoLMF structure
4811 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, alloc_size
- lmf_offset
);
4812 /* r0 is the result from mono_get_lmf_addr () */
4813 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4814 /* new_lmf->previous_lmf = *lmf_addr */
4815 ARM_LDR_IMM (code
, ARMREG_R2
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4816 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4817 /* *(lmf_addr) = r1 */
4818 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4819 /* Skip method (only needed for trampoline LMF frames) */
4820 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, ebp
));
4821 /* save the current IP */
4822 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
4823 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, eip
));
4827 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
4829 if (cfg
->arch
.seq_point_info_var
) {
4830 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
4832 /* Initialize the variable from a GOT slot */
4833 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
4834 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
4836 *(gpointer
*)code
= NULL
;
4838 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
4840 g_assert (ins
->opcode
== OP_REGOFFSET
);
4842 if (arm_is_imm12 (ins
->inst_offset
)) {
4843 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
4845 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4846 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
4850 /* Initialize ss_trigger_page_var */
4852 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4853 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4854 int dreg
= ARMREG_LR
;
4857 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4858 g_assert (arm_is_imm12 (info_var
->inst_offset
));
4860 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
4861 /* Load the trigger page addr */
4862 ARM_LDR_IMM (code
, dreg
, dreg
, G_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
4863 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
4867 cfg
->code_len
= code
- cfg
->native_code
;
4868 g_assert (cfg
->code_len
< cfg
->code_size
);
4875 mono_arch_emit_epilog (MonoCompile
*cfg
)
4877 MonoMethod
*method
= cfg
->method
;
4878 int pos
, i
, rot_amount
;
4879 int max_epilog_size
= 16 + 20*4;
4883 if (cfg
->method
->save_lmf
)
4884 max_epilog_size
+= 128;
4886 if (mono_jit_trace_calls
!= NULL
)
4887 max_epilog_size
+= 50;
4889 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
4890 max_epilog_size
+= 50;
4892 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
4893 cfg
->code_size
*= 2;
4894 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4895 mono_jit_stats
.code_reallocs
++;
4899 * Keep in sync with OP_JMP
4901 code
= cfg
->native_code
+ cfg
->code_len
;
4903 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
)) {
4904 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
4908 /* Load returned vtypes into registers if needed */
4909 cinfo
= cfg
->arch
.cinfo
;
4910 if (cinfo
->ret
.storage
== RegTypeStructByVal
) {
4911 MonoInst
*ins
= cfg
->ret
;
4913 if (arm_is_imm12 (ins
->inst_offset
)) {
4914 ARM_LDR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
4916 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4917 ARM_LDR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
4921 if (method
->save_lmf
) {
4923 /* all but r0-r3, sp and pc */
4924 pos
+= sizeof (MonoLMF
) - (4 * 10);
4926 /* r2 contains the pointer to the current LMF */
4927 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
4928 /* ip = previous_lmf */
4929 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4931 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4932 /* *(lmf_addr) = previous_lmf */
4933 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4934 /* FIXME: speedup: there is no actual need to restore the registers if
4935 * we didn't actually change them (idea from Zoltan).
4938 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4939 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_R2
, (sizeof (MonoLMF
) - 10 * sizeof (gulong
)));
4940 ARM_POP_NWB (code
, 0xaff0); /* restore ip to sp and lr to pc */
4942 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
4943 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
4945 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
4946 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4948 /* FIXME: add v4 thumb interworking support */
4949 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
) | (1 << ARMREG_PC
)));
4952 cfg
->code_len
= code
- cfg
->native_code
;
4954 g_assert (cfg
->code_len
< cfg
->code_size
);
4958 /* remove once throw_exception_by_name is eliminated */
4960 exception_id_by_name (const char *name
)
4962 if (strcmp (name
, "IndexOutOfRangeException") == 0)
4963 return MONO_EXC_INDEX_OUT_OF_RANGE
;
4964 if (strcmp (name
, "OverflowException") == 0)
4965 return MONO_EXC_OVERFLOW
;
4966 if (strcmp (name
, "ArithmeticException") == 0)
4967 return MONO_EXC_ARITHMETIC
;
4968 if (strcmp (name
, "DivideByZeroException") == 0)
4969 return MONO_EXC_DIVIDE_BY_ZERO
;
4970 if (strcmp (name
, "InvalidCastException") == 0)
4971 return MONO_EXC_INVALID_CAST
;
4972 if (strcmp (name
, "NullReferenceException") == 0)
4973 return MONO_EXC_NULL_REF
;
4974 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
4975 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
4976 g_error ("Unknown intrinsic exception %s\n", name
);
4981 mono_arch_emit_exceptions (MonoCompile
*cfg
)
4983 MonoJumpInfo
*patch_info
;
4986 guint8
* exc_throw_pos
[MONO_EXC_INTRINS_NUM
];
4987 guint8 exc_throw_found
[MONO_EXC_INTRINS_NUM
];
4988 int max_epilog_size
= 50;
4990 for (i
= 0; i
< MONO_EXC_INTRINS_NUM
; i
++) {
4991 exc_throw_pos
[i
] = NULL
;
4992 exc_throw_found
[i
] = 0;
4995 /* count the number of exception infos */
4998 * make sure we have enough space for exceptions
5000 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5001 if (patch_info
->type
== MONO_PATCH_INFO_EXC
) {
5002 i
= exception_id_by_name (patch_info
->data
.target
);
5003 if (!exc_throw_found
[i
]) {
5004 max_epilog_size
+= 32;
5005 exc_throw_found
[i
] = TRUE
;
5010 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
5011 cfg
->code_size
*= 2;
5012 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
5013 mono_jit_stats
.code_reallocs
++;
5016 code
= cfg
->native_code
+ cfg
->code_len
;
5018 /* add code to raise exceptions */
5019 for (patch_info
= cfg
->patch_info
; patch_info
; patch_info
= patch_info
->next
) {
5020 switch (patch_info
->type
) {
5021 case MONO_PATCH_INFO_EXC
: {
5022 MonoClass
*exc_class
;
5023 unsigned char *ip
= patch_info
->ip
.i
+ cfg
->native_code
;
5025 i
= exception_id_by_name (patch_info
->data
.target
);
5026 if (exc_throw_pos
[i
]) {
5027 arm_patch (ip
, exc_throw_pos
[i
]);
5028 patch_info
->type
= MONO_PATCH_INFO_NONE
;
5031 exc_throw_pos
[i
] = code
;
5033 arm_patch (ip
, code
);
5035 exc_class
= mono_class_from_name (mono_defaults
.corlib
, "System", patch_info
->data
.name
);
5036 g_assert (exc_class
);
5038 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_LR
);
5039 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
5040 patch_info
->type
= MONO_PATCH_INFO_INTERNAL_METHOD
;
5041 patch_info
->data
.name
= "mono_arch_throw_corlib_exception";
5042 patch_info
->ip
.i
= code
- cfg
->native_code
;
5044 *(guint32
*)(gpointer
)code
= exc_class
->type_token
;
5054 cfg
->code_len
= code
- cfg
->native_code
;
5056 g_assert (cfg
->code_len
< cfg
->code_size
);
5060 #endif /* #ifndef DISABLE_JIT */
5062 static gboolean tls_offset_inited
= FALSE
;
5065 mono_arch_setup_jit_tls_data (MonoJitTlsData
*tls
)
5067 if (!tls_offset_inited
) {
5068 tls_offset_inited
= TRUE
;
5070 lmf_tls_offset
= mono_get_lmf_tls_offset ();
5071 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
5076 mono_arch_free_jit_tls_data (MonoJitTlsData
*tls
)
/*
 * mono_arch_emit_inst_for_method:
 * Hook for arch-specific intrinsic expansion of known method calls.
 * NOTE(review): the return type line and the body are missing from this
 * listing — restore them from the upstream file (upstream simply returns
 * NULL: no ARM intrinsics are implemented).
 */
5081 mono_arch_emit_inst_for_method (MonoCompile
*cfg
, MonoMethod
*cmethod
, MonoMethodSignature
*fsig
, MonoInst
**args
)
/*
 * mono_arch_print_tree:
 * Hook for arch-specific printing of instruction trees.
 * NOTE(review): the return type line and the body are missing from this
 * listing — restore them from the upstream file.
 */
5088 mono_arch_print_tree (MonoInst
*tree
, int arity
)
5094 mono_arch_get_domain_intrinsic (MonoCompile
* cfg
)
5096 return mono_get_domain_intrinsic (cfg
);
/*
 * mono_arch_get_patch_offset:
 * Byte offset of the patchable location inside a patch site at CODE.
 * NOTE(review): the return type line and the body (the returned constant)
 * are missing from this listing — restore them from the upstream file.
 */
5100 mono_arch_get_patch_offset (guint8
*code
)
/*
 * mono_arch_flush_register_windows:
 * Register windows are a SPARC concept; nothing to do on ARM.
 * NOTE(review): the return type line and the (empty) body braces are missing
 * from this listing — restore them from the upstream file.
 */
5107 mono_arch_flush_register_windows (void)
5111 #ifdef MONO_ARCH_HAVE_IMT
5116 mono_arch_emit_imt_argument (MonoCompile
*cfg
, MonoCallInst
*call
, MonoInst
*imt_arg
)
5118 if (cfg
->compile_aot
) {
5119 int method_reg
= mono_alloc_ireg (cfg
);
5122 call
->dynamic_imt_arg
= TRUE
;
5125 mono_call_inst_add_outarg_reg (cfg
, call
, imt_arg
->dreg
, ARMREG_V5
, FALSE
);
5127 MONO_INST_NEW (cfg
, ins
, OP_AOTCONST
);
5128 ins
->dreg
= method_reg
;
5129 ins
->inst_p0
= call
->method
;
5130 ins
->inst_c1
= MONO_PATCH_INFO_METHODCONST
;
5131 MONO_ADD_INS (cfg
->cbb
, ins
);
5133 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, ARMREG_V5
, FALSE
);
5135 } else if (cfg
->generic_context
|| imt_arg
) {
5137 /* Always pass in a register for simplicity */
5138 call
->dynamic_imt_arg
= TRUE
;
5140 cfg
->uses_rgctx_reg
= TRUE
;
5143 mono_call_inst_add_outarg_reg (cfg
, call
, imt_arg
->dreg
, ARMREG_V5
, FALSE
);
5146 int method_reg
= mono_alloc_preg (cfg
);
5148 MONO_INST_NEW (cfg
, ins
, OP_PCONST
);
5149 ins
->inst_p0
= call
->method
;
5150 ins
->dreg
= method_reg
;
5151 MONO_ADD_INS (cfg
->cbb
, ins
);
5153 mono_call_inst_add_outarg_reg (cfg
, call
, method_reg
, ARMREG_V5
, FALSE
);
5158 #endif /* DISABLE_JIT */
5161 mono_arch_find_imt_method (mgreg_t
*regs
, guint8
*code
)
5163 guint32
*code_ptr
= (guint32
*)code
;
5165 /* The IMT value is stored in the code stream right after the LDC instruction. */
5166 if (!IS_LDR_PC (code_ptr
[0])) {
5167 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__
, code
, code_ptr
[2], code_ptr
[1], code_ptr
[0]);
5168 g_assert (IS_LDR_PC (code_ptr
[0]));
5170 if (code_ptr
[1] == 0)
5171 /* This is AOTed code, the IMT method is in V5 */
5172 return (MonoMethod
*)regs
[ARMREG_V5
];
5174 return (MonoMethod
*) code_ptr
[1];
5178 mono_arch_find_static_call_vtable (mgreg_t
*regs
, guint8
*code
)
5180 return (MonoVTable
*) regs
[MONO_ARCH_RGCTX_REG
];
5183 #define ENABLE_WRONG_METHOD_CHECK 0
5184 #define BASE_SIZE (6 * 4)
5185 #define BSEARCH_ENTRY_SIZE (4 * 4)
5186 #define CMP_SIZE (3 * 4)
5187 #define BRANCH_SIZE (1 * 4)
5188 #define CALL_SIZE (2 * 4)
5189 #define WMC_SIZE (5 * 4)
5190 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5193 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
5195 guint32 delta
= DISTANCE (target
, code
);
5197 g_assert (delta
>= 0 && delta
<= 0xFFF);
5198 *target
= *target
| delta
;
5204 mono_arch_build_imt_thunk (MonoVTable
*vtable
, MonoDomain
*domain
, MonoIMTCheckItem
**imt_entries
, int count
,
5205 gpointer fail_tramp
)
5207 int size
, i
, extra_space
= 0;
5208 arminstr_t
*code
, *start
, *vtable_target
= NULL
;
5209 gboolean large_offsets
= FALSE
;
5210 guint32
**constant_pool_starts
;
5213 constant_pool_starts
= g_new0 (guint32
*, count
);
5215 for (i
= 0; i
< count
; ++i
) {
5216 MonoIMTCheckItem
*item
= imt_entries
[i
];
5217 if (item
->is_equals
) {
5218 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
5220 if (item
->has_target_code
|| !arm_is_imm12 (DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]))) {
5221 item
->chunk_size
+= 32;
5222 large_offsets
= TRUE
;
5225 if (item
->check_target_idx
|| fail_case
) {
5226 if (!item
->compare_done
|| fail_case
)
5227 item
->chunk_size
+= CMP_SIZE
;
5228 item
->chunk_size
+= BRANCH_SIZE
;
5230 #if ENABLE_WRONG_METHOD_CHECK
5231 item
->chunk_size
+= WMC_SIZE
;
5235 item
->chunk_size
+= 16;
5236 large_offsets
= TRUE
;
5238 item
->chunk_size
+= CALL_SIZE
;
5240 item
->chunk_size
+= BSEARCH_ENTRY_SIZE
;
5241 imt_entries
[item
->check_target_idx
]->compare_done
= TRUE
;
5243 size
+= item
->chunk_size
;
5247 size
+= 4 * count
; /* The ARM_ADD_REG_IMM to pop the stack */
5250 code
= mono_method_alloc_generic_virtual_thunk (domain
, size
);
5252 code
= mono_domain_code_reserve (domain
, size
);
5256 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable
->klass
->name_space
, vtable
->klass
->name
, count
, size
, start
, ((guint8
*)start
) + size
, vtable
);
5257 for (i
= 0; i
< count
; ++i
) {
5258 MonoIMTCheckItem
*item
= imt_entries
[i
];
5259 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i
, item
->key
, item
->key
->name
, &vtable
->vtable
[item
->value
.vtable_slot
], item
->is_equals
, item
->chunk_size
);
5264 ARM_PUSH4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
5266 ARM_PUSH2 (code
, ARMREG_R0
, ARMREG_R1
);
5267 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, -4);
5268 vtable_target
= code
;
5269 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
5271 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5272 ARM_CMP_REG_IMM8 (code
, ARMREG_R0
, 0);
5273 ARM_MOV_REG_REG_COND (code
, ARMREG_R0
, ARMREG_V5
, ARMCOND_EQ
);
5275 for (i
= 0; i
< count
; ++i
) {
5276 MonoIMTCheckItem
*item
= imt_entries
[i
];
5277 arminstr_t
*imt_method
= NULL
, *vtable_offset_ins
= NULL
, *target_code_ins
= NULL
;
5278 gint32 vtable_offset
;
5280 item
->code_target
= (guint8
*)code
;
5282 if (item
->is_equals
) {
5283 gboolean fail_case
= !item
->check_target_idx
&& fail_tramp
;
5285 if (item
->check_target_idx
|| fail_case
) {
5286 if (!item
->compare_done
|| fail_case
) {
5288 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5289 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
5291 item
->jmp_code
= (guint8
*)code
;
5292 ARM_B_COND (code
, ARMCOND_NE
, 0);
5294 /*Enable the commented code to assert on wrong method*/
5295 #if ENABLE_WRONG_METHOD_CHECK
5297 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5298 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
5299 ARM_B_COND (code
, ARMCOND_NE
, 1);
5305 if (item
->has_target_code
) {
5306 target_code_ins
= code
;
5307 /* Load target address */
5308 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5309 /* Save it to the fourth slot */
5310 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
5311 /* Restore registers and branch */
5312 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
5314 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)item
->value
.target_code
);
5316 vtable_offset
= DISTANCE (vtable
, &vtable
->vtable
[item
->value
.vtable_slot
]);
5317 if (!arm_is_imm12 (vtable_offset
)) {
5319 * We need to branch to a computed address but we don't have
5320 * a free register to store it, since IP must contain the
5321 * vtable address. So we push the two values to the stack, and
5322 * load them both using LDM.
5324 /* Compute target address */
5325 vtable_offset_ins
= code
;
5326 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5327 ARM_LDR_REG_REG (code
, ARMREG_R1
, ARMREG_IP
, ARMREG_R1
);
5328 /* Save it to the fourth slot */
5329 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
5330 /* Restore registers and branch */
5331 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
5333 code
= arm_emit_value_and_patch_ldr (code
, vtable_offset_ins
, vtable_offset
);
5335 ARM_POP2 (code
, ARMREG_R0
, ARMREG_R1
);
5337 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 2 * sizeof (gpointer
));
5338 ARM_LDR_IMM (code
, ARMREG_PC
, ARMREG_IP
, vtable_offset
);
5343 arm_patch (item
->jmp_code
, (guchar
*)code
);
5345 target_code_ins
= code
;
5346 /* Load target address */
5347 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5348 /* Save it to the fourth slot */
5349 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_SP
, 3 * sizeof (gpointer
));
5350 /* Restore registers and branch */
5351 ARM_POP4 (code
, ARMREG_R0
, ARMREG_R1
, ARMREG_IP
, ARMREG_PC
);
5353 code
= arm_emit_value_and_patch_ldr (code
, target_code_ins
, (gsize
)fail_tramp
);
5354 item
->jmp_code
= NULL
;
5358 code
= arm_emit_value_and_patch_ldr (code
, imt_method
, (guint32
)item
->key
);
5360 /*must emit after unconditional branch*/
5361 if (vtable_target
) {
5362 code
= arm_emit_value_and_patch_ldr (code
, vtable_target
, (guint32
)vtable
);
5363 item
->chunk_size
+= 4;
5364 vtable_target
= NULL
;
5367 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5368 constant_pool_starts
[i
] = code
;
5370 code
+= extra_space
;
5374 ARM_LDR_IMM (code
, ARMREG_R1
, ARMREG_PC
, 0);
5375 ARM_CMP_REG_REG (code
, ARMREG_R0
, ARMREG_R1
);
5377 item
->jmp_code
= (guint8
*)code
;
5378 ARM_B_COND (code
, ARMCOND_GE
, 0);
5383 for (i
= 0; i
< count
; ++i
) {
5384 MonoIMTCheckItem
*item
= imt_entries
[i
];
5385 if (item
->jmp_code
) {
5386 if (item
->check_target_idx
)
5387 arm_patch (item
->jmp_code
, imt_entries
[item
->check_target_idx
]->code_target
);
5389 if (i
> 0 && item
->is_equals
) {
5391 arminstr_t
*space_start
= constant_pool_starts
[i
];
5392 for (j
= i
- 1; j
>= 0 && !imt_entries
[j
]->is_equals
; --j
) {
5393 space_start
= arm_emit_value_and_patch_ldr (space_start
, (arminstr_t
*)imt_entries
[j
]->code_target
, (guint32
)imt_entries
[j
]->key
);
5400 char *buff
= g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable
->klass
->name_space
, vtable
->klass
->name
, count
);
5401 mono_disassemble_code (NULL
, (guint8
*)start
, size
, buff
);
5406 g_free (constant_pool_starts
);
5408 mono_arch_flush_icache ((guint8
*)start
, size
);
5409 mono_stats
.imt_thunks_size
+= code
- start
;
5411 g_assert (DISTANCE (start
, code
) <= size
);
5418 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
5420 if (reg
== ARMREG_SP
)
5421 return (gpointer
)ctx
->esp
;
5423 return (gpointer
)ctx
->regs
[reg
];
5427 * mono_arch_set_breakpoint:
5429 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5430 * The location should contain code emitted by OP_SEQ_POINT.
5433 mono_arch_set_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5436 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
5439 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
5441 g_assert (native_offset
% 4 == 0);
5442 g_assert (info
->bp_addrs
[native_offset
/ 4] == 0);
5443 info
->bp_addrs
[native_offset
/ 4] = bp_trigger_page
;
5445 int dreg
= ARMREG_LR
;
5447 /* Read from another trigger page */
5448 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
5450 *(int*)code
= (int)bp_trigger_page
;
5452 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
5454 mono_arch_flush_icache (code
- 16, 16);
5457 /* This is currently implemented by emitting an SWI instruction, which
5458 * qemu/linux seems to convert to a SIGILL.
5460 *(int*)code
= (0xef << 24) | 8;
5462 mono_arch_flush_icache (code
- 4, 4);
5468 * mono_arch_clear_breakpoint:
5470 * Clear the breakpoint at IP.
5473 mono_arch_clear_breakpoint (MonoJitInfo
*ji
, guint8
*ip
)
5479 guint32 native_offset
= ip
- (guint8
*)ji
->code_start
;
5480 SeqPointInfo
*info
= mono_arch_get_seq_point_info (mono_domain_get (), ji
->code_start
);
5482 g_assert (native_offset
% 4 == 0);
5483 g_assert (info
->bp_addrs
[native_offset
/ 4] == bp_trigger_page
);
5484 info
->bp_addrs
[native_offset
/ 4] = 0;
5486 for (i
= 0; i
< 4; ++i
)
5489 mono_arch_flush_icache (ip
, code
- ip
);
5494 * mono_arch_start_single_stepping:
5496 * Start single stepping.
5499 mono_arch_start_single_stepping (void)
5501 mono_mprotect (ss_trigger_page
, mono_pagesize (), 0);
5505 * mono_arch_stop_single_stepping:
5507 * Stop single stepping.
5510 mono_arch_stop_single_stepping (void)
5512 mono_mprotect (ss_trigger_page
, mono_pagesize (), MONO_MMAP_READ
);
5516 #define DBG_SIGNAL SIGBUS
5518 #define DBG_SIGNAL SIGSEGV
5522 * mono_arch_is_single_step_event:
5524 * Return whenever the machine state in SIGCTX corresponds to a single
5528 mono_arch_is_single_step_event (void *info
, void *sigctx
)
5530 siginfo_t
*sinfo
= info
;
5532 /* Sometimes the address is off by 4 */
5533 if (sinfo
->si_addr
>= ss_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)ss_trigger_page
+ 128)
5540 * mono_arch_is_breakpoint_event:
5542 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5545 mono_arch_is_breakpoint_event (void *info
, void *sigctx
)
5547 siginfo_t
*sinfo
= info
;
5549 if (sinfo
->si_signo
== DBG_SIGNAL
) {
5550 /* Sometimes the address is off by 4 */
5551 if (sinfo
->si_addr
>= bp_trigger_page
&& (guint8
*)sinfo
->si_addr
<= (guint8
*)bp_trigger_page
+ 128)
/*
 * mono_arch_get_ip_for_breakpoint:
 * Map the faulting IP from CTX back to the start of the breakpoint sequence.
 * NOTE(review): the return type line and the lines adjusting `ip` before it
 * is returned are missing from this listing — restore them from the
 * upstream file; only the IP load is visible here.
 */
5561 mono_arch_get_ip_for_breakpoint (MonoJitInfo
*ji
, MonoContext
*ctx
)
5563 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
/*
 * mono_arch_get_ip_for_single_step:
 * Map the faulting IP from CTX to the IP to report for a single-step event.
 * NOTE(review): the return type line and the lines adjusting `ip` before it
 * is returned are missing from this listing — restore them from the
 * upstream file; only the IP load is visible here.
 */
5574 mono_arch_get_ip_for_single_step (MonoJitInfo
*ji
, MonoContext
*ctx
)
5576 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
5584 * mono_arch_skip_breakpoint:
5586 * See mini-amd64.c for docs.
5589 mono_arch_skip_breakpoint (MonoContext
*ctx
)
5591 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5595 * mono_arch_skip_single_step:
5597 * See mini-amd64.c for docs.
5600 mono_arch_skip_single_step (MonoContext
*ctx
)
5602 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5606 * mono_arch_get_seq_point_info:
5608 * See mini-amd64.c for docs.
5611 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5616 // FIXME: Add a free function
5618 mono_domain_lock (domain
);
5619 info
= g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
5621 mono_domain_unlock (domain
);
5624 ji
= mono_jit_info_table_find (domain
, (char*)code
);
5627 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
5629 info
->ss_trigger_page
= ss_trigger_page
;
5630 info
->bp_trigger_page
= bp_trigger_page
;
5632 mono_domain_lock (domain
);
5633 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
5635 mono_domain_unlock (domain
);