2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset
= -1;
32 static gint lmf_addr_tls_offset
= -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex
;
39 static int v5_supported
= 0;
40 static int v7_supported
= 0;
41 static int thumb_supported
= 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page
;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page
;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page
;
55 gpointer bp_trigger_page
;
56 guint8
* bp_addrs
[MONO_ZERO_LEN_ARRAY
];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which binary incompat with the other.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset
= 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/*
 * Return a human readable name for the integer register REG, used by
 * disassembly/debug output.  Indices outside [0, 16) yield "unknown".
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * Return a human readable name for the floating point register REG.
 * Indices outside [0, 32) yield "unknown".
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
119 emit_big_add (guint8
*code
, int dreg
, int sreg
, int imm
)
121 int imm8
, rot_amount
;
122 if ((imm8
= mono_arm_is_rotated_imm8 (imm
, &rot_amount
)) >= 0) {
123 ARM_ADD_REG_IMM (code
, dreg
, sreg
, imm8
, rot_amount
);
126 g_assert (dreg
!= sreg
);
127 code
= mono_arm_emit_load_imm (code
, dreg
, imm
);
128 ARM_ADD_REG_REG (code
, dreg
, dreg
, sreg
);
133 emit_memcpy (guint8
*code
, int size
, int dreg
, int doffset
, int sreg
, int soffset
)
135 /* we can use r0-r3, since this is called only for incoming args on the stack */
136 if (size
> sizeof (gpointer
) * 4) {
138 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
139 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
140 start_loop
= code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, size
);
141 ARM_LDR_IMM (code
, ARMREG_R3
, ARMREG_R0
, 0);
142 ARM_STR_IMM (code
, ARMREG_R3
, ARMREG_R1
, 0);
143 ARM_ADD_REG_IMM8 (code
, ARMREG_R0
, ARMREG_R0
, 4);
144 ARM_ADD_REG_IMM8 (code
, ARMREG_R1
, ARMREG_R1
, 4);
145 ARM_SUBS_REG_IMM8 (code
, ARMREG_R2
, ARMREG_R2
, 4);
146 ARM_B_COND (code
, ARMCOND_NE
, 0);
147 arm_patch (code
- 4, start_loop
);
150 if (arm_is_imm12 (doffset
) && arm_is_imm12 (doffset
+ size
) &&
151 arm_is_imm12 (soffset
) && arm_is_imm12 (soffset
+ size
)) {
153 ARM_LDR_IMM (code
, ARMREG_LR
, sreg
, soffset
);
154 ARM_STR_IMM (code
, ARMREG_LR
, dreg
, doffset
);
160 code
= emit_big_add (code
, ARMREG_R0
, sreg
, soffset
);
161 code
= emit_big_add (code
, ARMREG_R1
, dreg
, doffset
);
162 doffset
= soffset
= 0;
164 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R0
, soffset
);
165 ARM_STR_IMM (code
, ARMREG_LR
, ARMREG_R1
, doffset
);
171 g_assert (size
== 0);
176 emit_call_reg (guint8
*code
, int reg
)
179 ARM_BLX_REG (code
, reg
);
181 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
185 ARM_MOV_REG_REG (code
, ARMREG_PC
, reg
);
191 emit_call_seq (MonoCompile
*cfg
, guint8
*code
)
193 if (cfg
->method
->dynamic
) {
194 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
196 *(gpointer
*)code
= NULL
;
198 code
= emit_call_reg (code
, ARMREG_IP
);
206 emit_move_return_value (MonoCompile
*cfg
, MonoInst
*ins
, guint8
*code
)
208 switch (ins
->opcode
) {
211 case OP_FCALL_MEMBASE
:
213 if (ins
->dreg
!= ARM_FPA_F0
)
214 ARM_MVFD (code
, ins
->dreg
, ARM_FPA_F0
);
215 #elif defined(ARM_FPU_VFP)
216 if (((MonoCallInst
*)ins
)->signature
->ret
->type
== MONO_TYPE_R4
) {
217 ARM_FMSR (code
, ins
->dreg
, ARMREG_R0
);
218 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
220 ARM_FMDRR (code
, ARMREG_R0
, ARMREG_R1
, ins
->dreg
);
230 * mono_arch_get_argument_info:
231 * @csig: a method signature
232 * @param_count: the number of parameters to consider
233 * @arg_info: an array to store the result infos
235 * Gathers information on parameters such as size, alignment and
236 * padding. arg_info should be large enought to hold param_count + 1 entries.
238 * Returns the size of the activation frame.
241 mono_arch_get_argument_info (MonoMethodSignature
*csig
, int param_count
, MonoJitArgumentInfo
*arg_info
)
243 int k
, frame_size
= 0;
244 guint32 size
, align
, pad
;
247 if (MONO_TYPE_ISSTRUCT (csig
->ret
)) {
248 frame_size
+= sizeof (gpointer
);
252 arg_info
[0].offset
= offset
;
255 frame_size
+= sizeof (gpointer
);
259 arg_info
[0].size
= frame_size
;
261 for (k
= 0; k
< param_count
; k
++) {
262 size
= mini_type_stack_size_full (NULL
, csig
->params
[k
], &align
, csig
->pinvoke
);
264 /* ignore alignment for now */
267 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
268 arg_info
[k
].pad
= pad
;
270 arg_info
[k
+ 1].pad
= 0;
271 arg_info
[k
+ 1].size
= size
;
273 arg_info
[k
+ 1].offset
= offset
;
277 align
= MONO_ARCH_FRAME_ALIGNMENT
;
278 frame_size
+= pad
= (align
- (frame_size
& (align
- 1))) & (align
- 1);
279 arg_info
[k
].pad
= pad
;
285 decode_vcall_slot_from_ldr (guint32 ldr
, mgreg_t
*regs
, int *displacement
)
289 reg
= (ldr
>> 16 ) & 0xf;
290 offset
= ldr
& 0xfff;
291 if (((ldr
>> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
293 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
294 o
= (gpointer
)regs
[reg
];
296 *displacement
= offset
;
301 mono_arch_get_vcall_slot (guint8
*code_ptr
, mgreg_t
*regs
, int *displacement
)
303 guint32
* code
= (guint32
*)code_ptr
;
305 /* Locate the address of the method-specific trampoline. The call using
306 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
307 looks something like this:
316 The call sequence could be also:
319 function pointer literal
323 Note that on ARM5+ we can use one instruction instead of the last two.
324 Therefore, we need to locate the 'ldr rA' instruction to know which
325 register was used to hold the method addrs.
328 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
331 /* Three possible code sequences can happen here:
335 * ldr pc, [rX - #offset]
341 * ldr pc, [rX - #offset]
343 * direct branch with bl:
347 * direct branch with mov:
351 * We only need to identify interface and virtual calls, the others can be ignored.
354 if (IS_LDR_PC (code
[-1]) && code
[-2] == ADD_LR_PC_4
)
355 return decode_vcall_slot_from_ldr (code
[-1], regs
, displacement
);
357 if (IS_LDR_PC (code
[0]) && code
[-1] == MOV_LR_PC
)
358 return decode_vcall_slot_from_ldr (code
[0], regs
, displacement
);
363 #define MAX_ARCH_DELEGATE_PARAMS 3
366 get_delegate_invoke_impl (gboolean has_target
, gboolean param_count
, guint32
*code_size
)
368 guint8
*code
, *start
;
371 start
= code
= mono_global_codeman_reserve (12);
373 /* Replace the this argument with the target */
374 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
375 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, target
));
376 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
378 g_assert ((code
- start
) <= 12);
380 mono_arch_flush_icache (start
, 12);
384 size
= 8 + param_count
* 4;
385 start
= code
= mono_global_codeman_reserve (size
);
387 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R0
, G_STRUCT_OFFSET (MonoDelegate
, method_ptr
));
388 /* slide down the arguments */
389 for (i
= 0; i
< param_count
; ++i
) {
390 ARM_MOV_REG_REG (code
, (ARMREG_R0
+ i
), (ARMREG_R0
+ i
+ 1));
392 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
394 g_assert ((code
- start
) <= size
);
396 mono_arch_flush_icache (start
, size
);
400 *code_size
= code
- start
;
406 * mono_arch_get_delegate_invoke_impls:
408 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
412 mono_arch_get_delegate_invoke_impls (void)
419 code
= get_delegate_invoke_impl (TRUE
, 0, &code_len
);
420 res
= g_slist_prepend (res
, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code
, code_len
));
422 for (i
= 0; i
<= MAX_ARCH_DELEGATE_PARAMS
; ++i
) {
423 code
= get_delegate_invoke_impl (FALSE
, i
, &code_len
);
424 res
= g_slist_prepend (res
, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i
), code
, code_len
));
431 mono_arch_get_delegate_invoke_impl (MonoMethodSignature
*sig
, gboolean has_target
)
433 guint8
*code
, *start
;
435 /* FIXME: Support more cases */
436 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
440 static guint8
* cached
= NULL
;
441 mono_mini_arch_lock ();
443 mono_mini_arch_unlock ();
448 start
= mono_aot_get_named_code ("delegate_invoke_impl_has_target");
450 start
= get_delegate_invoke_impl (TRUE
, 0, NULL
);
452 mono_mini_arch_unlock ();
455 static guint8
* cache
[MAX_ARCH_DELEGATE_PARAMS
+ 1] = {NULL
};
458 if (sig
->param_count
> MAX_ARCH_DELEGATE_PARAMS
)
460 for (i
= 0; i
< sig
->param_count
; ++i
)
461 if (!mono_is_regsize_var (sig
->params
[i
]))
464 mono_mini_arch_lock ();
465 code
= cache
[sig
->param_count
];
467 mono_mini_arch_unlock ();
472 char *name
= g_strdup_printf ("delegate_invoke_impl_target_%d", sig
->param_count
);
473 start
= mono_aot_get_named_code (name
);
476 start
= get_delegate_invoke_impl (FALSE
, sig
->param_count
, NULL
);
478 cache
[sig
->param_count
] = start
;
479 mono_mini_arch_unlock ();
487 mono_arch_get_this_arg_from_call (MonoGenericSharingContext
*gsctx
, MonoMethodSignature
*sig
, mgreg_t
*regs
, guint8
*code
)
489 /* FIXME: handle returning a struct */
490 if (MONO_TYPE_ISSTRUCT (sig
->ret
))
491 return (gpointer
)regs
[ARMREG_R1
];
492 return (gpointer
)regs
[ARMREG_R0
];
496 * Initialize the cpu to execute managed code.
499 mono_arch_cpu_init (void)
504 * Initialize architecture specific code.
507 mono_arch_init (void)
509 InitializeCriticalSection (&mini_arch_mutex
);
511 ss_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
512 bp_trigger_page
= mono_valloc (NULL
, mono_pagesize (), MONO_MMAP_READ
|MONO_MMAP_32BIT
);
513 mono_mprotect (bp_trigger_page
, mono_pagesize (), 0);
517 * Cleanup architecture specific code.
520 mono_arch_cleanup (void)
525 * This function returns the optimizations supported on this cpu.
528 mono_arch_cpu_optimizazions (guint32
*exclude_mask
)
532 thumb_supported
= TRUE
;
537 FILE *file
= fopen ("/proc/cpuinfo", "r");
539 while ((line
= fgets (buf
, 512, file
))) {
540 if (strncmp (line
, "Processor", 9) == 0) {
541 char *ver
= strstr (line
, "(v");
542 if (ver
&& (ver
[2] == '5' || ver
[2] == '6' || ver
[2] == '7'))
544 if (ver
&& (ver
[2] == '7'))
548 if (strncmp (line
, "Features", 8) == 0) {
549 char *th
= strstr (line
, "thumb");
551 thumb_supported
= TRUE
;
559 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
563 /* no arm-specific optimizations yet */
569 is_regsize_var (MonoType
*t
) {
572 t
= mini_type_get_underlying_type (NULL
, t
);
579 case MONO_TYPE_FNPTR
:
581 case MONO_TYPE_OBJECT
:
582 case MONO_TYPE_STRING
:
583 case MONO_TYPE_CLASS
:
584 case MONO_TYPE_SZARRAY
:
585 case MONO_TYPE_ARRAY
:
587 case MONO_TYPE_GENERICINST
:
588 if (!mono_type_generic_inst_is_valuetype (t
))
591 case MONO_TYPE_VALUETYPE
:
598 mono_arch_get_allocatable_int_vars (MonoCompile
*cfg
)
603 for (i
= 0; i
< cfg
->num_varinfo
; i
++) {
604 MonoInst
*ins
= cfg
->varinfo
[i
];
605 MonoMethodVar
*vmv
= MONO_VARINFO (cfg
, i
);
608 if (vmv
->range
.first_use
.abs_pos
>= vmv
->range
.last_use
.abs_pos
)
611 if (ins
->flags
& (MONO_INST_VOLATILE
|MONO_INST_INDIRECT
) || (ins
->opcode
!= OP_LOCAL
&& ins
->opcode
!= OP_ARG
))
614 /* we can only allocate 32 bit values */
615 if (is_regsize_var (ins
->inst_vtype
)) {
616 g_assert (MONO_VARINFO (cfg
, i
)->reg
== -1);
617 g_assert (i
== vmv
->idx
);
618 vars
= mono_varlist_insert_sorted (cfg
, vars
, vmv
, FALSE
);
625 #define USE_EXTRA_TEMPS 0
628 mono_arch_get_global_int_regs (MonoCompile
*cfg
)
633 * FIXME: Interface calls might go through a static rgctx trampoline which
634 * sets V5, but it doesn't save it, so we need to save it ourselves, and
637 if (cfg
->flags
& MONO_CFG_HAS_CALLS
)
638 cfg
->uses_rgctx_reg
= TRUE
;
640 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V1
));
641 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V2
));
642 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V3
));
643 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V4
));
644 if (!(cfg
->compile_aot
|| cfg
->uses_rgctx_reg
))
645 /* V5 is reserved for passing the vtable/rgctx/IMT method */
646 regs
= g_list_prepend (regs
, GUINT_TO_POINTER (ARMREG_V5
));
647 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
648 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
654 * mono_arch_regalloc_cost:
656 * Return the cost, in number of memory references, of the action of
657 * allocating the variable VMV into a register during global register
661 mono_arch_regalloc_cost (MonoCompile
*cfg
, MonoMethodVar
*vmv
)
667 #ifndef __GNUC_PREREQ
668 #define __GNUC_PREREQ(maj, min) (0)
672 mono_arch_flush_icache (guint8
*code
, gint size
)
675 sys_icache_invalidate (code
, size
);
676 #elif __GNUC_PREREQ(4, 1)
677 __clear_cache (code
, code
+ size
);
678 #elif defined(PLATFORM_ANDROID)
679 const int syscall
= 0xf0002;
687 : "r" (code
), "r" (code
+ size
), "r" (syscall
)
688 : "r0", "r1", "r7", "r2"
691 __asm
__volatile ("mov r0, %0\n"
694 "swi 0x9f0002 @ sys_cacheflush"
696 : "r" (code
), "r" (code
+ size
), "r" (0)
697 : "r0", "r1", "r3" );
714 guint16 vtsize
; /* in param area */
716 guint8 regtype
: 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
717 guint8 size
: 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
724 gboolean vtype_retaddr
;
733 /*#define __alignof__(a) sizeof(a)*/
734 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
740 add_general (guint
*gr
, guint
*stack_size
, ArgInfo
*ainfo
, gboolean simple
)
743 if (*gr
> ARMREG_R3
) {
744 ainfo
->offset
= *stack_size
;
745 ainfo
->reg
= ARMREG_SP
; /* in the caller */
746 ainfo
->regtype
= RegTypeBase
;
749 ainfo
->regtype
= RegTypeGeneral
;
753 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
756 int i8_align
= __alignof__ (gint64
);
760 gboolean split
= i8_align
== 4;
762 gboolean split
= TRUE
;
765 if (*gr
== ARMREG_R3
&& split
) {
766 /* first word in r3 and the second on the stack */
767 ainfo
->offset
= *stack_size
;
768 ainfo
->reg
= ARMREG_SP
; /* in the caller */
769 ainfo
->regtype
= RegTypeBaseGen
;
771 } else if (*gr
>= ARMREG_R3
) {
773 /* darwin aligns longs to 4 byte only */
779 ainfo
->offset
= *stack_size
;
780 ainfo
->reg
= ARMREG_SP
; /* in the caller */
781 ainfo
->regtype
= RegTypeBase
;
785 if (i8_align
== 8 && ((*gr
) & 1))
788 ainfo
->regtype
= RegTypeIRegPair
;
797 get_call_info (MonoMethodSignature
*sig
, gboolean is_pinvoke
)
800 int n
= sig
->hasthis
+ sig
->param_count
;
801 MonoType
*simpletype
;
802 guint32 stack_size
= 0;
803 CallInfo
*cinfo
= g_malloc0 (sizeof (CallInfo
) + sizeof (ArgInfo
) * n
);
808 /* FIXME: handle returning a struct */
809 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
810 add_general (&gr
, &stack_size
, &cinfo
->ret
, TRUE
);
811 cinfo
->struct_ret
= ARMREG_R0
;
812 cinfo
->vtype_retaddr
= TRUE
;
817 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
820 DEBUG(printf("params: %d\n", sig
->param_count
));
821 for (i
= 0; i
< sig
->param_count
; ++i
) {
822 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
823 /* Prevent implicit arguments and sig_cookie from
824 being passed in registers */
826 /* Emit the signature cookie just before the implicit arguments */
827 add_general (&gr
, &stack_size
, &cinfo
->sig_cookie
, TRUE
);
829 DEBUG(printf("param %d: ", i
));
830 if (sig
->params
[i
]->byref
) {
831 DEBUG(printf("byref\n"));
832 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
836 simpletype
= mini_type_get_underlying_type (NULL
, sig
->params
[i
]);
837 switch (simpletype
->type
) {
838 case MONO_TYPE_BOOLEAN
:
841 cinfo
->args
[n
].size
= 1;
842 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
848 cinfo
->args
[n
].size
= 2;
849 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
854 cinfo
->args
[n
].size
= 4;
855 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
861 case MONO_TYPE_FNPTR
:
862 case MONO_TYPE_CLASS
:
863 case MONO_TYPE_OBJECT
:
864 case MONO_TYPE_STRING
:
865 case MONO_TYPE_SZARRAY
:
866 case MONO_TYPE_ARRAY
:
868 cinfo
->args
[n
].size
= sizeof (gpointer
);
869 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
872 case MONO_TYPE_GENERICINST
:
873 if (!mono_type_generic_inst_is_valuetype (sig
->params
[i
])) {
874 cinfo
->args
[n
].size
= sizeof (gpointer
);
875 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, TRUE
);
880 case MONO_TYPE_TYPEDBYREF
:
881 case MONO_TYPE_VALUETYPE
: {
887 if (simpletype
->type
== MONO_TYPE_TYPEDBYREF
) {
888 size
= sizeof (MonoTypedRef
);
889 align
= sizeof (gpointer
);
891 MonoClass
*klass
= mono_class_from_mono_type (sig
->params
[i
]);
893 size
= mono_class_native_size (klass
, &align
);
895 size
= mono_class_value_size (klass
, &align
);
897 DEBUG(printf ("load %d bytes struct\n",
898 mono_class_native_size (sig
->params
[i
]->data
.klass
, NULL
)));
901 align_size
+= (sizeof (gpointer
) - 1);
902 align_size
&= ~(sizeof (gpointer
) - 1);
903 nwords
= (align_size
+ sizeof (gpointer
) -1 ) / sizeof (gpointer
);
904 cinfo
->args
[n
].regtype
= RegTypeStructByVal
;
905 /* FIXME: align stack_size if needed */
907 if (align
>= 8 && (gr
& 1))
910 if (gr
> ARMREG_R3
) {
911 cinfo
->args
[n
].size
= 0;
912 cinfo
->args
[n
].vtsize
= nwords
;
914 int rest
= ARMREG_R3
- gr
+ 1;
915 int n_in_regs
= rest
>= nwords
? nwords
: rest
;
917 cinfo
->args
[n
].size
= n_in_regs
;
918 cinfo
->args
[n
].vtsize
= nwords
- n_in_regs
;
919 cinfo
->args
[n
].reg
= gr
;
923 cinfo
->args
[n
].offset
= stack_size
;
924 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
925 stack_size
+= nwords
* sizeof (gpointer
);
932 cinfo
->args
[n
].size
= 8;
933 add_general (&gr
, &stack_size
, cinfo
->args
+ n
, FALSE
);
937 g_error ("Can't trampoline 0x%x", sig
->params
[i
]->type
);
942 simpletype
= mini_type_get_underlying_type (NULL
, sig
->ret
);
943 switch (simpletype
->type
) {
944 case MONO_TYPE_BOOLEAN
:
955 case MONO_TYPE_FNPTR
:
956 case MONO_TYPE_CLASS
:
957 case MONO_TYPE_OBJECT
:
958 case MONO_TYPE_SZARRAY
:
959 case MONO_TYPE_ARRAY
:
960 case MONO_TYPE_STRING
:
961 cinfo
->ret
.regtype
= RegTypeGeneral
;
962 cinfo
->ret
.reg
= ARMREG_R0
;
966 cinfo
->ret
.regtype
= RegTypeIRegPair
;
967 cinfo
->ret
.reg
= ARMREG_R0
;
971 cinfo
->ret
.regtype
= RegTypeFP
;
972 cinfo
->ret
.reg
= ARMREG_R0
;
973 /* FIXME: cinfo->ret.reg = ???;
974 cinfo->ret.regtype = RegTypeFP;*/
976 case MONO_TYPE_GENERICINST
:
977 if (!mono_type_generic_inst_is_valuetype (sig
->ret
)) {
978 cinfo
->ret
.regtype
= RegTypeGeneral
;
979 cinfo
->ret
.reg
= ARMREG_R0
;
982 cinfo
->ret
.regtype
= RegTypeStructByAddr
;
984 case MONO_TYPE_VALUETYPE
:
985 cinfo
->ret
.regtype
= RegTypeStructByAddr
;
987 case MONO_TYPE_TYPEDBYREF
:
988 cinfo
->ret
.regtype
= RegTypeStructByAddr
;
993 g_error ("Can't handle as return value 0x%x", sig
->ret
->type
);
997 /* align stack size to 8 */
998 DEBUG (printf (" stack size: %d (%d)\n", (stack_size
+ 15) & ~15, stack_size
));
999 stack_size
= (stack_size
+ 7) & ~7;
1001 cinfo
->stack_usage
= stack_size
;
1007 * Set var information according to the calling convention. arm version.
1008 * The locals var stuff should most likely be split in another method.
1011 mono_arch_allocate_vars (MonoCompile
*cfg
)
1013 MonoMethodSignature
*sig
;
1014 MonoMethodHeader
*header
;
1016 int i
, offset
, size
, align
, curinst
;
1017 int frame_reg
= ARMREG_FP
;
1020 /* FIXME: this will change when we use FP as gcc does */
1021 cfg
->flags
|= MONO_CFG_HAS_SPILLUP
;
1023 /* allow room for the vararg method args: void* and long/double */
1024 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1025 cfg
->param_area
= MAX (cfg
->param_area
, sizeof (gpointer
)*8);
1027 header
= mono_method_get_header (cfg
->method
);
1030 * We use the frame register also for any method that has
1031 * exception clauses. This way, when the handlers are called,
1032 * the code will reference local variables using the frame reg instead of
1033 * the stack pointer: if we had to restore the stack pointer, we'd
1034 * corrupt the method frames that are already on the stack (since
1035 * filters get called before stack unwinding happens) when the filter
1036 * code would call any method (this also applies to finally etc.).
1038 if ((cfg
->flags
& MONO_CFG_HAS_ALLOCA
) || header
->num_clauses
)
1039 frame_reg
= ARMREG_FP
;
1040 cfg
->frame_reg
= frame_reg
;
1041 if (frame_reg
!= ARMREG_SP
) {
1042 cfg
->used_int_regs
|= 1 << frame_reg
;
1045 if (cfg
->compile_aot
|| cfg
->uses_rgctx_reg
)
1046 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1047 cfg
->used_int_regs
|= (1 << ARMREG_V5
);
1049 sig
= mono_method_signature (cfg
->method
);
1053 if (!MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1054 /* FIXME: handle long and FP values */
1055 switch (mini_type_get_underlying_type (NULL
, sig
->ret
)->type
) {
1056 case MONO_TYPE_VOID
:
1059 cfg
->ret
->opcode
= OP_REGVAR
;
1060 cfg
->ret
->inst_c0
= ARMREG_R0
;
1064 /* local vars are at a positive offset from the stack pointer */
1066 * also note that if the function uses alloca, we use FP
1067 * to point at the local variables.
1069 offset
= 0; /* linkage area */
1070 /* align the offset to 16 bytes: not sure this is needed here */
1072 //offset &= ~(8 - 1);
1074 /* add parameter area size for called functions */
1075 offset
+= cfg
->param_area
;
1078 if (cfg
->flags
& MONO_CFG_HAS_FPOUT
)
1081 /* allow room to save the return value */
1082 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (cfg
->method
))
1085 /* the MonoLMF structure is stored just below the stack pointer */
1087 if (sig
->call_convention
== MONO_CALL_VARARG
) {
1088 cfg
->sig_cookie
= 0;
1091 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1092 inst
= cfg
->vret_addr
;
1093 offset
+= sizeof(gpointer
) - 1;
1094 offset
&= ~(sizeof(gpointer
) - 1);
1095 inst
->inst_offset
= offset
;
1096 inst
->opcode
= OP_REGOFFSET
;
1097 inst
->inst_basereg
= frame_reg
;
1098 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1099 printf ("vret_addr =");
1100 mono_print_ins (cfg
->vret_addr
);
1102 offset
+= sizeof(gpointer
);
1103 if (sig
->call_convention
== MONO_CALL_VARARG
)
1104 cfg
->sig_cookie
+= sizeof (gpointer
);
1107 curinst
= cfg
->locals_start
;
1108 for (i
= curinst
; i
< cfg
->num_varinfo
; ++i
) {
1109 inst
= cfg
->varinfo
[i
];
1110 if ((inst
->flags
& MONO_INST_IS_DEAD
) || inst
->opcode
== OP_REGVAR
)
1113 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1114 * pinvoke wrappers when they call functions returning structure */
1115 if (inst
->backend
.is_pinvoke
&& MONO_TYPE_ISSTRUCT (inst
->inst_vtype
) && inst
->inst_vtype
->type
!= MONO_TYPE_TYPEDBYREF
) {
1116 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), &ualign
);
1120 size
= mono_type_size (inst
->inst_vtype
, &align
);
1122 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1123 * since it loads/stores misaligned words, which don't do the right thing.
1125 if (align
< 4 && size
>= 4)
1127 offset
+= align
- 1;
1128 offset
&= ~(align
- 1);
1129 inst
->inst_offset
= offset
;
1130 inst
->opcode
= OP_REGOFFSET
;
1131 inst
->inst_basereg
= frame_reg
;
1133 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1138 inst
= cfg
->args
[curinst
];
1139 if (inst
->opcode
!= OP_REGVAR
) {
1140 inst
->opcode
= OP_REGOFFSET
;
1141 inst
->inst_basereg
= frame_reg
;
1142 offset
+= sizeof (gpointer
) - 1;
1143 offset
&= ~(sizeof (gpointer
) - 1);
1144 inst
->inst_offset
= offset
;
1145 offset
+= sizeof (gpointer
);
1146 if (sig
->call_convention
== MONO_CALL_VARARG
)
1147 cfg
->sig_cookie
+= sizeof (gpointer
);
1152 for (i
= 0; i
< sig
->param_count
; ++i
) {
1153 inst
= cfg
->args
[curinst
];
1154 if (inst
->opcode
!= OP_REGVAR
) {
1155 inst
->opcode
= OP_REGOFFSET
;
1156 inst
->inst_basereg
= frame_reg
;
1157 size
= mini_type_stack_size_full (NULL
, sig
->params
[i
], &ualign
, sig
->pinvoke
);
1159 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1160 * since it loads/stores misaligned words, which don't do the right thing.
1162 if (align
< 4 && size
>= 4)
1164 offset
+= align
- 1;
1165 offset
&= ~(align
- 1);
1166 inst
->inst_offset
= offset
;
1168 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
< sig
->sentinelpos
))
1169 cfg
->sig_cookie
+= size
;
1174 /* align the offset to 8 bytes */
1179 cfg
->stack_offset
= offset
;
1183 mono_arch_create_vars (MonoCompile
*cfg
)
1185 MonoMethodSignature
*sig
;
1187 sig
= mono_method_signature (cfg
->method
);
1189 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1190 cfg
->vret_addr
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_ARG
);
1191 if (G_UNLIKELY (cfg
->verbose_level
> 1)) {
1192 printf ("vret_addr = ");
1193 mono_print_ins (cfg
->vret_addr
);
1197 if (cfg
->gen_seq_points
&& cfg
->compile_aot
) {
1198 MonoInst
*ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1199 ins
->flags
|= MONO_INST_VOLATILE
;
1200 cfg
->arch
.seq_point_info_var
= ins
;
1202 /* Allocate a separate variable for this to save 1 load per seq point */
1203 ins
= mono_compile_create_var (cfg
, &mono_defaults
.int_class
->byval_arg
, OP_LOCAL
);
1204 ins
->flags
|= MONO_INST_VOLATILE
;
1205 cfg
->arch
.ss_trigger_page_var
= ins
;
1210 mono_arch_emit_call (MonoCompile
*cfg
, MonoCallInst
*call
)
1213 MonoMethodSignature
*sig
;
1217 sig
= call
->signature
;
1218 n
= sig
->param_count
+ sig
->hasthis
;
1220 cinfo
= get_call_info (sig
, sig
->pinvoke
);
1222 for (i
= 0; i
< n
; ++i
) {
1223 ArgInfo
*ainfo
= cinfo
->args
+ i
;
1226 if (i
>= sig
->hasthis
)
1227 t
= sig
->params
[i
- sig
->hasthis
];
1229 t
= &mono_defaults
.int_class
->byval_arg
;
1230 t
= mini_type_get_underlying_type (NULL
, t
);
1232 if ((sig
->call_convention
== MONO_CALL_VARARG
) && (i
== sig
->sentinelpos
)) {
1237 in
= call
->args
[i
];
1239 switch (ainfo
->regtype
) {
1240 case RegTypeGeneral
:
1241 case RegTypeIRegPair
:
1242 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1243 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1244 ins
->dreg
= mono_alloc_ireg (cfg
);
1245 ins
->sreg1
= in
->dreg
+ 1;
1246 MONO_ADD_INS (cfg
->cbb
, ins
);
1247 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1249 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1250 ins
->dreg
= mono_alloc_ireg (cfg
);
1251 ins
->sreg1
= in
->dreg
+ 2;
1252 MONO_ADD_INS (cfg
->cbb
, ins
);
1253 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1254 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R8
) || (t
->type
== MONO_TYPE_R4
))) {
1255 #ifndef MONO_ARCH_SOFT_FLOAT
1259 if (ainfo
->size
== 4) {
1260 #ifdef MONO_ARCH_SOFT_FLOAT
1261 /* mono_emit_call_args () have already done the r8->r4 conversion */
1262 /* The converted value is in an int vreg */
1263 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1264 ins
->dreg
= mono_alloc_ireg (cfg
);
1265 ins
->sreg1
= in
->dreg
;
1266 MONO_ADD_INS (cfg
->cbb
, ins
);
1267 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1269 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1270 creg
= mono_alloc_ireg (cfg
);
1271 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1272 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1275 #ifdef MONO_ARCH_SOFT_FLOAT
1276 MONO_INST_NEW (cfg
, ins
, OP_FGETLOW32
);
1277 ins
->dreg
= mono_alloc_ireg (cfg
);
1278 ins
->sreg1
= in
->dreg
;
1279 MONO_ADD_INS (cfg
->cbb
, ins
);
1280 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1282 MONO_INST_NEW (cfg
, ins
, OP_FGETHIGH32
);
1283 ins
->dreg
= mono_alloc_ireg (cfg
);
1284 ins
->sreg1
= in
->dreg
;
1285 MONO_ADD_INS (cfg
->cbb
, ins
);
1286 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
+ 1, FALSE
);
1288 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1289 creg
= mono_alloc_ireg (cfg
);
1290 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1291 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
, FALSE
);
1292 creg
= mono_alloc_ireg (cfg
);
1293 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8 + 4));
1294 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ainfo
->reg
+ 1, FALSE
);
1297 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1299 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1300 ins
->dreg
= mono_alloc_ireg (cfg
);
1301 ins
->sreg1
= in
->dreg
;
1302 MONO_ADD_INS (cfg
->cbb
, ins
);
1304 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ainfo
->reg
, FALSE
);
1307 case RegTypeStructByAddr
:
1310 /* FIXME: where si the data allocated? */
1311 arg
->backend
.reg3
= ainfo
->reg
;
1312 call
->used_iregs
|= 1 << ainfo
->reg
;
1313 g_assert_not_reached ();
1316 case RegTypeStructByVal
:
1317 MONO_INST_NEW (cfg
, ins
, OP_OUTARG_VT
);
1318 ins
->opcode
= OP_OUTARG_VT
;
1319 ins
->sreg1
= in
->dreg
;
1320 ins
->klass
= in
->klass
;
1321 ins
->inst_p0
= call
;
1322 ins
->inst_p1
= mono_mempool_alloc (cfg
->mempool
, sizeof (ArgInfo
));
1323 memcpy (ins
->inst_p1
, ainfo
, sizeof (ArgInfo
));
1324 MONO_ADD_INS (cfg
->cbb
, ins
);
1327 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1328 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1329 } else if (!t
->byref
&& ((t
->type
== MONO_TYPE_R4
) || (t
->type
== MONO_TYPE_R8
))) {
1330 if (t
->type
== MONO_TYPE_R8
) {
1331 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1333 #ifdef MONO_ARCH_SOFT_FLOAT
1334 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STOREI4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1336 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER4_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1340 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, in
->dreg
);
1343 case RegTypeBaseGen
:
1344 if (!t
->byref
&& ((t
->type
== MONO_TYPE_I8
) || (t
->type
== MONO_TYPE_U8
))) {
1345 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, (G_BYTE_ORDER
== G_BIG_ENDIAN
) ? in
->dreg
+ 1 : in
->dreg
+ 2);
1346 MONO_INST_NEW (cfg
, ins
, OP_MOVE
);
1347 ins
->dreg
= mono_alloc_ireg (cfg
);
1348 ins
->sreg1
= G_BYTE_ORDER
== G_BIG_ENDIAN
? in
->dreg
+ 2 : in
->dreg
+ 1;
1349 MONO_ADD_INS (cfg
->cbb
, ins
);
1350 mono_call_inst_add_outarg_reg (cfg
, call
, ins
->dreg
, ARMREG_R3
, FALSE
);
1351 } else if (!t
->byref
&& (t
->type
== MONO_TYPE_R8
)) {
1354 #ifdef MONO_ARCH_SOFT_FLOAT
1355 g_assert_not_reached ();
1358 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORER8_MEMBASE_REG
, ARMREG_SP
, (cfg
->param_area
- 8), in
->dreg
);
1359 creg
= mono_alloc_ireg (cfg
);
1360 mono_call_inst_add_outarg_reg (cfg
, call
, creg
, ARMREG_R3
, FALSE
);
1361 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 8));
1362 creg
= mono_alloc_ireg (cfg
);
1363 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg
, OP_LOAD_MEMBASE
, creg
, ARMREG_SP
, (cfg
->param_area
- 4));
1364 MONO_EMIT_NEW_STORE_MEMBASE (cfg
, OP_STORE_MEMBASE_REG
, ARMREG_SP
, ainfo
->offset
, creg
);
1365 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1367 g_assert_not_reached ();
1374 arg
->backend
.reg3
= ainfo
->reg
;
1375 /* FP args are passed in int regs */
1376 call
->used_iregs
|= 1 << ainfo
->reg
;
1377 if (ainfo
->size
== 8) {
1378 arg
->opcode
= OP_OUTARG_R8
;
1379 call
->used_iregs
|= 1 << (ainfo
->reg
+ 1);
1381 arg
->opcode
= OP_OUTARG_R4
;
1384 cfg
->flags
|= MONO_CFG_HAS_FPOUT
;
1388 g_assert_not_reached ();
1392 if (sig
->ret
&& MONO_TYPE_ISSTRUCT (sig
->ret
)) {
1395 MONO_INST_NEW (cfg
, vtarg
, OP_MOVE
);
1396 vtarg
->sreg1
= call
->vret_var
->dreg
;
1397 vtarg
->dreg
= mono_alloc_preg (cfg
);
1398 MONO_ADD_INS (cfg
->cbb
, vtarg
);
1400 mono_call_inst_add_outarg_reg (cfg
, call
, vtarg
->dreg
, cinfo
->ret
.reg
, FALSE
);
1403 call
->stack_usage
= cinfo
->stack_usage
;
1409 mono_arch_emit_outarg_vt (MonoCompile
*cfg
, MonoInst
*ins
, MonoInst
*src
)
1411 MonoCallInst
*call
= (MonoCallInst
*)ins
->inst_p0
;
1412 ArgInfo
*ainfo
= ins
->inst_p1
;
1413 int ovf_size
= ainfo
->vtsize
;
1414 int doffset
= ainfo
->offset
;
1415 int i
, soffset
, dreg
;
1418 for (i
= 0; i
< ainfo
->size
; ++i
) {
1419 dreg
= mono_alloc_ireg (cfg
);
1420 MONO_EMIT_NEW_LOAD_MEMBASE (cfg
, dreg
, src
->dreg
, soffset
);
1421 mono_call_inst_add_outarg_reg (cfg
, call
, dreg
, ainfo
->reg
+ i
, FALSE
);
1422 soffset
+= sizeof (gpointer
);
1424 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1426 mini_emit_memcpy (cfg
, ARMREG_SP
, doffset
, src
->dreg
, soffset
, ovf_size
* sizeof (gpointer
), 0);
1430 mono_arch_emit_setret (MonoCompile
*cfg
, MonoMethod
*method
, MonoInst
*val
)
1432 MonoType
*ret
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
);
1435 if (ret
->type
== MONO_TYPE_I8
|| ret
->type
== MONO_TYPE_U8
) {
1438 MONO_INST_NEW (cfg
, ins
, OP_SETLRET
);
1439 ins
->sreg1
= val
->dreg
+ 1;
1440 ins
->sreg2
= val
->dreg
+ 2;
1441 MONO_ADD_INS (cfg
->cbb
, ins
);
1444 #ifdef MONO_ARCH_SOFT_FLOAT
1445 if (ret
->type
== MONO_TYPE_R8
) {
1448 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1449 ins
->dreg
= cfg
->ret
->dreg
;
1450 ins
->sreg1
= val
->dreg
;
1451 MONO_ADD_INS (cfg
->cbb
, ins
);
1454 if (ret
->type
== MONO_TYPE_R4
) {
1455 /* Already converted to an int in method_to_ir () */
1456 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
1459 #elif defined(ARM_FPU_VFP)
1460 if (ret
->type
== MONO_TYPE_R8
|| ret
->type
== MONO_TYPE_R4
) {
1463 MONO_INST_NEW (cfg
, ins
, OP_SETFRET
);
1464 ins
->dreg
= cfg
->ret
->dreg
;
1465 ins
->sreg1
= val
->dreg
;
1466 MONO_ADD_INS (cfg
->cbb
, ins
);
1470 if (ret
->type
== MONO_TYPE_R4
|| ret
->type
== MONO_TYPE_R8
) {
1471 MONO_EMIT_NEW_UNALU (cfg
, OP_FMOVE
, cfg
->ret
->dreg
, val
->dreg
);
1478 MONO_EMIT_NEW_UNALU (cfg
, OP_MOVE
, cfg
->ret
->dreg
, val
->dreg
);
/*
 * mono_arch_is_inst_imm:
 *   Whether IMM can be used directly as an inline immediate operand.
 *   NOTE(review): the body is not visible in this chunk; on ARM this
 *   presumably always returns TRUE, since out-of-range immediates are
 *   materialized into registers by the lowering pass -- confirm against
 *   the full source.
 */
1482 mono_arch_is_inst_imm (gint64 imm
)
1487 #define DYN_CALL_STACK_ARGS 6
1490 MonoMethodSignature
*sig
;
1495 mgreg_t regs
[PARAM_REGS
+ DYN_CALL_STACK_ARGS
];
1501 dyn_call_supported (CallInfo
*cinfo
, MonoMethodSignature
*sig
)
1505 if (sig
->hasthis
+ sig
->param_count
> PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1508 switch (cinfo
->ret
.regtype
) {
1510 case RegTypeGeneral
:
1511 case RegTypeIRegPair
:
1512 case RegTypeStructByAddr
:
1517 #elif defined(ARM_FPU_VFP)
1526 for (i
= 0; i
< cinfo
->nargs
; ++i
) {
1527 switch (cinfo
->args
[i
].regtype
) {
1528 case RegTypeGeneral
:
1530 case RegTypeIRegPair
:
1533 if (cinfo
->args
[i
].offset
>= (DYN_CALL_STACK_ARGS
* sizeof (gpointer
)))
1536 case RegTypeStructByVal
:
1537 if (cinfo
->args
[i
].reg
+ cinfo
->args
[i
].vtsize
>= PARAM_REGS
+ DYN_CALL_STACK_ARGS
)
1545 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1546 for (i
= 0; i
< sig
->param_count
; ++i
) {
1547 MonoType
*t
= sig
->params
[i
];
1555 #ifdef MONO_ARCH_SOFT_FLOAT
1574 mono_arch_dyn_call_prepare (MonoMethodSignature
*sig
)
1576 ArchDynCallInfo
*info
;
1579 cinfo
= get_call_info (sig
, FALSE
);
1581 if (!dyn_call_supported (cinfo
, sig
)) {
1586 info
= g_new0 (ArchDynCallInfo
, 1);
1587 // FIXME: Preprocess the info to speed up start_dyn_call ()
1589 info
->cinfo
= cinfo
;
1591 return (MonoDynCallInfo
*)info
;
1595 mono_arch_dyn_call_free (MonoDynCallInfo
*info
)
1597 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1599 g_free (ainfo
->cinfo
);
1604 mono_arch_start_dyn_call (MonoDynCallInfo
*info
, gpointer
**args
, guint8
*ret
, guint8
*buf
, int buf_len
)
1606 ArchDynCallInfo
*dinfo
= (ArchDynCallInfo
*)info
;
1607 DynCallArgs
*p
= (DynCallArgs
*)buf
;
1608 int arg_index
, greg
, i
, j
;
1609 MonoMethodSignature
*sig
= dinfo
->sig
;
1611 g_assert (buf_len
>= sizeof (DynCallArgs
));
1619 if (dinfo
->cinfo
->vtype_retaddr
)
1620 p
->regs
[greg
++] = (mgreg_t
)ret
;
1623 p
->regs
[greg
++] = (mgreg_t
)*(args
[arg_index
++]);
1625 for (i
= 0; i
< sig
->param_count
; i
++) {
1626 MonoType
*t
= mono_type_get_underlying_type (sig
->params
[i
]);
1627 gpointer
*arg
= args
[arg_index
++];
1628 ArgInfo
*ainfo
= &dinfo
->cinfo
->args
[i
+ sig
->hasthis
];
1631 if (ainfo
->regtype
== RegTypeGeneral
|| ainfo
->regtype
== RegTypeIRegPair
|| ainfo
->regtype
== RegTypeStructByVal
)
1633 else if (ainfo
->regtype
== RegTypeBase
)
1634 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1636 g_assert_not_reached ();
1639 p
->regs
[slot
] = (mgreg_t
)*arg
;
1644 case MONO_TYPE_STRING
:
1645 case MONO_TYPE_CLASS
:
1646 case MONO_TYPE_ARRAY
:
1647 case MONO_TYPE_SZARRAY
:
1648 case MONO_TYPE_OBJECT
:
1652 p
->regs
[slot
] = (mgreg_t
)*arg
;
1654 case MONO_TYPE_BOOLEAN
:
1656 p
->regs
[slot
] = *(guint8
*)arg
;
1659 p
->regs
[slot
] = *(gint8
*)arg
;
1662 p
->regs
[slot
] = *(gint16
*)arg
;
1665 case MONO_TYPE_CHAR
:
1666 p
->regs
[slot
] = *(guint16
*)arg
;
1669 p
->regs
[slot
] = *(gint32
*)arg
;
1672 p
->regs
[slot
] = *(guint32
*)arg
;
1676 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1677 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1680 p
->regs
[slot
] = *(mgreg_t
*)arg
;
1683 p
->regs
[slot
++] = (mgreg_t
)arg
[0];
1684 p
->regs
[slot
] = (mgreg_t
)arg
[1];
1686 case MONO_TYPE_GENERICINST
:
1687 if (MONO_TYPE_IS_REFERENCE (t
)) {
1688 p
->regs
[slot
] = (mgreg_t
)*arg
;
1693 case MONO_TYPE_VALUETYPE
:
1694 g_assert (ainfo
->regtype
== RegTypeStructByVal
);
1696 if (ainfo
->size
== 0)
1697 slot
= PARAM_REGS
+ (ainfo
->offset
/ 4);
1701 for (j
= 0; j
< ainfo
->size
+ ainfo
->vtsize
; ++j
)
1702 p
->regs
[slot
++] = ((mgreg_t
*)arg
) [j
];
1705 g_assert_not_reached ();
1711 mono_arch_finish_dyn_call (MonoDynCallInfo
*info
, guint8
*buf
)
1713 ArchDynCallInfo
*ainfo
= (ArchDynCallInfo
*)info
;
1714 MonoMethodSignature
*sig
= ((ArchDynCallInfo
*)info
)->sig
;
1715 guint8
*ret
= ((DynCallArgs
*)buf
)->ret
;
1716 mgreg_t res
= ((DynCallArgs
*)buf
)->res
;
1717 mgreg_t res2
= ((DynCallArgs
*)buf
)->res2
;
1719 switch (mono_type_get_underlying_type (sig
->ret
)->type
) {
1720 case MONO_TYPE_VOID
:
1721 *(gpointer
*)ret
= NULL
;
1723 case MONO_TYPE_STRING
:
1724 case MONO_TYPE_CLASS
:
1725 case MONO_TYPE_ARRAY
:
1726 case MONO_TYPE_SZARRAY
:
1727 case MONO_TYPE_OBJECT
:
1731 *(gpointer
*)ret
= (gpointer
)res
;
1737 case MONO_TYPE_BOOLEAN
:
1738 *(guint8
*)ret
= res
;
1741 *(gint16
*)ret
= res
;
1744 case MONO_TYPE_CHAR
:
1745 *(guint16
*)ret
= res
;
1748 *(gint32
*)ret
= res
;
1751 *(guint32
*)ret
= res
;
1755 /* This handles endianness as well */
1756 ((gint32
*)ret
) [0] = res
;
1757 ((gint32
*)ret
) [1] = res2
;
1759 case MONO_TYPE_GENERICINST
:
1760 if (MONO_TYPE_IS_REFERENCE (sig
->ret
)) {
1761 *(gpointer
*)ret
= (gpointer
)res
;
1766 case MONO_TYPE_VALUETYPE
:
1767 g_assert (ainfo
->cinfo
->vtype_retaddr
);
1770 #if defined(ARM_FPU_VFP)
1772 *(float*)ret
= *(float*)&res
;
1774 case MONO_TYPE_R8
: {
1780 *(double*)ret
= *(double*)®s
;
1785 g_assert_not_reached ();
1790 * Allow tracing to work with this interface (with an optional argument)
1794 mono_arch_instrument_prolog (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
)
1798 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
1799 ARM_MOV_REG_IMM8 (code
, ARMREG_R1
, 0); /* NULL ebp for now */
1800 code
= mono_arm_emit_load_imm (code
, ARMREG_R2
, (guint32
)func
);
1801 code
= emit_call_reg (code
, ARMREG_R2
);
1814 mono_arch_instrument_epilog_full (MonoCompile
*cfg
, void *func
, void *p
, gboolean enable_arguments
, gboolean preserve_argument_registers
)
1817 int save_mode
= SAVE_NONE
;
1819 MonoMethod
*method
= cfg
->method
;
1820 int rtype
= mini_type_get_underlying_type (cfg
->generic_sharing_context
, mono_method_signature (method
)->ret
)->type
;
1821 int save_offset
= cfg
->param_area
;
1825 offset
= code
- cfg
->native_code
;
1826 /* we need about 16 instructions */
1827 if (offset
> (cfg
->code_size
- 16 * 4)) {
1828 cfg
->code_size
*= 2;
1829 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
1830 code
= cfg
->native_code
+ offset
;
1833 case MONO_TYPE_VOID
:
1834 /* special case string .ctor icall */
1835 if (strcmp (".ctor", method
->name
) && method
->klass
== mono_defaults
.string_class
)
1836 save_mode
= SAVE_ONE
;
1838 save_mode
= SAVE_NONE
;
1842 save_mode
= SAVE_TWO
;
1846 save_mode
= SAVE_FP
;
1848 case MONO_TYPE_VALUETYPE
:
1849 save_mode
= SAVE_STRUCT
;
1852 save_mode
= SAVE_ONE
;
1856 switch (save_mode
) {
1858 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1859 ARM_STR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
1860 if (enable_arguments
) {
1861 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_R1
);
1862 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1866 ARM_STR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1867 if (enable_arguments
) {
1868 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1872 /* FIXME: what reg? */
1873 if (enable_arguments
) {
1874 /* FIXME: what reg? */
1878 if (enable_arguments
) {
1879 /* FIXME: get the actual address */
1880 ARM_MOV_REG_REG (code
, ARMREG_R1
, ARMREG_R0
);
1888 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->method
);
1889 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, (guint32
)func
);
1890 code
= emit_call_reg (code
, ARMREG_IP
);
1892 switch (save_mode
) {
1894 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1895 ARM_LDR_IMM (code
, ARMREG_R1
, cfg
->frame_reg
, save_offset
+ 4);
1898 ARM_LDR_IMM (code
, ARMREG_R0
, cfg
->frame_reg
, save_offset
);
1912 * The immediate field for cond branches is big enough for all reasonable methods
1914 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1915 if (0 && ins->inst_true_bb->native_offset) { \
1916 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1918 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1919 ARM_B_COND (code, (condcode), 0); \
1922 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1924 /* emit an exception if the condition fails
1926 * We assign the extra code used to throw the implicit exceptions
1927 * to cfg->bb_exit as far as the big branch handling is concerned
1929 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1931 mono_add_patch_info (cfg, code - cfg->native_code, \
1932 MONO_PATCH_INFO_EXC, exc_name); \
1933 ARM_BL_COND (code, (condcode), 0); \
1936 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * mono_arch_peephole_pass_1:
 *   First arch-specific peephole pass over BB.
 *   NOTE(review): no body is visible in this chunk; presumably a no-op on
 *   ARM, with all peephole rewrites implemented in
 *   mono_arch_peephole_pass_2 () -- confirm against the full source.
 */
1939 mono_arch_peephole_pass_1 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1944 mono_arch_peephole_pass_2 (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
1946 MonoInst
*ins
, *n
, *last_ins
= NULL
;
1948 MONO_BB_FOR_EACH_INS_SAFE (bb
, n
, ins
) {
1949 switch (ins
->opcode
) {
1952 /* Already done by an arch-independent pass */
1954 case OP_LOAD_MEMBASE
:
1955 case OP_LOADI4_MEMBASE
:
1957 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1958 * OP_LOAD_MEMBASE offset(basereg), reg
1960 if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_REG
1961 || last_ins
->opcode
== OP_STORE_MEMBASE_REG
) &&
1962 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
1963 ins
->inst_offset
== last_ins
->inst_offset
) {
1964 if (ins
->dreg
== last_ins
->sreg1
) {
1965 MONO_DELETE_INS (bb
, ins
);
1968 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1969 ins
->opcode
= OP_MOVE
;
1970 ins
->sreg1
= last_ins
->sreg1
;
1974 * Note: reg1 must be different from the basereg in the second load
1975 * OP_LOAD_MEMBASE offset(basereg), reg1
1976 * OP_LOAD_MEMBASE offset(basereg), reg2
1978 * OP_LOAD_MEMBASE offset(basereg), reg1
1979 * OP_MOVE reg1, reg2
1981 } if (last_ins
&& (last_ins
->opcode
== OP_LOADI4_MEMBASE
1982 || last_ins
->opcode
== OP_LOAD_MEMBASE
) &&
1983 ins
->inst_basereg
!= last_ins
->dreg
&&
1984 ins
->inst_basereg
== last_ins
->inst_basereg
&&
1985 ins
->inst_offset
== last_ins
->inst_offset
) {
1987 if (ins
->dreg
== last_ins
->dreg
) {
1988 MONO_DELETE_INS (bb
, ins
);
1991 ins
->opcode
= OP_MOVE
;
1992 ins
->sreg1
= last_ins
->dreg
;
1995 //g_assert_not_reached ();
1999 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2000 * OP_LOAD_MEMBASE offset(basereg), reg
2002 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2003 * OP_ICONST reg, imm
2005 } else if (last_ins
&& (last_ins
->opcode
== OP_STOREI4_MEMBASE_IMM
2006 || last_ins
->opcode
== OP_STORE_MEMBASE_IMM
) &&
2007 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2008 ins
->inst_offset
== last_ins
->inst_offset
) {
2009 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2010 ins
->opcode
= OP_ICONST
;
2011 ins
->inst_c0
= last_ins
->inst_imm
;
2012 g_assert_not_reached (); // check this rule
2016 case OP_LOADU1_MEMBASE
:
2017 case OP_LOADI1_MEMBASE
:
2018 if (last_ins
&& (last_ins
->opcode
== OP_STOREI1_MEMBASE_REG
) &&
2019 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2020 ins
->inst_offset
== last_ins
->inst_offset
) {
2021 ins
->opcode
= (ins
->opcode
== OP_LOADI1_MEMBASE
) ? OP_ICONV_TO_I1
: OP_ICONV_TO_U1
;
2022 ins
->sreg1
= last_ins
->sreg1
;
2025 case OP_LOADU2_MEMBASE
:
2026 case OP_LOADI2_MEMBASE
:
2027 if (last_ins
&& (last_ins
->opcode
== OP_STOREI2_MEMBASE_REG
) &&
2028 ins
->inst_basereg
== last_ins
->inst_destbasereg
&&
2029 ins
->inst_offset
== last_ins
->inst_offset
) {
2030 ins
->opcode
= (ins
->opcode
== OP_LOADI2_MEMBASE
) ? OP_ICONV_TO_I2
: OP_ICONV_TO_U2
;
2031 ins
->sreg1
= last_ins
->sreg1
;
2035 ins
->opcode
= OP_MOVE
;
2039 if (ins
->dreg
== ins
->sreg1
) {
2040 MONO_DELETE_INS (bb
, ins
);
2044 * OP_MOVE sreg, dreg
2045 * OP_MOVE dreg, sreg
2047 if (last_ins
&& last_ins
->opcode
== OP_MOVE
&&
2048 ins
->sreg1
== last_ins
->dreg
&&
2049 ins
->dreg
== last_ins
->sreg1
) {
2050 MONO_DELETE_INS (bb
, ins
);
2058 bb
->last_ins
= last_ins
;
2062 * the branch_cc_table should maintain the order of these
2076 branch_cc_table
[] = {
2090 #define NEW_INS(cfg,dest,op) do { \
2091 MONO_INST_NEW ((cfg), (dest), (op)); \
2092 mono_bblock_insert_before_ins (bb, ins, (dest)); \
2096 map_to_reg_reg_op (int op
)
2105 case OP_COMPARE_IMM
:
2107 case OP_ICOMPARE_IMM
:
2121 case OP_LOAD_MEMBASE
:
2122 return OP_LOAD_MEMINDEX
;
2123 case OP_LOADI4_MEMBASE
:
2124 return OP_LOADI4_MEMINDEX
;
2125 case OP_LOADU4_MEMBASE
:
2126 return OP_LOADU4_MEMINDEX
;
2127 case OP_LOADU1_MEMBASE
:
2128 return OP_LOADU1_MEMINDEX
;
2129 case OP_LOADI2_MEMBASE
:
2130 return OP_LOADI2_MEMINDEX
;
2131 case OP_LOADU2_MEMBASE
:
2132 return OP_LOADU2_MEMINDEX
;
2133 case OP_LOADI1_MEMBASE
:
2134 return OP_LOADI1_MEMINDEX
;
2135 case OP_STOREI1_MEMBASE_REG
:
2136 return OP_STOREI1_MEMINDEX
;
2137 case OP_STOREI2_MEMBASE_REG
:
2138 return OP_STOREI2_MEMINDEX
;
2139 case OP_STOREI4_MEMBASE_REG
:
2140 return OP_STOREI4_MEMINDEX
;
2141 case OP_STORE_MEMBASE_REG
:
2142 return OP_STORE_MEMINDEX
;
2143 case OP_STORER4_MEMBASE_REG
:
2144 return OP_STORER4_MEMINDEX
;
2145 case OP_STORER8_MEMBASE_REG
:
2146 return OP_STORER8_MEMINDEX
;
2147 case OP_STORE_MEMBASE_IMM
:
2148 return OP_STORE_MEMBASE_REG
;
2149 case OP_STOREI1_MEMBASE_IMM
:
2150 return OP_STOREI1_MEMBASE_REG
;
2151 case OP_STOREI2_MEMBASE_IMM
:
2152 return OP_STOREI2_MEMBASE_REG
;
2153 case OP_STOREI4_MEMBASE_IMM
:
2154 return OP_STOREI4_MEMBASE_REG
;
2156 g_assert_not_reached ();
2160 * Remove from the instruction list the instructions that can't be
2161 * represented with very simple instructions with no register
2165 mono_arch_lowering_pass (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2167 MonoInst
*ins
, *temp
, *last_ins
= NULL
;
2168 int rot_amount
, imm8
, low_imm
;
2170 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2172 switch (ins
->opcode
) {
2176 case OP_COMPARE_IMM
:
2177 case OP_ICOMPARE_IMM
:
2191 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
)) < 0) {
2192 NEW_INS (cfg
, temp
, OP_ICONST
);
2193 temp
->inst_c0
= ins
->inst_imm
;
2194 temp
->dreg
= mono_alloc_ireg (cfg
);
2195 ins
->sreg2
= temp
->dreg
;
2196 ins
->opcode
= mono_op_imm_to_op (ins
->opcode
);
2198 if (ins
->opcode
== OP_SBB
|| ins
->opcode
== OP_ISBB
|| ins
->opcode
== OP_SUBCC
)
2204 if (ins
->inst_imm
== 1) {
2205 ins
->opcode
= OP_MOVE
;
2208 if (ins
->inst_imm
== 0) {
2209 ins
->opcode
= OP_ICONST
;
2213 imm8
= mono_is_power_of_two (ins
->inst_imm
);
2215 ins
->opcode
= OP_SHL_IMM
;
2216 ins
->inst_imm
= imm8
;
2219 NEW_INS (cfg
, temp
, OP_ICONST
);
2220 temp
->inst_c0
= ins
->inst_imm
;
2221 temp
->dreg
= mono_alloc_ireg (cfg
);
2222 ins
->sreg2
= temp
->dreg
;
2223 ins
->opcode
= OP_IMUL
;
2229 if (ins
->next
&& (ins
->next
->opcode
== OP_COND_EXC_C
|| ins
->next
->opcode
== OP_COND_EXC_IC
))
2230 /* ARM sets the C flag to 1 if there was _no_ overflow */
2231 ins
->next
->opcode
= OP_COND_EXC_NC
;
2233 case OP_LOCALLOC_IMM
:
2234 NEW_INS (cfg
, temp
, OP_ICONST
);
2235 temp
->inst_c0
= ins
->inst_imm
;
2236 temp
->dreg
= mono_alloc_ireg (cfg
);
2237 ins
->sreg1
= temp
->dreg
;
2238 ins
->opcode
= OP_LOCALLOC
;
2240 case OP_LOAD_MEMBASE
:
2241 case OP_LOADI4_MEMBASE
:
2242 case OP_LOADU4_MEMBASE
:
2243 case OP_LOADU1_MEMBASE
:
2244 /* we can do two things: load the immed in a register
2245 * and use an indexed load, or see if the immed can be
2246 * represented as an ad_imm + a load with a smaller offset
2247 * that fits. We just do the first for now, optimize later.
2249 if (arm_is_imm12 (ins
->inst_offset
))
2251 NEW_INS (cfg
, temp
, OP_ICONST
);
2252 temp
->inst_c0
= ins
->inst_offset
;
2253 temp
->dreg
= mono_alloc_ireg (cfg
);
2254 ins
->sreg2
= temp
->dreg
;
2255 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2257 case OP_LOADI2_MEMBASE
:
2258 case OP_LOADU2_MEMBASE
:
2259 case OP_LOADI1_MEMBASE
:
2260 if (arm_is_imm8 (ins
->inst_offset
))
2262 NEW_INS (cfg
, temp
, OP_ICONST
);
2263 temp
->inst_c0
= ins
->inst_offset
;
2264 temp
->dreg
= mono_alloc_ireg (cfg
);
2265 ins
->sreg2
= temp
->dreg
;
2266 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2268 case OP_LOADR4_MEMBASE
:
2269 case OP_LOADR8_MEMBASE
:
2270 if (arm_is_fpimm8 (ins
->inst_offset
))
2272 low_imm
= ins
->inst_offset
& 0x1ff;
2273 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~0x1ff, &rot_amount
)) >= 0) {
2274 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2275 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2276 temp
->sreg1
= ins
->inst_basereg
;
2277 temp
->dreg
= mono_alloc_ireg (cfg
);
2278 ins
->inst_basereg
= temp
->dreg
;
2279 ins
->inst_offset
= low_imm
;
2282 /* VFP/FPA doesn't have indexed load instructions */
2283 g_assert_not_reached ();
2285 case OP_STORE_MEMBASE_REG
:
2286 case OP_STOREI4_MEMBASE_REG
:
2287 case OP_STOREI1_MEMBASE_REG
:
2288 if (arm_is_imm12 (ins
->inst_offset
))
2290 NEW_INS (cfg
, temp
, OP_ICONST
);
2291 temp
->inst_c0
= ins
->inst_offset
;
2292 temp
->dreg
= mono_alloc_ireg (cfg
);
2293 ins
->sreg2
= temp
->dreg
;
2294 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2296 case OP_STOREI2_MEMBASE_REG
:
2297 if (arm_is_imm8 (ins
->inst_offset
))
2299 NEW_INS (cfg
, temp
, OP_ICONST
);
2300 temp
->inst_c0
= ins
->inst_offset
;
2301 temp
->dreg
= mono_alloc_ireg (cfg
);
2302 ins
->sreg2
= temp
->dreg
;
2303 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2305 case OP_STORER4_MEMBASE_REG
:
2306 case OP_STORER8_MEMBASE_REG
:
2307 if (arm_is_fpimm8 (ins
->inst_offset
))
2309 low_imm
= ins
->inst_offset
& 0x1ff;
2310 if ((imm8
= mono_arm_is_rotated_imm8 (ins
->inst_offset
& ~ 0x1ff, &rot_amount
)) >= 0 && arm_is_fpimm8 (low_imm
)) {
2311 NEW_INS (cfg
, temp
, OP_ADD_IMM
);
2312 temp
->inst_imm
= ins
->inst_offset
& ~0x1ff;
2313 temp
->sreg1
= ins
->inst_destbasereg
;
2314 temp
->dreg
= mono_alloc_ireg (cfg
);
2315 ins
->inst_destbasereg
= temp
->dreg
;
2316 ins
->inst_offset
= low_imm
;
2319 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2320 /* VFP/FPA doesn't have indexed store instructions */
2321 g_assert_not_reached ();
2323 case OP_STORE_MEMBASE_IMM
:
2324 case OP_STOREI1_MEMBASE_IMM
:
2325 case OP_STOREI2_MEMBASE_IMM
:
2326 case OP_STOREI4_MEMBASE_IMM
:
2327 NEW_INS (cfg
, temp
, OP_ICONST
);
2328 temp
->inst_c0
= ins
->inst_imm
;
2329 temp
->dreg
= mono_alloc_ireg (cfg
);
2330 ins
->sreg1
= temp
->dreg
;
2331 ins
->opcode
= map_to_reg_reg_op (ins
->opcode
);
2333 goto loop_start
; /* make it handle the possibly big ins->inst_offset */
2335 gboolean swap
= FALSE
;
2338 /* Some fp compares require swapped operands */
2339 g_assert (ins
->next
);
2340 switch (ins
->next
->opcode
) {
2342 ins
->next
->opcode
= OP_FBLT
;
2346 ins
->next
->opcode
= OP_FBLT_UN
;
2350 ins
->next
->opcode
= OP_FBGE
;
2354 ins
->next
->opcode
= OP_FBGE_UN
;
2362 ins
->sreg1
= ins
->sreg2
;
2371 bb
->last_ins
= last_ins
;
2372 bb
->max_vreg
= cfg
->next_vreg
;
2376 mono_arch_decompose_long_opts (MonoCompile
*cfg
, MonoInst
*long_ins
)
2380 if (long_ins
->opcode
== OP_LNEG
) {
2382 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSBS_IMM
, ins
->dreg
+ 1, ins
->sreg1
+ 1, 0);
2383 MONO_EMIT_NEW_BIALU_IMM (cfg
, OP_ARM_RSC_IMM
, ins
->dreg
+ 2, ins
->sreg1
+ 2, 0);
2389 emit_float_to_int (MonoCompile
*cfg
, guchar
*code
, int dreg
, int sreg
, int size
, gboolean is_signed
)
2391 /* sreg is a float, dreg is an integer reg */
2393 ARM_FIXZ (code
, dreg
, sreg
);
2394 #elif defined(ARM_FPU_VFP)
2396 ARM_TOSIZD (code
, ARM_VFP_F0
, sreg
);
2398 ARM_TOUIZD (code
, ARM_VFP_F0
, sreg
);
2399 ARM_FMRS (code
, dreg
, ARM_VFP_F0
);
2403 ARM_AND_REG_IMM8 (code
, dreg
, dreg
, 0xff);
2404 else if (size
== 2) {
2405 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2406 ARM_SHR_IMM (code
, dreg
, dreg
, 16);
2410 ARM_SHL_IMM (code
, dreg
, dreg
, 24);
2411 ARM_SAR_IMM (code
, dreg
, dreg
, 24);
2412 } else if (size
== 2) {
2413 ARM_SHL_IMM (code
, dreg
, dreg
, 16);
2414 ARM_SAR_IMM (code
, dreg
, dreg
, 16);
2422 const guchar
*target
;
2427 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
2430 search_thunk_slot (void *data
, int csize
, int bsize
, void *user_data
) {
2431 PatchData
*pdata
= (PatchData
*)user_data
;
2432 guchar
*code
= data
;
2433 guint32
*thunks
= data
;
2434 guint32
*endthunks
= (guint32
*)(code
+ bsize
);
2436 int difflow
, diffhigh
;
2438 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2439 difflow
= (char*)pdata
->code
- (char*)thunks
;
2440 diffhigh
= (char*)pdata
->code
- (char*)endthunks
;
2441 if (!((is_call_imm (thunks
) && is_call_imm (endthunks
)) || (is_call_imm (difflow
) && is_call_imm (diffhigh
))))
2445 * The thunk is composed of 3 words:
2446 * load constant from thunks [2] into ARM_IP
2449 * Note that the LR register is already setup
2451 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2452 if ((pdata
->found
== 2) || (pdata
->code
>= code
&& pdata
->code
<= code
+ csize
)) {
2453 while (thunks
< endthunks
) {
2454 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2455 if (thunks
[2] == (guint32
)pdata
->target
) {
2456 arm_patch (pdata
->code
, (guchar
*)thunks
);
2457 mono_arch_flush_icache (pdata
->code
, 4);
2460 } else if ((thunks
[0] == 0) && (thunks
[1] == 0) && (thunks
[2] == 0)) {
2461 /* found a free slot instead: emit thunk */
2462 /* ARMREG_IP is fine to use since this can't be an IMT call
2465 code
= (guchar
*)thunks
;
2466 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
2467 if (thumb_supported
)
2468 ARM_BX (code
, ARMREG_IP
);
2470 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
2471 thunks
[2] = (guint32
)pdata
->target
;
2472 mono_arch_flush_icache ((guchar
*)thunks
, 12);
2474 arm_patch (pdata
->code
, (guchar
*)thunks
);
2475 mono_arch_flush_icache (pdata
->code
, 4);
2479 /* skip 12 bytes, the size of the thunk */
2483 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
2489 handle_thunk (MonoDomain
*domain
, int absolute
, guchar
*code
, const guchar
*target
)
2494 domain
= mono_domain_get ();
2497 pdata
.target
= target
;
2498 pdata
.absolute
= absolute
;
2501 mono_domain_lock (domain
);
2502 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2505 /* this uses the first available slot */
2507 mono_domain_code_foreach (domain
, search_thunk_slot
, &pdata
);
2509 mono_domain_unlock (domain
);
2511 if (pdata
.found
!= 1)
2512 g_print ("thunk failed for %p from %p\n", target
, code
);
2513 g_assert (pdata
.found
== 1);
2517 arm_patch_general (MonoDomain
*domain
, guchar
*code
, const guchar
*target
)
2519 guint32
*code32
= (void*)code
;
2520 guint32 ins
= *code32
;
2521 guint32 prim
= (ins
>> 25) & 7;
2522 guint32 tval
= GPOINTER_TO_UINT (target
);
2524 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2525 if (prim
== 5) { /* 101b */
2526 /* the diff starts 8 bytes from the branch opcode */
2527 gint diff
= target
- code
- 8;
2529 gint tmask
= 0xffffffff;
2530 if (tval
& 1) { /* entering thumb mode */
2531 diff
= target
- 1 - code
- 8;
2532 g_assert (thumb_supported
);
2533 tbits
= 0xf << 28; /* bl->blx bit pattern */
2534 g_assert ((ins
& (1 << 24))); /* it must be a bl, not b instruction */
2535 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2539 tmask
= ~(1 << 24); /* clear the link bit */
2540 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2545 if (diff
<= 33554431) {
2547 ins
= (ins
& 0xff000000) | diff
;
2549 *code32
= ins
| tbits
;
2553 /* diff between 0 and -33554432 */
2554 if (diff
>= -33554432) {
2556 ins
= (ins
& 0xff000000) | (diff
& ~0xff000000);
2558 *code32
= ins
| tbits
;
2563 handle_thunk (domain
, TRUE
, code
, target
);
2568 * The alternative call sequences looks like this:
2570 * ldr ip, [pc] // loads the address constant
2571 * b 1f // jumps around the constant
2572 * address constant embedded in the code
2577 * There are two cases for patching:
2578 * a) at the end of method emission: in this case code points to the start
2579 * of the call sequence
2580 * b) during runtime patching of the call site: in this case code points
2581 * to the mov pc, ip instruction
2583 * We have to handle also the thunk jump code sequence:
2587 * address constant // execution never reaches here
2589 if ((ins
& 0x0ffffff0) == 0x12fff10) {
2590 /* Branch and exchange: the address is constructed in a reg
2591 * We can patch BX when the code sequence is the following:
2592 * ldr ip, [pc, #0] ; 0x8
2599 guint8
*emit
= (guint8
*)ccode
;
2600 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2602 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2603 ARM_BX (emit
, ARMREG_IP
);
2605 /*patching from magic trampoline*/
2606 if (ins
== ccode
[3]) {
2607 g_assert (code32
[-4] == ccode
[0]);
2608 g_assert (code32
[-3] == ccode
[1]);
2609 g_assert (code32
[-1] == ccode
[2]);
2610 code32
[-2] = (guint32
)target
;
2613 /*patching from JIT*/
2614 if (ins
== ccode
[0]) {
2615 g_assert (code32
[1] == ccode
[1]);
2616 g_assert (code32
[3] == ccode
[2]);
2617 g_assert (code32
[4] == ccode
[3]);
2618 code32
[2] = (guint32
)target
;
2621 g_assert_not_reached ();
2622 } else if ((ins
& 0x0ffffff0) == 0x12fff30) {
2630 guint8
*emit
= (guint8
*)ccode
;
2631 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2633 ARM_BLX_REG (emit
, ARMREG_IP
);
2635 g_assert (code32
[-3] == ccode
[0]);
2636 g_assert (code32
[-2] == ccode
[1]);
2637 g_assert (code32
[0] == ccode
[2]);
2639 code32
[-1] = (guint32
)target
;
2642 guint32
*tmp
= ccode
;
2643 guint8
*emit
= (guint8
*)tmp
;
2644 ARM_LDR_IMM (emit
, ARMREG_IP
, ARMREG_PC
, 0);
2645 ARM_MOV_REG_REG (emit
, ARMREG_LR
, ARMREG_PC
);
2646 ARM_MOV_REG_REG (emit
, ARMREG_PC
, ARMREG_IP
);
2647 ARM_BX (emit
, ARMREG_IP
);
2648 if (ins
== ccode
[2]) {
2649 g_assert_not_reached (); // should be -2 ...
2650 code32
[-1] = (guint32
)target
;
2653 if (ins
== ccode
[0]) {
2654 /* handles both thunk jump code and the far call sequence */
2655 code32
[2] = (guint32
)target
;
2658 g_assert_not_reached ();
2660 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *   Patch the branch/call instruction at CODE so it transfers control to
 *   TARGET. Thin wrapper around arm_patch_general () passing a NULL
 *   domain (the current domain is looked up lazily only if a thunk must
 *   be allocated).
 */
2664 arm_patch (guchar
*code
, const guchar
*target
)
2666 arm_patch_general (NULL
, code
, target
);
2670 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2671 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2672 * to be used with the emit macros.
2673 * Return -1 otherwise.
2676 mono_arm_is_rotated_imm8 (guint32 val
, gint
*rot_amount
)
2679 for (i
= 0; i
< 31; i
+= 2) {
2680 res
= (val
<< (32 - i
)) | (val
>> i
);
2683 *rot_amount
= i
? 32 - i
: 0;
2690 * Emits in code a sequence of instructions that load the value 'val'
2691 * into the dreg register. Uses at most 4 instructions.
/* Returns the updated code pointer; callers use code = mono_arm_emit_load_imm (code, ...). */
2694 mono_arm_emit_load_imm (guint8
*code
, int dreg
, guint32 val
)
2696 int imm8
, rot_amount
;
/* NOTE(review): this LDR-from-PC + constant-pool variant appears to be a
 * disabled alternative path (the surrounding preprocessor lines are not
 * visible in this extract) -- confirm against the full source before
 * assuming it is ever emitted. */
2698 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
2699 /* skip the constant pool */
/* Fast path: val fits the ARM immediate encoding (8-bit value with an even
 * rotation) -> a single MOV. */
2705 if ((imm8
= mono_arm_is_rotated_imm8 (val
, &rot_amount
)) >= 0) {
2706 ARM_MOV_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* Otherwise try the bitwise complement: if ~val is encodable, a single MVN
 * materializes val. */
2707 } else if ((imm8
= mono_arm_is_rotated_imm8 (~val
, &rot_amount
)) >= 0) {
2708 ARM_MVN_REG_IMM (code
, dreg
, imm8
, rot_amount
);
/* MOVW/MOVT pair: load the low then the high 16-bit half directly.
 * NOTE(review): presumably guarded by a v7 capability check on lines not
 * visible in this extract -- confirm before relying on it. */
2711 ARM_MOVW_REG_IMM (code
, dreg
, val
& 0xffff);
2713 ARM_MOVT_REG_IMM (code
, dreg
, (val
>> 16) & 0xffff);
/* Generic fallback: build the constant byte-by-byte -- a MOV of the low
 * byte followed by up to three rotated-immediate ADDs, one per remaining
 * non-zero byte (at most 4 instructions, as promised above). The last
 * argument of each ADD is the rotation selecting which byte is written. */
2717 ARM_MOV_REG_IMM8 (code
, dreg
, (val
& 0xFF));
2719 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
2721 if (val
& 0xFF0000) {
2722 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2724 if (val
& 0xFF000000) {
2725 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
/* Same construction when the low byte is zero: start the sequence at the
 * first non-zero byte instead. */
2727 } else if (val
& 0xFF00) {
2728 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF00) >> 8, 24);
2729 if (val
& 0xFF0000) {
2730 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
2732 if (val
& 0xFF000000) {
2733 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2735 } else if (val
& 0xFF0000) {
2736 ARM_MOV_REG_IMM (code
, dreg
, (val
& 0xFF0000) >> 16, 16);
2737 if (val
& 0xFF000000) {
2738 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF000000) >> 24, 8);
2741 //g_assert_not_reached ();
2747 * emit_load_volatile_arguments:
2749 * Load volatile arguments from the stack to the original input registers.
2750 * Required before a tail call.
/* Returns the updated code pointer. */
2753 emit_load_volatile_arguments (MonoCompile
*cfg
, guint8
*code
)
2755 MonoMethod
*method
= cfg
->method
;
2756 MonoMethodSignature
*sig
;
2761 /* FIXME: Generate intermediate code instead */
2763 sig
= mono_method_signature (method
);
2765 /* This is the opposite of the code in emit_prolog */
/* Recompute the calling convention layout for this signature. */
2769 cinfo
= get_call_info (sig
, sig
->pinvoke
);
/* Valuetype return: the caller passed a hidden return-buffer address;
 * reload it into the register recorded in cinfo->ret. */
2771 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
2772 ArgInfo
*ainfo
= &cinfo
->ret
;
2773 inst
= cfg
->vret_addr
;
2774 g_assert (arm_is_imm12 (inst
->inst_offset
));
2775 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
/* Walk the implicit 'this' (if any) plus every formal parameter. */
2777 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
2778 ArgInfo
*ainfo
= cinfo
->args
+ i
;
/* NOTE(review): the loop counter is 'i' but the index used here is 'pos'
 * (its declaration/update is not visible in this extract) -- verify the
 * two stay in sync. */
2779 inst
= cfg
->args
[pos
];
2781 if (cfg
->verbose_level
> 2)
2782 g_print ("Loading argument %d (type: %d)\n", i
, ainfo
->regtype
);
/* Argument kept in a register by the register allocator: move it back to
 * the register the calling convention assigned to it. */
2783 if (inst
->opcode
== OP_REGVAR
) {
2784 if (ainfo
->regtype
== RegTypeGeneral
)
2785 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
2786 else if (ainfo
->regtype
== RegTypeFP
) {
2787 g_assert_not_reached ();
/* Argument was passed on the caller's stack: reload it sp-relative. */
2788 } else if (ainfo
->regtype
== RegTypeBase
) {
2792 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2793 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the guarded path above uses prev_sp_offset + ainfo->offset,
 * but this fallback materializes inst->inst_offset instead -- looks
 * inconsistent; confirm against the matching code in emit_prolog. */
2795 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2796 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2800 g_assert_not_reached ();
/* Argument was spilled to a local stack slot; reload it into its original
 * argument register(s). */
2802 if (ainfo
->regtype
== RegTypeGeneral
|| ainfo
->regtype
== RegTypeIRegPair
) {
2803 switch (ainfo
->size
) {
/* 8-byte case: reload the pair into ainfo->reg and ainfo->reg + 1. */
2810 g_assert (arm_is_imm12 (inst
->inst_offset
));
2811 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2812 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
2813 ARM_LDR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
/* Word-sized (default) case: LDR with an immediate offset when it fits in
 * 12 bits, otherwise materialize the offset in IP first. */
2816 if (arm_is_imm12 (inst
->inst_offset
)) {
2817 ARM_LDR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
2819 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
2820 ARM_LDR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
2824 } else if (ainfo
->regtype
== RegTypeBaseGen
) {
2827 } else if (ainfo
->regtype
== RegTypeBase
) {
2829 } else if (ainfo
->regtype
== RegTypeFP
) {
2830 g_assert_not_reached ();
/* Struct passed by value in registers: reload each word back into the
 * consecutive registers ainfo->reg .. ainfo->reg + size - 1. */
2831 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
2832 int doffset
= inst
->inst_offset
;
2836 if (mono_class_from_mono_type (inst
->inst_vtype
))
2837 size
= mono_class_native_size (mono_class_from_mono_type (inst
->inst_vtype
), NULL
);
2838 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
2839 if (arm_is_imm12 (doffset
)) {
2840 ARM_LDR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
2842 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
2843 ARM_LDR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
/* Advance both cursors one machine word per register reloaded. */
2845 soffset
+= sizeof (gpointer
);
2846 doffset
+= sizeof (gpointer
);
2851 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
2868 mono_arch_output_basic_block (MonoCompile
*cfg
, MonoBasicBlock
*bb
)
2873 guint8
*code
= cfg
->native_code
+ cfg
->code_len
;
2874 MonoInst
*last_ins
= NULL
;
2875 guint last_offset
= 0;
2877 int imm8
, rot_amount
;
2879 /* we don't align basic blocks of loops on arm */
2881 if (cfg
->verbose_level
> 2)
2882 g_print ("Basic block %d starting at offset 0x%x\n", bb
->block_num
, bb
->native_offset
);
2884 cpos
= bb
->max_offset
;
2886 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
) {
2887 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2888 //g_assert (!mono_compile_aot);
2891 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2892 /* this is not thread save, but good enough */
2893 /* fixme: howto handle overflows? */
2894 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2897 if (mono_break_at_bb_method
&& mono_method_desc_full_match (mono_break_at_bb_method
, cfg
->method
) && bb
->block_num
== mono_break_at_bb_bb_num
) {
2898 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2899 (gpointer
)"mono_break");
2900 code
= emit_call_seq (cfg
, code
);
2903 MONO_BB_FOR_EACH_INS (bb
, ins
) {
2904 offset
= code
- cfg
->native_code
;
2906 max_len
= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
2908 if (offset
> (cfg
->code_size
- max_len
- 16)) {
2909 cfg
->code_size
*= 2;
2910 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
2911 code
= cfg
->native_code
+ offset
;
2913 // if (ins->cil_code)
2914 // g_print ("cil code\n");
2915 mono_debug_record_line_number (cfg
, ins
, offset
);
2917 switch (ins
->opcode
) {
2918 case OP_MEMORY_BARRIER
:
2921 #ifdef HAVE_AEABI_READ_TP
2922 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
2923 (gpointer
)"__aeabi_read_tp");
2924 code
= emit_call_seq (cfg
, code
);
2926 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_R0
, ins
->inst_offset
);
2928 g_assert_not_reached ();
2932 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2933 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2936 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2937 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2939 case OP_STOREI1_MEMBASE_IMM
:
2940 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFF);
2941 g_assert (arm_is_imm12 (ins
->inst_offset
));
2942 ARM_STRB_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2944 case OP_STOREI2_MEMBASE_IMM
:
2945 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
& 0xFFFF);
2946 g_assert (arm_is_imm8 (ins
->inst_offset
));
2947 ARM_STRH_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2949 case OP_STORE_MEMBASE_IMM
:
2950 case OP_STOREI4_MEMBASE_IMM
:
2951 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_imm
);
2952 g_assert (arm_is_imm12 (ins
->inst_offset
));
2953 ARM_STR_IMM (code
, ARMREG_LR
, ins
->inst_destbasereg
, ins
->inst_offset
);
2955 case OP_STOREI1_MEMBASE_REG
:
2956 g_assert (arm_is_imm12 (ins
->inst_offset
));
2957 ARM_STRB_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2959 case OP_STOREI2_MEMBASE_REG
:
2960 g_assert (arm_is_imm8 (ins
->inst_offset
));
2961 ARM_STRH_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2963 case OP_STORE_MEMBASE_REG
:
2964 case OP_STOREI4_MEMBASE_REG
:
2965 /* this case is special, since it happens for spill code after lowering has been called */
2966 if (arm_is_imm12 (ins
->inst_offset
)) {
2967 ARM_STR_IMM (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
2969 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
2970 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ARMREG_LR
);
2973 case OP_STOREI1_MEMINDEX
:
2974 ARM_STRB_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2976 case OP_STOREI2_MEMINDEX
:
2977 ARM_STRH_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2979 case OP_STORE_MEMINDEX
:
2980 case OP_STOREI4_MEMINDEX
:
2981 ARM_STR_REG_REG (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->sreg2
);
2984 g_assert_not_reached ();
2986 case OP_LOAD_MEMINDEX
:
2987 case OP_LOADI4_MEMINDEX
:
2988 case OP_LOADU4_MEMINDEX
:
2989 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2991 case OP_LOADI1_MEMINDEX
:
2992 ARM_LDRSB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2994 case OP_LOADU1_MEMINDEX
:
2995 ARM_LDRB_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
2997 case OP_LOADI2_MEMINDEX
:
2998 ARM_LDRSH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3000 case OP_LOADU2_MEMINDEX
:
3001 ARM_LDRH_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ins
->sreg2
);
3003 case OP_LOAD_MEMBASE
:
3004 case OP_LOADI4_MEMBASE
:
3005 case OP_LOADU4_MEMBASE
:
3006 /* this case is special, since it happens for spill code after lowering has been called */
3007 if (arm_is_imm12 (ins
->inst_offset
)) {
3008 ARM_LDR_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3010 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3011 ARM_LDR_REG_REG (code
, ins
->dreg
, ins
->inst_basereg
, ARMREG_LR
);
3014 case OP_LOADI1_MEMBASE
:
3015 g_assert (arm_is_imm8 (ins
->inst_offset
));
3016 ARM_LDRSB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3018 case OP_LOADU1_MEMBASE
:
3019 g_assert (arm_is_imm12 (ins
->inst_offset
));
3020 ARM_LDRB_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3022 case OP_LOADU2_MEMBASE
:
3023 g_assert (arm_is_imm8 (ins
->inst_offset
));
3024 ARM_LDRH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3026 case OP_LOADI2_MEMBASE
:
3027 g_assert (arm_is_imm8 (ins
->inst_offset
));
3028 ARM_LDRSH_IMM (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3030 case OP_ICONV_TO_I1
:
3031 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 24);
3032 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 24);
3034 case OP_ICONV_TO_I2
:
3035 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3036 ARM_SAR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3038 case OP_ICONV_TO_U1
:
3039 ARM_AND_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0xff);
3041 case OP_ICONV_TO_U2
:
3042 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, 16);
3043 ARM_SHR_IMM (code
, ins
->dreg
, ins
->dreg
, 16);
3047 ARM_CMP_REG_REG (code
, ins
->sreg1
, ins
->sreg2
);
3049 case OP_COMPARE_IMM
:
3050 case OP_ICOMPARE_IMM
:
3051 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3052 g_assert (imm8
>= 0);
3053 ARM_CMP_REG_IMM (code
, ins
->sreg1
, imm8
, rot_amount
);
3057 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3058 * So instead of emitting a trap, we emit a call a C function and place a
3061 //*(int*)code = 0xef9f0001;
3064 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3065 (gpointer
)"mono_break");
3066 code
= emit_call_seq (cfg
, code
);
3068 case OP_RELAXED_NOP
:
3073 case OP_DUMMY_STORE
:
3074 case OP_NOT_REACHED
:
3077 case OP_SEQ_POINT
: {
3079 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
3080 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
3082 int dreg
= ARMREG_LR
;
3085 * For AOT, we use one got slot per method, which will point to a
3086 * SeqPointInfo structure, containing all the information required
3087 * by the code below.
3089 if (cfg
->compile_aot
) {
3090 g_assert (info_var
);
3091 g_assert (info_var
->opcode
== OP_REGOFFSET
);
3092 g_assert (arm_is_imm12 (info_var
->inst_offset
));
3096 * Read from the single stepping trigger page. This will cause a
3097 * SIGSEGV when single stepping is enabled.
3098 * We do this _before_ the breakpoint, so single stepping after
3099 * a breakpoint is hit will step to the next IL offset.
3101 g_assert (((guint64
)(gsize
)ss_trigger_page
>> 32) == 0);
3103 if (ins
->flags
& MONO_INST_SINGLE_STEP_LOC
) {
3104 if (cfg
->compile_aot
) {
3105 /* Load the trigger page addr from the variable initialized in the prolog */
3106 var
= ss_trigger_page_var
;
3108 g_assert (var
->opcode
== OP_REGOFFSET
);
3109 g_assert (arm_is_imm12 (var
->inst_offset
));
3110 ARM_LDR_IMM (code
, dreg
, var
->inst_basereg
, var
->inst_offset
);
3112 ARM_LDR_IMM (code
, dreg
, ARMREG_PC
, 0);
3114 *(int*)code
= (int)ss_trigger_page
;
3117 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3120 il_offset
= ins
->inst_imm
;
3122 if (!cfg
->seq_points
)
3123 cfg
->seq_points
= g_ptr_array_new ();
3124 g_ptr_array_add (cfg
->seq_points
, GUINT_TO_POINTER (il_offset
));
3125 g_ptr_array_add (cfg
->seq_points
, GUINT_TO_POINTER (code
- cfg
->native_code
));
3127 if (cfg
->compile_aot
) {
3128 guint32 offset
= code
- cfg
->native_code
;
3131 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
3132 /* Add the offset */
3133 val
= ((offset
/ 4) * sizeof (guint8
*)) + G_STRUCT_OFFSET (SeqPointInfo
, bp_addrs
);
3134 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF), 0);
3136 * Have to emit nops to keep the difference between the offset
3137 * stored in seq_points and breakpoint instruction constant,
3138 * mono_arch_get_ip_for_breakpoint () depends on this.
3141 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF00) >> 8, 24);
3145 ARM_ADD_REG_IMM (code
, dreg
, dreg
, (val
& 0xFF0000) >> 16, 16);
3148 g_assert (!(val
& 0xFF000000));
3149 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3150 ARM_LDR_IMM (code
, dreg
, dreg
, 0);
3152 /* What is faster, a branch or a load ? */
3153 ARM_CMP_REG_IMM (code
, dreg
, 0, 0);
3154 /* The breakpoint instruction */
3155 ARM_LDR_IMM_COND (code
, dreg
, dreg
, 0, ARMCOND_NE
);
3158 * A placeholder for a possible breakpoint inserted by
3159 * mono_arch_set_breakpoint ().
3161 for (i
= 0; i
< 4; ++i
)
3168 ARM_ADDS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3171 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3175 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3178 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3179 g_assert (imm8
>= 0);
3180 ARM_ADDS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3184 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3185 g_assert (imm8
>= 0);
3186 ARM_ADD_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3190 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3191 g_assert (imm8
>= 0);
3192 ARM_ADCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3195 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3196 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3198 case OP_IADD_OVF_UN
:
3199 ARM_ADD_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3200 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3203 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3204 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3206 case OP_ISUB_OVF_UN
:
3207 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3208 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3210 case OP_ADD_OVF_CARRY
:
3211 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3212 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3214 case OP_ADD_OVF_UN_CARRY
:
3215 ARM_ADCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3216 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3218 case OP_SUB_OVF_CARRY
:
3219 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3220 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3222 case OP_SUB_OVF_UN_CARRY
:
3223 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3224 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3228 ARM_SUBS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3231 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3232 g_assert (imm8
>= 0);
3233 ARM_SUBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3236 ARM_SUB_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3240 ARM_SBCS_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3244 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3245 g_assert (imm8
>= 0);
3246 ARM_SUB_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3250 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3251 g_assert (imm8
>= 0);
3252 ARM_SBCS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3254 case OP_ARM_RSBS_IMM
:
3255 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3256 g_assert (imm8
>= 0);
3257 ARM_RSBS_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3259 case OP_ARM_RSC_IMM
:
3260 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3261 g_assert (imm8
>= 0);
3262 ARM_RSC_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3265 ARM_AND_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3269 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3270 g_assert (imm8
>= 0);
3271 ARM_AND_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3279 /* crappy ARM arch doesn't have a DIV instruction */
3280 g_assert_not_reached ();
3282 ARM_ORR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3286 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3287 g_assert (imm8
>= 0);
3288 ARM_ORR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3291 ARM_EOR_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3295 imm8
= mono_arm_is_rotated_imm8 (ins
->inst_imm
, &rot_amount
);
3296 g_assert (imm8
>= 0);
3297 ARM_EOR_REG_IMM (code
, ins
->dreg
, ins
->sreg1
, imm8
, rot_amount
);
3300 ARM_SHL_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3305 ARM_SHL_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3306 else if (ins
->dreg
!= ins
->sreg1
)
3307 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3310 ARM_SAR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3315 ARM_SAR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3316 else if (ins
->dreg
!= ins
->sreg1
)
3317 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3320 case OP_ISHR_UN_IMM
:
3322 ARM_SHR_IMM (code
, ins
->dreg
, ins
->sreg1
, (ins
->inst_imm
& 0x1f));
3323 else if (ins
->dreg
!= ins
->sreg1
)
3324 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3327 ARM_SHR_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3330 ARM_MVN_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3333 ARM_RSB_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 0);
3336 if (ins
->dreg
== ins
->sreg2
)
3337 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3339 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg2
, ins
->sreg1
);
3342 g_assert_not_reached ();
3345 /* FIXME: handle ovf/ sreg2 != dreg */
3346 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3347 /* FIXME: MUL doesn't set the C/O flags on ARM */
3349 case OP_IMUL_OVF_UN
:
3350 /* FIXME: handle ovf/ sreg2 != dreg */
3351 ARM_MUL_REG_REG (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3352 /* FIXME: MUL doesn't set the C/O flags on ARM */
3355 code
= mono_arm_emit_load_imm (code
, ins
->dreg
, ins
->inst_c0
);
3358 /* Load the GOT offset */
3359 mono_add_patch_info (cfg
, offset
, (MonoJumpInfoType
)ins
->inst_i1
, ins
->inst_p0
);
3360 ARM_LDR_IMM (code
, ins
->dreg
, ARMREG_PC
, 0);
3362 *(gpointer
*)code
= NULL
;
3364 /* Load the value from the GOT */
3365 ARM_LDR_REG_REG (code
, ins
->dreg
, ARMREG_PC
, ins
->dreg
);
3367 case OP_ICONV_TO_I4
:
3368 case OP_ICONV_TO_U4
:
3370 if (ins
->dreg
!= ins
->sreg1
)
3371 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3374 int saved
= ins
->sreg2
;
3375 if (ins
->sreg2
== ARM_LSW_REG
) {
3376 ARM_MOV_REG_REG (code
, ARMREG_LR
, ins
->sreg2
);
3379 if (ins
->sreg1
!= ARM_LSW_REG
)
3380 ARM_MOV_REG_REG (code
, ARM_LSW_REG
, ins
->sreg1
);
3381 if (saved
!= ARM_MSW_REG
)
3382 ARM_MOV_REG_REG (code
, ARM_MSW_REG
, saved
);
3387 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
3388 #elif defined(ARM_FPU_VFP)
3389 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
3392 case OP_FCONV_TO_R4
:
3394 ARM_MVFS (code
, ins
->dreg
, ins
->sreg1
);
3395 #elif defined(ARM_FPU_VFP)
3396 ARM_CVTD (code
, ins
->dreg
, ins
->sreg1
);
3397 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3402 * Keep in sync with mono_arch_emit_epilog
3404 g_assert (!cfg
->method
->save_lmf
);
3406 code
= emit_load_volatile_arguments (cfg
, code
);
3408 code
= emit_big_add (code
, ARMREG_SP
, cfg
->frame_reg
, cfg
->stack_usage
);
3409 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
)) | ((1 << ARMREG_LR
)));
3410 mono_add_patch_info (cfg
, (guint8
*) code
- cfg
->native_code
, MONO_PATCH_INFO_METHOD_JUMP
, ins
->inst_p0
);
3411 if (cfg
->compile_aot
) {
3412 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_PC
, 0);
3414 *(gpointer
*)code
= NULL
;
3416 ARM_LDR_REG_REG (code
, ARMREG_PC
, ARMREG_PC
, ARMREG_IP
);
3422 /* ensure ins->sreg1 is not NULL */
3423 ARM_LDR_IMM (code
, ARMREG_LR
, ins
->sreg1
, 0);
3427 if (ppc_is_imm16 (cfg
->sig_cookie
+ cfg
->stack_usage
)) {
3428 ppc_addi (code
, ppc_r11
, cfg
->frame_reg
, cfg
->sig_cookie
+ cfg
->stack_usage
);
3430 ppc_load (code
, ppc_r11
, cfg
->sig_cookie
+ cfg
->stack_usage
);
3431 ppc_add (code
, ppc_r11
, cfg
->frame_reg
, ppc_r11
);
3433 ppc_stw (code
, ppc_r11
, 0, ins
->sreg1
);
3443 call
= (MonoCallInst
*)ins
;
3444 if (ins
->flags
& MONO_INST_HAS_METHOD
)
3445 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_METHOD
, call
->method
);
3447 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_ABS
, call
->fptr
);
3448 code
= emit_call_seq (cfg
, code
);
3449 code
= emit_move_return_value (cfg
, ins
, code
);
3455 case OP_VOIDCALL_REG
:
3457 code
= emit_call_reg (code
, ins
->sreg1
);
3458 code
= emit_move_return_value (cfg
, ins
, code
);
3460 case OP_FCALL_MEMBASE
:
3461 case OP_LCALL_MEMBASE
:
3462 case OP_VCALL_MEMBASE
:
3463 case OP_VCALL2_MEMBASE
:
3464 case OP_VOIDCALL_MEMBASE
:
3465 case OP_CALL_MEMBASE
:
3466 g_assert (arm_is_imm12 (ins
->inst_offset
));
3467 g_assert (ins
->sreg1
!= ARMREG_LR
);
3468 call
= (MonoCallInst
*)ins
;
3469 if (call
->method
->klass
->flags
& TYPE_ATTRIBUTE_INTERFACE
) {
3470 ARM_ADD_REG_IMM8 (code
, ARMREG_LR
, ARMREG_PC
, 4);
3471 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3473 * We can't embed the method in the code stream in PIC code, or
3475 * Instead, we put it in V5 in code emitted by
3476 * mono_arch_emit_imt_argument (), and embed NULL here to
3477 * signal the IMT thunk that the value is in V5.
3479 if (call
->dynamic_imt_arg
)
3480 *((gpointer
*)code
) = NULL
;
3482 *((gpointer
*)code
) = (gpointer
)call
->method
;
3485 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3486 ARM_LDR_IMM (code
, ARMREG_PC
, ins
->sreg1
, ins
->inst_offset
);
3488 code
= emit_move_return_value (cfg
, ins
, code
);
3491 /* keep alignment */
3492 int alloca_waste
= cfg
->param_area
;
3495 /* round the size to 8 bytes */
3496 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->sreg1
, 7);
3497 ARM_BIC_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 7);
3499 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, alloca_waste
);
3500 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ins
->dreg
);
3501 /* memzero the area: dreg holds the size, sp is the pointer */
3502 if (ins
->flags
& MONO_INST_INIT
) {
3503 guint8
*start_loop
, *branch_to_cond
;
3504 ARM_MOV_REG_IMM8 (code
, ARMREG_LR
, 0);
3505 branch_to_cond
= code
;
3508 ARM_STR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ins
->dreg
);
3509 arm_patch (branch_to_cond
, code
);
3510 /* decrement by 4 and set flags */
3511 ARM_SUBS_REG_IMM8 (code
, ins
->dreg
, ins
->dreg
, 4);
3512 ARM_B_COND (code
, ARMCOND_GE
, 0);
3513 arm_patch (code
- 4, start_loop
);
3515 ARM_ADD_REG_IMM8 (code
, ins
->dreg
, ARMREG_SP
, alloca_waste
);
3520 MonoInst
*var
= cfg
->dyn_call_var
;
3522 g_assert (var
->opcode
== OP_REGOFFSET
);
3523 g_assert (arm_is_imm12 (var
->inst_offset
));
3525 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3526 ARM_MOV_REG_REG( code
, ARMREG_LR
, ins
->sreg1
);
3528 ARM_MOV_REG_REG( code
, ARMREG_IP
, ins
->sreg2
);
3530 /* Save args buffer */
3531 ARM_STR_IMM (code
, ARMREG_LR
, var
->inst_basereg
, var
->inst_offset
);
3533 /* Set stack slots using R0 as scratch reg */
3534 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3535 for (i
= 0; i
< DYN_CALL_STACK_ARGS
; ++i
) {
3536 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_LR
, (PARAM_REGS
+ i
) * sizeof (gpointer
));
3537 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_SP
, i
* sizeof (gpointer
));
3540 /* Set argument registers */
3541 for (i
= 0; i
< PARAM_REGS
; ++i
)
3542 ARM_LDR_IMM (code
, i
, ARMREG_LR
, i
* sizeof (gpointer
));
3545 ARM_MOV_REG_REG (code
, ARMREG_LR
, ARMREG_PC
);
3546 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3549 ARM_LDR_IMM (code
, ARMREG_IP
, var
->inst_basereg
, var
->inst_offset
);
3550 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res
));
3551 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_IP
, G_STRUCT_OFFSET (DynCallArgs
, res2
));
3555 if (ins
->sreg1
!= ARMREG_R0
)
3556 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3557 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3558 (gpointer
)"mono_arch_throw_exception");
3559 code
= emit_call_seq (cfg
, code
);
3563 if (ins
->sreg1
!= ARMREG_R0
)
3564 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3565 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
3566 (gpointer
)"mono_arch_rethrow_exception");
3567 code
= emit_call_seq (cfg
, code
);
3570 case OP_START_HANDLER
: {
3571 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3573 if (arm_is_imm12 (spvar
->inst_offset
)) {
3574 ARM_STR_IMM (code
, ARMREG_LR
, spvar
->inst_basereg
, spvar
->inst_offset
);
3576 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3577 ARM_STR_REG_REG (code
, ARMREG_LR
, spvar
->inst_basereg
, ARMREG_IP
);
3581 case OP_ENDFILTER
: {
3582 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3584 if (ins
->sreg1
!= ARMREG_R0
)
3585 ARM_MOV_REG_REG (code
, ARMREG_R0
, ins
->sreg1
);
3586 if (arm_is_imm12 (spvar
->inst_offset
)) {
3587 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3589 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3590 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3591 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3593 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3596 case OP_ENDFINALLY
: {
3597 MonoInst
*spvar
= mono_find_spvar_for_region (cfg
, bb
->region
);
3599 if (arm_is_imm12 (spvar
->inst_offset
)) {
3600 ARM_LDR_IMM (code
, ARMREG_IP
, spvar
->inst_basereg
, spvar
->inst_offset
);
3602 g_assert (ARMREG_IP
!= spvar
->inst_basereg
);
3603 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, spvar
->inst_offset
);
3604 ARM_LDR_REG_REG (code
, ARMREG_IP
, spvar
->inst_basereg
, ARMREG_IP
);
3606 ARM_MOV_REG_REG (code
, ARMREG_PC
, ARMREG_IP
);
3609 case OP_CALL_HANDLER
:
3610 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3614 ins
->inst_c0
= code
- cfg
->native_code
;
3617 /*if (ins->inst_target_bb->native_offset) {
3619 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3621 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_BB
, ins
->inst_target_bb
);
3626 ARM_MOV_REG_REG (code
, ARMREG_PC
, ins
->sreg1
);
3630 * In the normal case we have:
3631 * ldr pc, [pc, ins->sreg1 << 2]
3634 * ldr lr, [pc, ins->sreg1 << 2]
3636 * After follows the data.
3637 * FIXME: add aot support.
3639 mono_add_patch_info (cfg
, offset
, MONO_PATCH_INFO_SWITCH
, ins
->inst_p0
);
3640 max_len
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3641 if (offset
> (cfg
->code_size
- max_len
- 16)) {
3642 cfg
->code_size
+= max_len
;
3643 cfg
->code_size
*= 2;
3644 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
3645 code
= cfg
->native_code
+ offset
;
3647 ARM_LDR_REG_REG_SHIFT (code
, ARMREG_PC
, ARMREG_PC
, ins
->sreg1
, ARMSHIFT_LSL
, 2);
3649 code
+= 4 * GPOINTER_TO_INT (ins
->klass
);
3653 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
3654 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
3658 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3659 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LT
);
3663 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3664 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_LO
);
3668 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3669 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_GT
);
3673 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
3674 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_HI
);
3676 case OP_COND_EXC_EQ
:
3677 case OP_COND_EXC_NE_UN
:
3678 case OP_COND_EXC_LT
:
3679 case OP_COND_EXC_LT_UN
:
3680 case OP_COND_EXC_GT
:
3681 case OP_COND_EXC_GT_UN
:
3682 case OP_COND_EXC_GE
:
3683 case OP_COND_EXC_GE_UN
:
3684 case OP_COND_EXC_LE
:
3685 case OP_COND_EXC_LE_UN
:
3686 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_EQ
, ins
->inst_p1
);
3688 case OP_COND_EXC_IEQ
:
3689 case OP_COND_EXC_INE_UN
:
3690 case OP_COND_EXC_ILT
:
3691 case OP_COND_EXC_ILT_UN
:
3692 case OP_COND_EXC_IGT
:
3693 case OP_COND_EXC_IGT_UN
:
3694 case OP_COND_EXC_IGE
:
3695 case OP_COND_EXC_IGE_UN
:
3696 case OP_COND_EXC_ILE
:
3697 case OP_COND_EXC_ILE_UN
:
3698 EMIT_COND_SYSTEM_EXCEPTION (ins
->opcode
- OP_COND_EXC_IEQ
, ins
->inst_p1
);
3701 case OP_COND_EXC_IC
:
3702 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS
, ins
->inst_p1
);
3704 case OP_COND_EXC_OV
:
3705 case OP_COND_EXC_IOV
:
3706 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, ins
->inst_p1
);
3708 case OP_COND_EXC_NC
:
3709 case OP_COND_EXC_INC
:
3710 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC
, ins
->inst_p1
);
3712 case OP_COND_EXC_NO
:
3713 case OP_COND_EXC_INO
:
3714 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC
, ins
->inst_p1
);
3726 EMIT_COND_BRANCH (ins
, ins
->opcode
- OP_IBEQ
);
3729 /* floating point opcodes */
3732 if (cfg
->compile_aot
) {
3733 ARM_LDFD (code
, ins
->dreg
, ARMREG_PC
, 0);
3735 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3737 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3740 /* FIXME: we can optimize the imm load by dealing with part of
3741 * the displacement in LDFD (aligning to 512).
3743 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3744 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3748 if (cfg
->compile_aot
) {
3749 ARM_LDFS (code
, ins
->dreg
, ARMREG_PC
, 0);
3751 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3754 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3755 ARM_LDFS (code
, ins
->dreg
, ARMREG_LR
, 0);
3758 case OP_STORER8_MEMBASE_REG
:
3759 /* This is generated by the local regalloc pass which runs after the lowering pass */
3760 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3761 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3762 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
3763 ARM_STFD (code
, ins
->sreg1
, ARMREG_LR
, 0);
3765 ARM_STFD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3768 case OP_LOADR8_MEMBASE
:
3769 /* This is generated by the local regalloc pass which runs after the lowering pass */
3770 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3771 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3772 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
3773 ARM_LDFD (code
, ins
->dreg
, ARMREG_LR
, 0);
3775 ARM_LDFD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3778 case OP_STORER4_MEMBASE_REG
:
3779 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3780 ARM_STFS (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3782 case OP_LOADR4_MEMBASE
:
3783 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3784 ARM_LDFS (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3786 case OP_ICONV_TO_R_UN
: {
3788 tmpreg
= ins
->dreg
== 0? 1: 0;
3789 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
3790 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3791 ARM_B_COND (code
, ARMCOND_GE
, 8);
3792 /* save the temp register */
3793 ARM_SUB_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3794 ARM_STFD (code
, tmpreg
, ARMREG_SP
, 0);
3795 ARM_LDFD (code
, tmpreg
, ARMREG_PC
, 12);
3796 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->dreg
, tmpreg
);
3797 ARM_LDFD (code
, tmpreg
, ARMREG_SP
, 0);
3798 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_SP
, 8);
3799 /* skip the constant pool */
3802 *(int*)code
= 0x41f00000;
3807 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3808 * adfltd fdest, fdest, ftemp
3812 case OP_ICONV_TO_R4
:
3813 ARM_FLTS (code
, ins
->dreg
, ins
->sreg1
);
3815 case OP_ICONV_TO_R8
:
3816 ARM_FLTD (code
, ins
->dreg
, ins
->sreg1
);
3819 #elif defined(ARM_FPU_VFP)
3822 if (cfg
->compile_aot
) {
3823 ARM_FLDD (code
, ins
->dreg
, ARMREG_PC
, 0);
3825 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3827 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[1];
3830 /* FIXME: we can optimize the imm load by dealing with part of
3831 * the displacement in LDFD (aligning to 512).
3833 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3834 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
3838 if (cfg
->compile_aot
) {
3839 ARM_FLDS (code
, ins
->dreg
, ARMREG_PC
, 0);
3841 *(guint32
*)code
= ((guint32
*)(ins
->inst_p0
))[0];
3843 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3845 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, (guint32
)ins
->inst_p0
);
3846 ARM_FLDS (code
, ins
->dreg
, ARMREG_LR
, 0);
3847 ARM_CVTS (code
, ins
->dreg
, ins
->dreg
);
3850 case OP_STORER8_MEMBASE_REG
:
3851 /* This is generated by the local regalloc pass which runs after the lowering pass */
3852 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3853 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3854 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_destbasereg
);
3855 ARM_FSTD (code
, ins
->sreg1
, ARMREG_LR
, 0);
3857 ARM_FSTD (code
, ins
->sreg1
, ins
->inst_destbasereg
, ins
->inst_offset
);
3860 case OP_LOADR8_MEMBASE
:
3861 /* This is generated by the local regalloc pass which runs after the lowering pass */
3862 if (!arm_is_fpimm8 (ins
->inst_offset
)) {
3863 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
3864 ARM_ADD_REG_REG (code
, ARMREG_LR
, ARMREG_LR
, ins
->inst_basereg
);
3865 ARM_FLDD (code
, ins
->dreg
, ARMREG_LR
, 0);
3867 ARM_FLDD (code
, ins
->dreg
, ins
->inst_basereg
, ins
->inst_offset
);
3870 case OP_STORER4_MEMBASE_REG
:
3871 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3872 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
3873 ARM_FSTS (code
, ARM_VFP_F0
, ins
->inst_destbasereg
, ins
->inst_offset
);
3875 case OP_LOADR4_MEMBASE
:
3876 g_assert (arm_is_fpimm8 (ins
->inst_offset
));
3877 ARM_FLDS (code
, ARM_VFP_F0
, ins
->inst_basereg
, ins
->inst_offset
);
3878 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
3880 case OP_ICONV_TO_R_UN
: {
3881 g_assert_not_reached ();
3884 case OP_ICONV_TO_R4
:
3885 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
3886 ARM_FSITOS (code
, ARM_VFP_F0
, ARM_VFP_F0
);
3887 ARM_CVTS (code
, ins
->dreg
, ARM_VFP_F0
);
3889 case OP_ICONV_TO_R8
:
3890 ARM_FMSR (code
, ARM_VFP_F0
, ins
->sreg1
);
3891 ARM_FSITOD (code
, ins
->dreg
, ARM_VFP_F0
);
3895 if (mono_method_signature (cfg
->method
)->ret
->type
== MONO_TYPE_R4
) {
3896 ARM_CVTD (code
, ARM_VFP_F0
, ins
->sreg1
);
3897 ARM_FMRS (code
, ARMREG_R0
, ARM_VFP_F0
);
3899 ARM_FMRRD (code
, ARMREG_R0
, ARMREG_R1
, ins
->sreg1
);
3905 case OP_FCONV_TO_I1
:
3906 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, TRUE
);
3908 case OP_FCONV_TO_U1
:
3909 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 1, FALSE
);
3911 case OP_FCONV_TO_I2
:
3912 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, TRUE
);
3914 case OP_FCONV_TO_U2
:
3915 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 2, FALSE
);
3917 case OP_FCONV_TO_I4
:
3919 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, TRUE
);
3921 case OP_FCONV_TO_U4
:
3923 code
= emit_float_to_int (cfg
, code
, ins
->dreg
, ins
->sreg1
, 4, FALSE
);
3925 case OP_FCONV_TO_I8
:
3926 case OP_FCONV_TO_U8
:
3927 g_assert_not_reached ();
3928 /* Implemented as helper calls */
3930 case OP_LCONV_TO_R_UN
:
3931 g_assert_not_reached ();
3932 /* Implemented as helper calls */
3934 case OP_LCONV_TO_OVF_I4_2
: {
3935 guint8
*high_bit_not_set
, *valid_negative
, *invalid_negative
, *valid_positive
;
3937 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3940 ARM_CMP_REG_IMM8 (code
, ins
->sreg1
, 0);
3941 high_bit_not_set
= code
;
3942 ARM_B_COND (code
, ARMCOND_GE
, 0); /*branch if bit 31 of the lower part is not set*/
3944 ARM_CMN_REG_IMM8 (code
, ins
->sreg2
, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3945 valid_negative
= code
;
3946 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3947 invalid_negative
= code
;
3948 ARM_B_COND (code
, ARMCOND_AL
, 0);
3950 arm_patch (high_bit_not_set
, code
);
3952 ARM_CMP_REG_IMM8 (code
, ins
->sreg2
, 0);
3953 valid_positive
= code
;
3954 ARM_B_COND (code
, ARMCOND_EQ
, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3956 arm_patch (invalid_negative
, code
);
3957 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL
, "OverflowException");
3959 arm_patch (valid_negative
, code
);
3960 arm_patch (valid_positive
, code
);
3962 if (ins
->dreg
!= ins
->sreg1
)
3963 ARM_MOV_REG_REG (code
, ins
->dreg
, ins
->sreg1
);
3968 ARM_FPA_ADFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3971 ARM_FPA_SUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3974 ARM_FPA_MUFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3977 ARM_FPA_DVFD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3980 ARM_MNFD (code
, ins
->dreg
, ins
->sreg1
);
3982 #elif defined(ARM_FPU_VFP)
3984 ARM_VFP_ADDD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3987 ARM_VFP_SUBD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3990 ARM_VFP_MULD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3993 ARM_VFP_DIVD (code
, ins
->dreg
, ins
->sreg1
, ins
->sreg2
);
3996 ARM_NEGD (code
, ins
->dreg
, ins
->sreg1
);
4001 g_assert_not_reached ();
4005 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4006 #elif defined(ARM_FPU_VFP)
4007 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4013 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4014 #elif defined(ARM_FPU_VFP)
4015 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4018 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 0, ARMCOND_NE
);
4019 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_EQ
);
4023 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4024 #elif defined(ARM_FPU_VFP)
4025 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4028 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4029 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4033 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg1
, ins
->sreg2
);
4034 #elif defined(ARM_FPU_VFP)
4035 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg2
);
4038 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4039 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4040 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4045 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4046 #elif defined(ARM_FPU_VFP)
4047 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4050 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4051 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4056 ARM_FCMP (code
, ARM_FPA_CMF
, ins
->sreg2
, ins
->sreg1
);
4057 #elif defined(ARM_FPU_VFP)
4058 ARM_CMPD (code
, ins
->sreg2
, ins
->sreg1
);
4061 ARM_MOV_REG_IMM8 (code
, ins
->dreg
, 0);
4062 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_MI
);
4063 ARM_MOV_REG_IMM8_COND (code
, ins
->dreg
, 1, ARMCOND_VS
);
4065 /* ARM FPA flags table:
4066 * N Less than ARMCOND_MI
4067 * Z Equal ARMCOND_EQ
4068 * C Greater Than or Equal ARMCOND_CS
4069 * V Unordered ARMCOND_VS
4072 EMIT_COND_BRANCH (ins
, OP_IBEQ
- OP_IBEQ
);
4075 EMIT_COND_BRANCH (ins
, OP_IBNE_UN
- OP_IBEQ
);
4078 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4081 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4082 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_MI
); /* N set */
4088 g_assert_not_reached ();
4092 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4094 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4095 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_EQ
);
4096 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_CS
);
4100 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_VS
); /* V set */
4101 EMIT_COND_BRANCH_FLAGS (ins
, ARMCOND_GE
);
4106 if (ins
->dreg
!= ins
->sreg1
)
4107 ARM_MVFD (code
, ins
->dreg
, ins
->sreg1
);
4108 #elif defined(ARM_FPU_VFP)
4109 ARM_ABSD (code
, ARM_VFP_D1
, ins
->sreg1
);
4110 ARM_FLDD (code
, ARM_VFP_D0
, ARMREG_PC
, 0);
4112 *(guint32
*)code
= 0xffffffff;
4114 *(guint32
*)code
= 0x7fefffff;
4116 ARM_CMPD (code
, ARM_VFP_D1
, ARM_VFP_D0
);
4118 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT
, "ArithmeticException");
4119 ARM_CMPD (code
, ins
->sreg1
, ins
->sreg1
);
4121 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS
, "ArithmeticException");
4123 ARM_CPYD (code
, ins
->dreg
, ins
->sreg1
);
4128 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins
->opcode
), __FUNCTION__
);
4129 g_assert_not_reached ();
4132 if ((cfg
->opt
& MONO_OPT_BRANCH
) && ((code
- cfg
->native_code
- offset
) > max_len
)) {
4133 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4134 mono_inst_name (ins
->opcode
), max_len
, code
- cfg
->native_code
- offset
);
4135 g_assert_not_reached ();
4141 last_offset
= offset
;
4144 cfg
->code_len
= code
- cfg
->native_code
;
4147 #endif /* DISABLE_JIT */
4149 #ifdef HAVE_AEABI_READ_TP
4150 void __aeabi_read_tp (void);
4154 mono_arch_register_lowlevel_calls (void)
4156 /* The signature doesn't matter */
4157 mono_register_jit_icall (mono_arm_throw_exception
, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE
);
4158 mono_register_jit_icall (mono_arm_throw_exception_by_token
, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE
);
4160 #ifdef HAVE_AEABI_READ_TP
4161 mono_register_jit_icall (__aeabi_read_tp
, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE
);
/*
 * patch_lis_ori:
 * Patch the immediate halves of a lis/ori instruction pair with VAL.
 * NOTE(review): lis/ori are PowerPC opcodes, so this macro looks like a
 * leftover from the PPC backend; in this file it is only referenced from
 * g_assert_not_reached () branches of mono_arch_patch_code (), i.e. it is
 * effectively dead on ARM - confirm before removing.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
	} while (0)
4172 mono_arch_patch_code (MonoMethod
*method
, MonoDomain
*domain
, guint8
*code
, MonoJumpInfo
*ji
, gboolean run_cctors
)
4174 MonoJumpInfo
*patch_info
;
4175 gboolean compile_aot
= !run_cctors
;
4177 for (patch_info
= ji
; patch_info
; patch_info
= patch_info
->next
) {
4178 unsigned char *ip
= patch_info
->ip
.i
+ code
;
4179 const unsigned char *target
;
4181 if (patch_info
->type
== MONO_PATCH_INFO_SWITCH
&& !compile_aot
) {
4182 gpointer
*jt
= (gpointer
*)(ip
+ 8);
4184 /* jt is the inlined jump table, 2 instructions after ip
4185 * In the normal case we store the absolute addresses,
4186 * otherwise the displacements.
4188 for (i
= 0; i
< patch_info
->data
.table
->table_size
; i
++)
4189 jt
[i
] = code
+ (int)patch_info
->data
.table
->table
[i
];
4192 target
= mono_resolve_patch_target (method
, domain
, code
, patch_info
, run_cctors
);
4195 switch (patch_info
->type
) {
4196 case MONO_PATCH_INFO_BB
:
4197 case MONO_PATCH_INFO_LABEL
:
4200 /* No need to patch these */
4205 switch (patch_info
->type
) {
4206 case MONO_PATCH_INFO_IP
:
4207 g_assert_not_reached ();
4208 patch_lis_ori (ip
, ip
);
4210 case MONO_PATCH_INFO_METHOD_REL
:
4211 g_assert_not_reached ();
4212 *((gpointer
*)(ip
)) = code
+ patch_info
->data
.offset
;
4214 case MONO_PATCH_INFO_METHODCONST
:
4215 case MONO_PATCH_INFO_CLASS
:
4216 case MONO_PATCH_INFO_IMAGE
:
4217 case MONO_PATCH_INFO_FIELD
:
4218 case MONO_PATCH_INFO_VTABLE
:
4219 case MONO_PATCH_INFO_IID
:
4220 case MONO_PATCH_INFO_SFLDA
:
4221 case MONO_PATCH_INFO_LDSTR
:
4222 case MONO_PATCH_INFO_TYPE_FROM_HANDLE
:
4223 case MONO_PATCH_INFO_LDTOKEN
:
4224 g_assert_not_reached ();
4225 /* from OP_AOTCONST : lis + ori */
4226 patch_lis_ori (ip
, target
);
4228 case MONO_PATCH_INFO_R4
:
4229 case MONO_PATCH_INFO_R8
:
4230 g_assert_not_reached ();
4231 *((gconstpointer
*)(ip
+ 2)) = patch_info
->data
.target
;
4233 case MONO_PATCH_INFO_EXC_NAME
:
4234 g_assert_not_reached ();
4235 *((gconstpointer
*)(ip
+ 1)) = patch_info
->data
.name
;
4237 case MONO_PATCH_INFO_NONE
:
4238 case MONO_PATCH_INFO_BB_OVF
:
4239 case MONO_PATCH_INFO_EXC_OVF
:
4240 /* everything is dealt with at epilog output time */
4245 arm_patch_general (domain
, ip
, target
);
/*
 * Stack frame layout:
 *
 * ------------------- fp
 * 	MonoLMF structure or saved registers
 * -------------------
 * 	locals
 * -------------------
 * 	spilled regs
 * -------------------
 * 	optional 8 bytes for tracing
 * -------------------
 * 	param area size is cfg->param_area
 * ------------------- sp
 */
4265 mono_arch_emit_prolog (MonoCompile
*cfg
)
4267 MonoMethod
*method
= cfg
->method
;
4269 MonoMethodSignature
*sig
;
4271 int alloc_size
, pos
, max_offset
, i
, rot_amount
;
4276 int prev_sp_offset
, reg_offset
;
4278 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
))
4281 sig
= mono_method_signature (method
);
4282 cfg
->code_size
= 256 + sig
->param_count
* 20;
4283 code
= cfg
->native_code
= g_malloc (cfg
->code_size
);
4285 mono_emit_unwind_op_def_cfa (cfg
, code
, ARMREG_SP
, 0);
4287 ARM_MOV_REG_REG (code
, ARMREG_IP
, ARMREG_SP
);
4289 alloc_size
= cfg
->stack_offset
;
4292 if (!method
->save_lmf
) {
4293 /* We save SP by storing it into IP and saving IP */
4294 ARM_PUSH (code
, (cfg
->used_int_regs
| (1 << ARMREG_IP
) | (1 << ARMREG_LR
)));
4295 prev_sp_offset
= 8; /* ip and lr */
4296 for (i
= 0; i
< 16; ++i
) {
4297 if (cfg
->used_int_regs
& (1 << i
))
4298 prev_sp_offset
+= 4;
4300 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4302 for (i
= 0; i
< 16; ++i
) {
4303 if ((cfg
->used_int_regs
& (1 << i
)) || (i
== ARMREG_IP
) || (i
== ARMREG_LR
)) {
4304 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4309 ARM_PUSH (code
, 0x5ff0);
4310 prev_sp_offset
= 4 * 10; /* all but r0-r3, sp and pc */
4311 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
);
4313 for (i
= 0; i
< 16; ++i
) {
4314 if ((i
> ARMREG_R3
) && (i
!= ARMREG_SP
) && (i
!= ARMREG_PC
)) {
4315 mono_emit_unwind_op_offset (cfg
, code
, i
, (- prev_sp_offset
) + reg_offset
);
4319 pos
+= sizeof (MonoLMF
) - prev_sp_offset
;
4323 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4324 if (alloc_size
& (MONO_ARCH_FRAME_ALIGNMENT
- 1)) {
4325 alloc_size
+= MONO_ARCH_FRAME_ALIGNMENT
- 1;
4326 alloc_size
&= ~(MONO_ARCH_FRAME_ALIGNMENT
- 1);
4329 /* the stack used in the pushed regs */
4330 if (prev_sp_offset
& 4)
4332 cfg
->stack_usage
= alloc_size
;
4334 if ((i
= mono_arm_is_rotated_imm8 (alloc_size
, &rot_amount
)) >= 0) {
4335 ARM_SUB_REG_IMM (code
, ARMREG_SP
, ARMREG_SP
, i
, rot_amount
);
4337 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, alloc_size
);
4338 ARM_SUB_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4340 mono_emit_unwind_op_def_cfa_offset (cfg
, code
, prev_sp_offset
+ alloc_size
);
4342 if (cfg
->frame_reg
!= ARMREG_SP
) {
4343 ARM_MOV_REG_REG (code
, cfg
->frame_reg
, ARMREG_SP
);
4344 mono_emit_unwind_op_def_cfa_reg (cfg
, code
, cfg
->frame_reg
);
4346 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4347 prev_sp_offset
+= alloc_size
;
4349 /* compute max_offset in order to use short forward jumps
4350 * we could skip do it on arm because the immediate displacement
4351 * for jumps is large enough, it may be useful later for constant pools
4354 for (bb
= cfg
->bb_entry
; bb
; bb
= bb
->next_bb
) {
4355 MonoInst
*ins
= bb
->code
;
4356 bb
->max_offset
= max_offset
;
4358 if (cfg
->prof_options
& MONO_PROFILE_COVERAGE
)
4361 MONO_BB_FOR_EACH_INS (bb
, ins
)
4362 max_offset
+= ((guint8
*)ins_get_spec (ins
->opcode
))[MONO_INST_LEN
];
4365 /* store runtime generic context */
4366 if (cfg
->rgctx_var
) {
4367 MonoInst
*ins
= cfg
->rgctx_var
;
4369 g_assert (ins
->opcode
== OP_REGOFFSET
);
4371 if (arm_is_imm12 (ins
->inst_offset
)) {
4372 ARM_STR_IMM (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ins
->inst_offset
);
4374 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4375 ARM_STR_REG_REG (code
, MONO_ARCH_RGCTX_REG
, ins
->inst_basereg
, ARMREG_LR
);
4379 /* load arguments allocated to register from the stack */
4382 cinfo
= get_call_info (sig
, sig
->pinvoke
);
4384 if (MONO_TYPE_ISSTRUCT (sig
->ret
)) {
4385 ArgInfo
*ainfo
= &cinfo
->ret
;
4386 inst
= cfg
->vret_addr
;
4387 g_assert (arm_is_imm12 (inst
->inst_offset
));
4388 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4390 for (i
= 0; i
< sig
->param_count
+ sig
->hasthis
; ++i
) {
4391 ArgInfo
*ainfo
= cinfo
->args
+ i
;
4392 inst
= cfg
->args
[pos
];
4394 if (cfg
->verbose_level
> 2)
4395 g_print ("Saving argument %d (type: %d)\n", i
, ainfo
->regtype
);
4396 if (inst
->opcode
== OP_REGVAR
) {
4397 if (ainfo
->regtype
== RegTypeGeneral
)
4398 ARM_MOV_REG_REG (code
, inst
->dreg
, ainfo
->reg
);
4399 else if (ainfo
->regtype
== RegTypeFP
) {
4400 g_assert_not_reached ();
4401 } else if (ainfo
->regtype
== RegTypeBase
) {
4402 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4403 ARM_LDR_IMM (code
, inst
->dreg
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4405 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4406 ARM_LDR_REG_REG (code
, inst
->dreg
, ARMREG_SP
, ARMREG_IP
);
4409 g_assert_not_reached ();
4411 if (cfg
->verbose_level
> 2)
4412 g_print ("Argument %d assigned to register %s\n", pos
, mono_arch_regname (inst
->dreg
));
4414 /* the argument should be put on the stack: FIXME handle size != word */
4415 if (ainfo
->regtype
== RegTypeGeneral
|| ainfo
->regtype
== RegTypeIRegPair
) {
4416 switch (ainfo
->size
) {
4418 if (arm_is_imm12 (inst
->inst_offset
))
4419 ARM_STRB_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4421 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4422 ARM_STRB_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4426 if (arm_is_imm8 (inst
->inst_offset
)) {
4427 ARM_STRH_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4429 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4430 ARM_STRH_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4434 g_assert (arm_is_imm12 (inst
->inst_offset
));
4435 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4436 g_assert (arm_is_imm12 (inst
->inst_offset
+ 4));
4437 ARM_STR_IMM (code
, ainfo
->reg
+ 1, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4440 if (arm_is_imm12 (inst
->inst_offset
)) {
4441 ARM_STR_IMM (code
, ainfo
->reg
, inst
->inst_basereg
, inst
->inst_offset
);
4443 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4444 ARM_STR_REG_REG (code
, ainfo
->reg
, inst
->inst_basereg
, ARMREG_IP
);
4448 } else if (ainfo
->regtype
== RegTypeBaseGen
) {
4449 g_assert (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
));
4450 g_assert (arm_is_imm12 (inst
->inst_offset
));
4451 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4452 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4453 ARM_STR_IMM (code
, ARMREG_R3
, inst
->inst_basereg
, inst
->inst_offset
);
4454 } else if (ainfo
->regtype
== RegTypeBase
) {
4455 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
)) {
4456 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
));
4458 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
);
4459 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4462 switch (ainfo
->size
) {
4464 if (arm_is_imm8 (inst
->inst_offset
)) {
4465 ARM_STRB_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4467 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4468 ARM_STRB_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4472 if (arm_is_imm8 (inst
->inst_offset
)) {
4473 ARM_STRH_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4475 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4476 ARM_STRH_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4480 if (arm_is_imm12 (inst
->inst_offset
)) {
4481 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4483 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4484 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4486 if (arm_is_imm12 (prev_sp_offset
+ ainfo
->offset
+ 4)) {
4487 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_SP
, (prev_sp_offset
+ ainfo
->offset
+ 4));
4489 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, prev_sp_offset
+ ainfo
->offset
+ 4);
4490 ARM_LDR_REG_REG (code
, ARMREG_LR
, ARMREG_SP
, ARMREG_IP
);
4492 if (arm_is_imm12 (inst
->inst_offset
+ 4)) {
4493 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
+ 4);
4495 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
+ 4);
4496 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4500 if (arm_is_imm12 (inst
->inst_offset
)) {
4501 ARM_STR_IMM (code
, ARMREG_LR
, inst
->inst_basereg
, inst
->inst_offset
);
4503 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, inst
->inst_offset
);
4504 ARM_STR_REG_REG (code
, ARMREG_LR
, inst
->inst_basereg
, ARMREG_IP
);
4508 } else if (ainfo
->regtype
== RegTypeFP
) {
4509 g_assert_not_reached ();
4510 } else if (ainfo
->regtype
== RegTypeStructByVal
) {
4511 int doffset
= inst
->inst_offset
;
4515 size
= mini_type_stack_size_full (cfg
->generic_sharing_context
, inst
->inst_vtype
, NULL
, sig
->pinvoke
);
4516 for (cur_reg
= 0; cur_reg
< ainfo
->size
; ++cur_reg
) {
4517 if (arm_is_imm12 (doffset
)) {
4518 ARM_STR_IMM (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, doffset
);
4520 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, doffset
);
4521 ARM_STR_REG_REG (code
, ainfo
->reg
+ cur_reg
, inst
->inst_basereg
, ARMREG_IP
);
4523 soffset
+= sizeof (gpointer
);
4524 doffset
+= sizeof (gpointer
);
4526 if (ainfo
->vtsize
) {
4527 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4528 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4529 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, doffset
, ARMREG_SP
, prev_sp_offset
+ ainfo
->offset
);
4531 } else if (ainfo
->regtype
== RegTypeStructByAddr
) {
4532 g_assert_not_reached ();
4533 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4534 code
= emit_memcpy (code
, ainfo
->vtsize
* sizeof (gpointer
), inst
->inst_basereg
, inst
->inst_offset
, ainfo
->reg
, 0);
4536 g_assert_not_reached ();
4541 if (method
->wrapper_type
== MONO_WRAPPER_NATIVE_TO_MANAGED
) {
4542 if (cfg
->compile_aot
)
4543 /* AOT code is only used in the root domain */
4544 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, 0);
4546 code
= mono_arm_emit_load_imm (code
, ARMREG_R0
, (guint32
)cfg
->domain
);
4547 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4548 (gpointer
)"mono_jit_thread_attach");
4549 code
= emit_call_seq (cfg
, code
);
4552 if (method
->save_lmf
) {
4553 gboolean get_lmf_fast
= FALSE
;
4555 #ifdef HAVE_AEABI_READ_TP
4556 gint32 lmf_addr_tls_offset
= mono_get_lmf_addr_tls_offset ();
4558 if (lmf_addr_tls_offset
!= -1) {
4559 get_lmf_fast
= TRUE
;
4561 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4562 (gpointer
)"__aeabi_read_tp");
4563 code
= emit_call_seq (cfg
, code
);
4565 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_R0
, lmf_addr_tls_offset
);
4566 get_lmf_fast
= TRUE
;
4569 if (!get_lmf_fast
) {
4570 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_INTERNAL_METHOD
,
4571 (gpointer
)"mono_get_lmf_addr");
4572 code
= emit_call_seq (cfg
, code
);
4574 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4575 /* lmf_offset is the offset from the previous stack pointer,
4576 * alloc_size is the total stack space allocated, so the offset
4577 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4578 * The pointer to the struct is put in r1 (new_lmf).
4579 * r2 is used as scratch
4580 * The callee-saved registers are already in the MonoLMF structure
4582 code
= emit_big_add (code
, ARMREG_R1
, ARMREG_SP
, alloc_size
- lmf_offset
);
4583 /* r0 is the result from mono_get_lmf_addr () */
4584 ARM_STR_IMM (code
, ARMREG_R0
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4585 /* new_lmf->previous_lmf = *lmf_addr */
4586 ARM_LDR_IMM (code
, ARMREG_R2
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4587 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4588 /* *(lmf_addr) = r1 */
4589 ARM_STR_IMM (code
, ARMREG_R1
, ARMREG_R0
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4590 /* Skip method (only needed for trampoline LMF frames) */
4591 ARM_STR_IMM (code
, ARMREG_SP
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, ebp
));
4592 /* save the current IP */
4593 ARM_MOV_REG_REG (code
, ARMREG_R2
, ARMREG_PC
);
4594 ARM_STR_IMM (code
, ARMREG_R2
, ARMREG_R1
, G_STRUCT_OFFSET (MonoLMF
, eip
));
4598 code
= mono_arch_instrument_prolog (cfg
, mono_trace_enter_method
, code
, TRUE
);
4600 if (cfg
->arch
.seq_point_info_var
) {
4601 MonoInst
*ins
= cfg
->arch
.seq_point_info_var
;
4603 /* Initialize the variable from a GOT slot */
4604 mono_add_patch_info (cfg
, code
- cfg
->native_code
, MONO_PATCH_INFO_SEQ_POINT_INFO
, cfg
->method
);
4605 ARM_LDR_IMM (code
, ARMREG_R0
, ARMREG_PC
, 0);
4607 *(gpointer
*)code
= NULL
;
4609 ARM_LDR_REG_REG (code
, ARMREG_R0
, ARMREG_PC
, ARMREG_R0
);
4611 g_assert (ins
->opcode
== OP_REGOFFSET
);
4613 if (arm_is_imm12 (ins
->inst_offset
)) {
4614 ARM_STR_IMM (code
, ARMREG_R0
, ins
->inst_basereg
, ins
->inst_offset
);
4616 code
= mono_arm_emit_load_imm (code
, ARMREG_LR
, ins
->inst_offset
);
4617 ARM_STR_REG_REG (code
, ARMREG_R0
, ins
->inst_basereg
, ARMREG_LR
);
4621 /* Initialize ss_trigger_page_var */
4623 MonoInst
*info_var
= cfg
->arch
.seq_point_info_var
;
4624 MonoInst
*ss_trigger_page_var
= cfg
->arch
.ss_trigger_page_var
;
4625 int dreg
= ARMREG_LR
;
4628 g_assert (info_var
->opcode
== OP_REGOFFSET
);
4629 g_assert (arm_is_imm12 (info_var
->inst_offset
));
4631 ARM_LDR_IMM (code
, dreg
, info_var
->inst_basereg
, info_var
->inst_offset
);
4632 /* Load the trigger page addr */
4633 ARM_LDR_IMM (code
, dreg
, dreg
, G_STRUCT_OFFSET (SeqPointInfo
, ss_trigger_page
));
4634 ARM_STR_IMM (code
, dreg
, ss_trigger_page_var
->inst_basereg
, ss_trigger_page_var
->inst_offset
);
4638 cfg
->code_len
= code
- cfg
->native_code
;
4639 g_assert (cfg
->code_len
< cfg
->code_size
);
4646 mono_arch_emit_epilog (MonoCompile
*cfg
)
4648 MonoMethod
*method
= cfg
->method
;
4649 int pos
, i
, rot_amount
;
4650 int max_epilog_size
= 16 + 20*4;
4653 if (cfg
->method
->save_lmf
)
4654 max_epilog_size
+= 128;
4656 if (mono_jit_trace_calls
!= NULL
)
4657 max_epilog_size
+= 50;
4659 if (cfg
->prof_options
& MONO_PROFILE_ENTER_LEAVE
)
4660 max_epilog_size
+= 50;
4662 while (cfg
->code_len
+ max_epilog_size
> (cfg
->code_size
- 16)) {
4663 cfg
->code_size
*= 2;
4664 cfg
->native_code
= g_realloc (cfg
->native_code
, cfg
->code_size
);
4665 mono_jit_stats
.code_reallocs
++;
4669 * Keep in sync with OP_JMP
4671 code
= cfg
->native_code
+ cfg
->code_len
;
4673 if (mono_jit_trace_calls
!= NULL
&& mono_trace_eval (method
)) {
4674 code
= mono_arch_instrument_epilog (cfg
, mono_trace_leave_method
, code
, TRUE
);
4678 if (method
->save_lmf
) {
4680 /* all but r0-r3, sp and pc */
4681 pos
+= sizeof (MonoLMF
) - (4 * 10);
4683 /* r2 contains the pointer to the current LMF */
4684 code
= emit_big_add (code
, ARMREG_R2
, cfg
->frame_reg
, cfg
->stack_usage
- lmf_offset
);
4685 /* ip = previous_lmf */
4686 ARM_LDR_IMM (code
, ARMREG_IP
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4688 ARM_LDR_IMM (code
, ARMREG_LR
, ARMREG_R2
, G_STRUCT_OFFSET (MonoLMF
, lmf_addr
));
4689 /* *(lmf_addr) = previous_lmf */
4690 ARM_STR_IMM (code
, ARMREG_IP
, ARMREG_LR
, G_STRUCT_OFFSET (MonoLMF
, previous_lmf
));
4691 /* FIXME: speedup: there is no actual need to restore the registers if
4692 * we didn't actually change them (idea from Zoltan).
4695 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4696 ARM_ADD_REG_IMM8 (code
, ARMREG_SP
, ARMREG_R2
, (sizeof (MonoLMF
) - 10 * sizeof (gulong
)));
4697 ARM_POP_NWB (code
, 0xaff0); /* restore ip to sp and lr to pc */
4699 if ((i
= mono_arm_is_rotated_imm8 (cfg
->stack_usage
, &rot_amount
)) >= 0) {
4700 ARM_ADD_REG_IMM (code
, ARMREG_SP
, cfg
->frame_reg
, i
, rot_amount
);
4702 code
= mono_arm_emit_load_imm (code
, ARMREG_IP
, cfg
->stack_usage
);
4703 ARM_ADD_REG_REG (code
, ARMREG_SP
, ARMREG_SP
, ARMREG_IP
);
4705 /* FIXME: add v4 thumb interworking support */
4706 ARM_POP_NWB (code
, cfg
->used_int_regs
| ((1 << ARMREG_SP
) | (1 << ARMREG_PC
)));
4709 cfg
->code_len
= code
- cfg
->native_code
;
4711 g_assert (cfg
->code_len
< cfg
->code_size
);
4715 /* remove once throw_exception_by_name is eliminated */
4717 exception_id_by_name (const char *name
)
4719 if (strcmp (name
, "IndexOutOfRangeException") == 0)
4720 return MONO_EXC_INDEX_OUT_OF_RANGE
;
4721 if (strcmp (name
, "OverflowException") == 0)
4722 return MONO_EXC_OVERFLOW
;
4723 if (strcmp (name
, "ArithmeticException") == 0)
4724 return MONO_EXC_ARITHMETIC
;
4725 if (strcmp (name
, "DivideByZeroException") == 0)
4726 return MONO_EXC_DIVIDE_BY_ZERO
;
4727 if (strcmp (name
, "InvalidCastException") == 0)
4728 return MONO_EXC_INVALID_CAST
;
4729 if (strcmp (name
, "NullReferenceException") == 0)
4730 return MONO_EXC_NULL_REF
;
4731 if (strcmp (name
, "ArrayTypeMismatchException") == 0)
4732 return MONO_EXC_ARRAY_TYPE_MISMATCH
;
4733 g_error ("Unknown intrinsic exception %s\n", name
);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code which raises the intrinsic corlib exceptions
 * (index out of range, overflow, ...) for CFG.  One throw sequence is emitted
 * per distinct exception type; every branch in the method which throws that
 * type is patched to jump to the shared sequence.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	/* Address of the already-emitted throw sequence per exception id, or NULL. */
	const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = exception_id_by_name (patch_info->data.target);
			/* budget 32 bytes only once per distinct exception type */
			if (!exc_throw_found [i]) {
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	/* grow the code buffer until the throw sequences are guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the throw sequence emitted earlier for this type and
				 * neutralize the patch entry so it is not processed again. */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}
			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* lr holds the throw site address; pass it as the second argument */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			/* load the exception type token, stored in the code stream below */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* retarget this patch entry at the bl to the throw helper */
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			/* the token word read by the LDR above (pc points 8 bytes ahead) */
			*(guint32*)(gpointer)code = exc_class->type_token;
			code += 4;
			break;
		}
		default:
			break;
		}
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
/* Set once the LMF TLS offsets below have been looked up (process-wide). */
static gboolean tls_offset_inited = FALSE;

/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Called on thread attach.  On ARM the only arch-specific work is caching
 * the TLS offsets of the LMF and LMF-address slots the first time through;
 * TLS itself is unused.
 */
void
mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
{
	if (!tls_offset_inited) {
		tls_offset_inited = TRUE;

		lmf_tls_offset = mono_get_lmf_tls_offset ();
		lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
	}
}
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
	/* Nothing to free: mono_arch_setup_jit_tls_data () only caches global offsets. */
}
/*
 * mono_arch_emit_inst_for_method:
 *
 *   Give the backend a chance to replace a call to CMETHOD with an inline
 * instruction sequence.  No method intrinsics are implemented on ARM, so
 * NULL is returned and the generic call path is used.
 */
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	return NULL;
}
/*
 * mono_arch_print_tree:
 *
 *   Print arch-specific instruction trees.  Nothing arch-specific to print
 * on ARM; returning 0 lets the generic printer handle TREE.
 */
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}
/*
 * mono_arch_get_domain_intrinsic:
 *
 *   No fast TLS access on ARM here; defer to the generic helper which loads
 * the current MonoDomain.
 */
MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return mono_get_domain_intrinsic (cfg);
}
/*
 * mono_arch_get_thread_intrinsic:
 *
 *   No fast TLS access on ARM here; defer to the generic helper which loads
 * the current thread object.
 */
MonoInst*
mono_arch_get_thread_intrinsic (MonoCompile* cfg)
{
	return mono_get_thread_intrinsic (cfg);
}
/*
 * mono_arch_get_patch_offset:
 *
 *   Return the offset, relative to CODE, of the location which gets patched
 * by the runtime.
 * NOTE(review): the body was dropped by the extraction; reconstructed as the
 * upstream OP_AOTCONST stub — confirm against the original file.
 */
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	/* OP_AOTCONST */
	return 8;
}
void
mono_arch_flush_register_windows (void)
{
	/* Register windows are a SPARC concept; nothing to do on ARM. */
}
4867 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method identifier to be passed to CALL in ARMREG_V5.
 * If IMT_ARG is non-NULL its dreg is forwarded; otherwise the method itself
 * is materialized into a fresh register (as an AOT constant when compiling
 * AOT, as a plain pointer constant otherwise).
 */
void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	if (cfg->compile_aot) {
		int method_reg = mono_alloc_ireg (cfg);
		MonoInst *ins;

		call->dynamic_imt_arg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			/* No imt arg: pass the method itself as an AOT constant in V5 */
			MONO_INST_NEW (cfg, ins, OP_AOTCONST);
			ins->dreg = method_reg;
			ins->inst_p0 = call->method;
			ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	} else if (cfg->generic_context) {
		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MonoInst *ins;
			int method_reg = mono_alloc_preg (cfg);

			/* JIT case: the method pointer is a plain constant */
			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	}
}
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for a call whose return address is CODE.  The IMT
 * thunk stores the method pointer in the code stream right after the
 * `ldr r0, [pc, #-4]` it executed, except for AOT callers which pass it in V5
 * and store 0 in the stream.
 */
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	guint32 *code_ptr = (guint32*)code;
	/* step back over the embedded method word to the LDR instruction */
	code_ptr -= 2;

	/* The IMT value is stored in the code stream right after the LDC instruction. */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));
	}

	if (code_ptr [1] == 0)
		/* This is AOTed code, the IMT method is in V5 */
		return (MonoMethod*)regs [ARMREG_V5];
	else
		return (MonoMethod*) code_ptr [1];
}
/*
 * mono_arch_find_this_argument:
 *
 *   Recover the `this' argument of METHOD from the saved register state REGS.
 */
gpointer
mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
{
	return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
}
/*
 * mono_arch_find_static_call_vtable:
 *
 *   The vtable of a static call is passed in the RGCTX register; read it back
 * from the saved register state REGS.
 */
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
4941 #define ENABLE_WRONG_METHOD_CHECK 0
4942 #define BASE_SIZE (6 * 4)
4943 #define BSEARCH_ENTRY_SIZE (4 * 4)
4944 #define CMP_SIZE (3 * 4)
4945 #define BRANCH_SIZE (1 * 4)
4946 #define CALL_SIZE (2 * 4)
4947 #define WMC_SIZE (5 * 4)
4948 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
4951 arm_emit_value_and_patch_ldr (arminstr_t
*code
, arminstr_t
*target
, guint32 value
)
4953 guint32 delta
= DISTANCE (target
, code
);
4955 g_assert (delta
>= 0 && delta
<= 0xFFF);
4956 *target
= *target
| delta
;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE: a sequence of compare /
 * branch / jump-through-vtable-slot chunks, one per IMT_ENTRIES item, with the
 * compared keys and large vtable offsets emitted as literal words after the
 * first unconditional branch.  On entry the thunk expects the IMT method in R0
 * (or 0 with the method in V5 for AOT callers) and the return address of the
 * caller in LR.
 * NOTE(review): several structural lines (braces, #if DEBUG_IMT guards,
 * `size = BASE_SIZE;`, `++extra_space;`) were dropped by the extraction and
 * have been reconstructed — confirm against the original file.
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int size, i, extra_space = 0;
	arminstr_t *code, *start, *vtable_target = NULL;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	/*
	 * We might be called with a fail_tramp from the IMT builder code even if
	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
	 */
	//g_assert (!fail_tramp);

	/* First pass: compute the buffer size from each item's chunk size. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			/* A slot too far for an imm12 LDR needs the long (push/pop) form. */
			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable [item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
	start = code = mono_domain_code_reserve (domain, size);

#if DEBUG_IMT
	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Save scratch registers; the long form also reserves two extra slots. */
	if (large_offsets)
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	else
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	/* R0 = IMT method, loaded from the word before the caller's return address */
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	/* IP = vtable address, patched in as a literal below */
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);

	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* Second pass: emit the compare/branch/jump chunk for each item. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			} else {
				/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_NE, 1);
				ARM_DBRK (code);
#endif
			}

			vtable_offset = DISTANCE (vtable, &vtable->vtable [item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
			} else {
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				if (large_offsets)
					/* discard the two extra stack slots reserved by PUSH4 */
					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			/* bsearch interior node: branch right when R0 >= key */
			imt_method = code;
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_GE, 0);
			++extra_space;
		}
	}

	/* Third pass: resolve forward branches and fill the bsearch key pools. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
			}
		}
	}

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
	return start;
}
5143 mono_arch_context_get_int_reg (MonoContext
*ctx
, int reg
)
5145 if (reg
== ARMREG_SP
)
5146 return (gpointer
)ctx
->esp
;
5148 return (gpointer
)ctx
->regs
[reg
];
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/* AOT code is read-only: record the breakpoint in the per-method
		 * SeqPointInfo table the generated code consults instead. */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		/* ldr lr, [pc]; b past the literal; <bp_trigger_page>; ldr lr, [lr] */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* AOT: just clear the entry in the SeqPointInfo table. */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
		info->bp_addrs [native_offset / 4] = 0;
	} else {
		/* JIT: overwrite the 4-instruction breakpoint sequence with NOPs. */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
void
mono_arch_start_single_stepping (void)
{
	/* Revoke all access so the per-sequence-point read of ss_trigger_page faults. */
	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
}
/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
void
mono_arch_stop_single_stepping (void)
{
	/* Make the trigger page readable again so sequence point reads succeed. */
	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
}
5241 #define DBG_SIGNAL SIGBUS
5243 #define DBG_SIGNAL SIGSEGV
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a single
 * step event, i.e. a fault caused by reading ss_trigger_page.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
	siginfo_t *sinfo = info;

	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
		return TRUE;
	else
		return FALSE;
}
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
 */
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
	siginfo_t *sinfo = info;

	if (sinfo->si_signo == DBG_SIGNAL) {
		/* Sometimes the address is off by 4 */
		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
			return TRUE;
		else
			return FALSE;
	} else {
		return FALSE;
	}
}
5286 mono_arch_get_ip_for_breakpoint (MonoJitInfo
*ji
, MonoContext
*ctx
)
5288 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
5299 mono_arch_get_ip_for_single_step (MonoJitInfo
*ji
, MonoContext
*ctx
)
5301 guint8
*ip
= MONO_CONTEXT_GET_IP (ctx
);
5309 * mono_arch_skip_breakpoint:
5311 * See mini-amd64.c for docs.
5314 mono_arch_skip_breakpoint (MonoContext
*ctx
)
5316 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5320 * mono_arch_skip_single_step:
5322 * See mini-amd64.c for docs.
5325 mono_arch_skip_single_step (MonoContext
*ctx
)
5327 MONO_CONTEXT_SET_IP (ctx
, (guint8
*)MONO_CONTEXT_GET_IP (ctx
) + 4);
5331 * mono_arch_get_seq_point_info:
5333 * See mini-amd64.c for docs.
5336 mono_arch_get_seq_point_info (MonoDomain
*domain
, guint8
*code
)
5341 // FIXME: Add a free function
5343 mono_domain_lock (domain
);
5344 info
= g_hash_table_lookup (domain_jit_info (domain
)->arch_seq_points
,
5346 mono_domain_unlock (domain
);
5349 ji
= mono_jit_info_table_find (domain
, (char*)code
);
5352 info
= g_malloc0 (sizeof (SeqPointInfo
) + ji
->code_size
);
5354 info
->ss_trigger_page
= ss_trigger_page
;
5355 info
->bp_trigger_page
= bp_trigger_page
;
5357 mono_domain_lock (domain
);
5358 g_hash_table_insert (domain_jit_info (domain
)->arch_seq_points
,
5360 mono_domain_unlock (domain
);