2010-04-06 Rodrigo Kumpera <rkumpera@novell.com>
[mono.git] / mono / mini / mini-arm.c
blob0a72637f062ad53b5efd0e90572c5925241fab7c
1 /*
2 * mini-arm.c: ARM backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 */
10 #include "mini.h"
11 #include <string.h>
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
17 #include "mini-arm.h"
18 #include "cpu-arm.h"
19 #include "trace.h"
20 #include "ir-emit.h"
21 #ifdef ARM_FPU_FPA
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
25 #endif
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
29 #endif
/* TLS offsets for the MonoLMF and the LMF-address slot; -1 means "not available". */
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags detected at startup (see mono_arch_cpu_optimizazions). */
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
53 typedef struct {
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
57 } SeqPointInfo;
/*
 * TODO:
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which binary incompat with the other.
 * 1) FPA: old and ugly, but unfortunately what current distros use
 *    the double binary format has the two words swapped. 8 double registers.
 *    Implemented usually by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
 *
 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
 */
73 int mono_exc_esp_offset = 0;
/* Range predicates for immediates accepted by the ARM addressing modes used below. */
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair used to recognize an unconditional "ldr pc, [rX, #imm]" instruction word. */
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Pre-encoded instruction words matched against call sites in mono_arch_get_vcall_slot (). */
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/* Set to 1 to dump IMT thunk code at generation time. */
85 #define DEBUG_IMT 0
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
91 } else { \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
95 } \
96 } while (0)
/* Store counterpart of ARM_LDR_IMM_GENERAL: when the offset does not fit in
 * 12 bits, it is materialized in scratch_reg and a register-offset STR is used. */
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
101 } else { \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
106 } while (0)
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for integer register number REG
 * (0-15, i.e. r0-r3, v1-v7, fp, ip, sp, lr, pc). Values outside
 * that range yield "unknown".
 */
const char*
mono_arch_regname (int reg)
{
	static const char *int_reg_names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= 16)
		return "unknown";

	return int_reg_names [reg];
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for floating point register number
 * REG (0-31). Values outside that range yield "unknown".
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *fp_reg_names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	return (reg >= 0 && reg < 32) ? fp_reg_names [reg] : "unknown";
}
139 #ifndef DISABLE_JIT
/*
 * emit_big_add:
 *
 *   Emit code computing dreg = sreg + imm. A single ADD is emitted when
 * imm encodes as a rotated 8-bit immediate; otherwise imm is first
 * materialized in dreg, which is why dreg must differ from sreg then.
 * Returns the updated code pointer.
 */
141 static guint8*
142 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
144 int imm8, rot_amount;
145 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
146 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
147 return code;
/* Immediate does not encode: load it into dreg, then add sreg to it. */
149 g_assert (dreg != sreg);
150 code = mono_arm_emit_load_imm (code, dreg, imm);
151 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
152 return code;
/*
 * emit_memcpy:
 *
 *   Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use a runtime loop over r0-r3; smaller
 * copies are fully unrolled as LDR/STR pairs through lr.
 * NOTE(review): all paths copy in 4-byte words; a SIZE that is not a
 * multiple of 4 trips the g_assert at the end.
 */
155 static guint8*
156 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size > sizeof (gpointer) * 4) {
160 guint8 *start_loop;
/* r0 = src cursor, r1 = dst cursor, r2 = remaining byte count, r3 = scratch. */
161 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
162 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
163 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
164 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
165 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
166 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
167 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
168 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Loop back while the counter has not reached zero (flags set by SUBS). */
169 ARM_B_COND (code, ARMCOND_NE, 0);
170 arm_patch (code - 4, start_loop);
171 return code;
/* Small copy with offsets that fit the 12-bit LDR/STR immediate: unroll in place. */
173 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
174 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
175 while (size >= 4) {
176 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
177 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
178 doffset += 4;
179 soffset += 4;
180 size -= 4;
182 } else if (size) {
/* Offsets too large for the immediate form: compute base addresses into r0/r1 first. */
183 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
184 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
185 doffset = soffset = 0;
186 while (size >= 4) {
187 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
188 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
189 doffset += 4;
190 soffset += 4;
191 size -= 4;
194 g_assert (size == 0);
195 return code;
/*
 * emit_call_reg:
 *
 *   Emit an indirect call through REG. On ARMv5+ a single BLX is used;
 * older cores get an explicit "mov lr, pc" followed by BX (when thumb
 * interworking is available) or a plain mov to pc.
 */
198 static guint8*
199 emit_call_reg (guint8 *code, int reg)
201 if (v5_supported) {
202 ARM_BLX_REG (code, reg);
203 } else {
/* Manually set the return address; pc reads as the current insn + 8 here. */
204 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
205 if (thumb_supported)
206 ARM_BX (code, reg);
207 else
208 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
210 return code;
/*
 * emit_call_seq:
 *
 *   Emit a call whose target is filled in later by patching. Dynamic
 * methods cannot rely on BL reachability/patch infrastructure, so they
 * load the target from an inline literal (the NULL word emitted below)
 * into ip and call through it; normal methods emit a patchable BL.
 */
213 static guint8*
214 emit_call_seq (MonoCompile *cfg, guint8 *code)
216 if (cfg->method->dynamic) {
217 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
218 ARM_B (code, 0);
/* Placeholder literal for the call target; patched before execution. */
219 *(gpointer*)code = NULL;
220 code += 4;
221 code = emit_call_reg (code, ARMREG_IP);
222 } else {
223 ARM_BL (code, 0);
225 return code;
/*
 * emit_move_return_value:
 *
 *   After a call instruction, move the return value from the fixed
 * return location into ins->dreg. Only float calls need work here:
 * FPA returns in ARM_FPA_F0, while under VFP (softfp calling in this
 * code path) the value comes back in r0 (R4) or r0:r1 (R8) and must
 * be transferred into the VFP register.
 */
228 static guint8*
229 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
231 switch (ins->opcode) {
232 case OP_FCALL:
233 case OP_FCALL_REG:
234 case OP_FCALL_MEMBASE:
235 #ifdef ARM_FPU_FPA
236 if (ins->dreg != ARM_FPA_F0)
237 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
238 #elif defined(ARM_FPU_VFP)
239 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* Single: move r0 into the VFP reg, then widen to double. */
240 ARM_FMSR (code, ins->dreg, ARMREG_R0);
241 ARM_CVTS (code, ins->dreg, ins->dreg);
242 } else {
/* Double: combine the r0:r1 pair into the VFP double register. */
243 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
245 #endif
246 break;
249 return code;
252 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
266 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
268 int k, frame_size = 0;
269 guint32 size, align, pad;
270 int offset = 8;
/* A struct return is passed as a hidden pointer argument. */
272 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
273 frame_size += sizeof (gpointer);
274 offset += 4;
277 arg_info [0].offset = offset;
279 if (csig->hasthis) {
280 frame_size += sizeof (gpointer);
281 offset += 4;
/* Entry 0 records the combined size of the implicit arguments. */
284 arg_info [0].size = frame_size;
286 for (k = 0; k < param_count; k++) {
287 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
289 /* ignore alignment for now */
290 align = 1;
292 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
293 arg_info [k].pad = pad;
294 frame_size += size;
295 arg_info [k + 1].pad = 0;
296 arg_info [k + 1].size = size;
297 offset += pad;
298 arg_info [k + 1].offset = offset;
299 offset += size;
/* Trailing pad to bring the frame up to the platform frame alignment. */
302 align = MONO_ARCH_FRAME_ALIGNMENT;
303 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
304 arg_info [k].pad = pad;
306 return frame_size;
/*
 * decode_vcall_slot_from_ldr:
 *
 *   Decode an LDR instruction word of the form "ldr rZ, [rN, #imm]":
 * extract the base register (bits 16-19) and the signed 12-bit offset
 * (sign given by the U bit, bit 23), store the offset in *displacement
 * and return the base register's runtime value taken from REGS.
 */
309 static gpointer
310 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
312 char *o = NULL;
313 int reg, offset = 0;
314 reg = (ldr >> 16 ) & 0xf;
315 offset = ldr & 0xfff;
316 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
317 offset = -offset;
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
319 o = (gpointer)regs [reg];
321 *displacement = offset;
322 return o;
325 gpointer
326 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
328 guint32* code = (guint32*)code_ptr;
/* Locate the address of the method-specific trampoline. The call using
   the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
   looks something like this:

	ldr rA, rX, #offset
	mov lr, pc
	mov pc, rA
   or better:
	mov lr, pc
	ldr pc, rX, #offset

   The call sequence could be also:
	ldr ip, pc, 0
	b skip
	function pointer literal
	skip:
	mov lr, pc
	mov pc, ip
   Note that on ARM5+ we can use one instruction instead of the last two.
   Therefore, we need to locate the 'ldr rA' instruction to know which
   register was used to hold the method addrs.
*/
353 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
354 --code;
/* Three possible code sequences can happen here:
 * interface call:
 *	add lr, [pc + #4]
 *	ldr pc, [rX - #offset]
 *	.word IMT value
 * virtual call:
 *	mov lr, pc
 *	ldr pc, [rX - #offset]
 * direct branch with bl:
 *	bl #offset
 * direct branch with mov:
 *	mv pc, rX
 * We only need to identify interface and virtual calls, the others can be ignored.
 */
379 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
380 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
382 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
383 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
385 return NULL;
388 #define MAX_ARCH_DELEGATE_PARAMS 3
390 static gpointer
391 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
393 guint8 *code, *start;
395 if (has_target) {
396 start = code = mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
400 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= 12);
405 mono_arch_flush_icache (start, 12);
406 } else {
407 int size, i;
409 size = 8 + param_count * 4;
410 start = code = mono_global_codeman_reserve (size);
412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
413 /* slide down the arguments */
414 for (i = 0; i < param_count; ++i) {
415 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
417 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
419 g_assert ((code - start) <= size);
421 mono_arch_flush_icache (start, size);
424 if (code_size)
425 *code_size = code - start;
427 return start;
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 *   Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines.
 */
436 GSList*
437 mono_arch_get_delegate_invoke_impls (void)
439 GSList *res = NULL;
440 guint8 *code;
441 guint32 code_len;
442 int i;
/* One "has target" variant plus one variant per supported parameter count. */
444 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
447 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
448 code = get_delegate_invoke_impl (FALSE, i, &code_len);
449 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
452 return res;
/*
 * mono_arch_get_delegate_invoke_impl:
 *
 *   Return the trampoline used to invoke delegates with signature SIG,
 * or NULL when the signature is unsupported (struct return, too many or
 * non-regsize parameters). Results are cached per shape under the
 * arch mutex; in AOT-only mode the precompiled named trampolines are used.
 */
455 gpointer
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
458 guint8 *code, *start;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig->ret))
462 return NULL;
464 if (has_target) {
465 static guint8* cached = NULL;
466 mono_mini_arch_lock ();
467 if (cached) {
468 mono_mini_arch_unlock ();
469 return cached;
472 if (mono_aot_only)
473 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
474 else
475 start = get_delegate_invoke_impl (TRUE, 0, NULL);
476 cached = start;
477 mono_mini_arch_unlock ();
478 return cached;
479 } else {
/* One cache slot per parameter count (0..MAX_ARCH_DELEGATE_PARAMS). */
480 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
481 int i;
483 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
484 return NULL;
485 for (i = 0; i < sig->param_count; ++i)
486 if (!mono_is_regsize_var (sig->params [i]))
487 return NULL;
489 mono_mini_arch_lock ();
490 code = cache [sig->param_count];
491 if (code) {
492 mono_mini_arch_unlock ();
493 return code;
496 if (mono_aot_only) {
497 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
498 start = mono_aot_get_named_code (name);
499 g_free (name);
500 } else {
501 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
503 cache [sig->param_count] = start;
504 mono_mini_arch_unlock ();
505 return start;
508 return NULL;
/*
 * mono_arch_get_this_arg_from_call:
 *
 *   Extract the this pointer from the saved register state of a call
 * site. When the signature returns a struct, r0 holds the hidden
 * return-value address and this is in r1; otherwise this is in r0.
 */
511 gpointer
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
514 /* FIXME: handle returning a struct */
515 if (MONO_TYPE_ISSTRUCT (sig->ret))
516 return (gpointer)regs [ARMREG_R1];
517 return (gpointer)regs [ARMREG_R0];
/*
 * Initialize the cpu to execute managed code.
 * Nothing is needed on ARM.
 */
523 void
524 mono_arch_cpu_init (void)
/*
 * Initialize architecture specific code (one-time, per-process).
 */
531 void
532 mono_arch_init (void)
534 InitializeCriticalSection (&mini_arch_mutex);
/* Allocate the trigger pages used by sequence points and breakpoints;
 * bp_trigger_page is made inaccessible so reads from it fault. */
536 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
537 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
538 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/*
 * Cleanup architecture specific code.
 */
544 void
545 mono_arch_cleanup (void)
/*
 * This function returns the optimizations supported on this cpu.
 * As a side effect it detects the CPU features (v5/v7/thumb support),
 * either from the MONO_CPU_ARCH environment variable or, on Linux,
 * by parsing /proc/cpuinfo.
 * (Note: the misspelled name is the established cross-arch entry point.)
 */
552 guint32
553 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
555 guint32 opts = 0;
/* Explicit override, e.g. MONO_CPU_ARCH="armv7 thumb". */
556 const char *cpu_arch = getenv ("MONO_CPU_ARCH");
557 if (cpu_arch != NULL) {
558 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
559 if (strncmp (cpu_arch, "armv", 4) == 0) {
560 v5_supported = cpu_arch [4] >= '5';
561 v7_supported = cpu_arch [4] >= '7';
563 } else {
564 #if __APPLE__
565 thumb_supported = TRUE;
566 v5_supported = TRUE;
567 #else
568 char buf [512];
569 char *line;
570 FILE *file = fopen ("/proc/cpuinfo", "r");
571 if (file) {
572 while ((line = fgets (buf, 512, file))) {
/* "Processor : ARMv7 ... (v7l)" style line: read the version digit. */
573 if (strncmp (line, "Processor", 9) == 0) {
574 char *ver = strstr (line, "(v");
575 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
576 v5_supported = TRUE;
577 if (ver && (ver [2] == '7'))
578 v7_supported = TRUE;
579 continue;
/* "Features : ... thumb ..." line: detect thumb interworking. */
581 if (strncmp (line, "Features", 8) == 0) {
582 char *th = strstr (line, "thumb");
583 if (th) {
584 thumb_supported = TRUE;
585 if (v5_supported)
586 break;
588 continue;
591 fclose (file);
592 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
594 #endif
597 /* no arm-specific optimizations yet */
598 *exclude_mask = 0;
599 return opts;
602 #ifndef DISABLE_JIT
/*
 * is_regsize_var:
 *
 *   Return TRUE if a variable of type T fits in a single 32 bit
 * integer register (byrefs, 32-bit ints, pointers and reference
 * types); FALSE for value types and everything else.
 */
604 static gboolean
605 is_regsize_var (MonoType *t) {
606 if (t->byref)
607 return TRUE;
608 t = mini_type_get_underlying_type (NULL, t);
609 switch (t->type) {
610 case MONO_TYPE_I4:
611 case MONO_TYPE_U4:
612 case MONO_TYPE_I:
613 case MONO_TYPE_U:
614 case MONO_TYPE_PTR:
615 case MONO_TYPE_FNPTR:
616 return TRUE;
617 case MONO_TYPE_OBJECT:
618 case MONO_TYPE_STRING:
619 case MONO_TYPE_CLASS:
620 case MONO_TYPE_SZARRAY:
621 case MONO_TYPE_ARRAY:
622 return TRUE;
623 case MONO_TYPE_GENERICINST:
/* Reference-type instantiations are pointer sized; valuetype ones are not. */
624 if (!mono_type_generic_inst_is_valuetype (t))
625 return TRUE;
626 return FALSE;
627 case MONO_TYPE_VALUETYPE:
628 return FALSE;
630 return FALSE;
/*
 * mono_arch_get_allocatable_int_vars:
 *
 *   Collect the local variables of CFG that are candidates for global
 * register allocation: live, non-volatile, non-indirect locals/args
 * whose type fits a 32 bit register. Returned sorted for the allocator.
 */
633 GList *
634 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
636 GList *vars = NULL;
637 int i;
639 for (i = 0; i < cfg->num_varinfo; i++) {
640 MonoInst *ins = cfg->varinfo [i];
641 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
643 /* unused vars */
644 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
645 continue;
647 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
648 continue;
650 /* we can only allocate 32 bit values */
651 if (is_regsize_var (ins->inst_vtype)) {
652 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
653 g_assert (i == vmv->idx);
654 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
658 return vars;
661 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 *
 *   Return the list of callee-saved integer registers (v1-v4, and v5
 * when it is not reserved for vtable/rgctx/IMT passing) available for
 * global register allocation in CFG.
 */
663 GList *
664 mono_arch_get_global_int_regs (MonoCompile *cfg)
666 GList *regs = NULL;
/*
 * FIXME: Interface calls might go through a static rgctx trampoline which
 * sets V5, but it doesn't save it, so we need to save it ourselves, and
 * avoid using it.
 */
673 if (cfg->flags & MONO_CFG_HAS_CALLS)
674 cfg->uses_rgctx_reg = TRUE;
676 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
677 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
678 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
679 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
680 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
681 /* V5 is reserved for passing the vtable/rgctx/IMT method */
682 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
683 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
684 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
686 return regs;
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
696 guint32
697 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
699 /* FIXME: */
700 return 2;
703 #endif /* #ifndef DISABLE_JIT */
/* Fallback for toolchains that do not provide __GNUC_PREREQ: treat the
 * version check as always false so the generic code paths are used. */
705 #ifndef __GNUC_PREREQ
706 #define __GNUC_PREREQ(maj, min) (0)
707 #endif
709 void
710 mono_arch_flush_icache (guint8 *code, gint size)
712 #if __APPLE__
713 sys_icache_invalidate (code, size);
714 #elif __GNUC_PREREQ(4, 1)
715 __clear_cache (code, code + size);
716 #elif defined(PLATFORM_ANDROID)
717 const int syscall = 0xf0002;
718 __asm __volatile (
719 "mov r0, %0\n"
720 "mov r1, %1\n"
721 "mov r7, %2\n"
722 "mov r2, #0x0\n"
723 "svc 0x00000000\n"
725 : "r" (code), "r" (code + size), "r" (syscall)
726 : "r0", "r1", "r7", "r2"
728 #else
729 __asm __volatile ("mov r0, %0\n"
730 "mov r1, %1\n"
731 "mov r2, %2\n"
732 "swi 0x9f0002 @ sys_cacheflush"
733 : /* no outputs */
734 : "r" (code), "r" (code + size), "r" (0)
735 : "r0", "r1", "r3" );
736 #endif
/* Where/how a single argument or return value is passed. */
739 typedef enum {
740 RegTypeNone,
741 RegTypeGeneral,
742 RegTypeIRegPair,
743 RegTypeBase,
744 RegTypeBaseGen,
745 RegTypeFP,
746 RegTypeStructByVal,
747 RegTypeStructByAddr
748 } ArgStorage;
/* Placement of one argument: register and/or stack offset plus sizes. */
750 typedef struct {
751 gint32 offset;
752 guint16 vtsize; /* in param area */
753 guint8 reg;
754 ArgStorage storage;
755 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
756 } ArgInfo;
/* Full calling-convention layout for one signature (args [] is over-allocated). */
758 typedef struct {
759 int nargs;
760 guint32 stack_usage;
761 guint32 struct_ret;
762 gboolean vtype_retaddr;
763 ArgInfo ret;
764 ArgInfo sig_cookie;
765 ArgInfo args [1];
766 } CallInfo;
/* Compile-time switch for the calling-convention debug printfs below. */
768 #define DEBUG(a)
/* Portable fallback for __alignof__ on non-GNU compilers. */
770 #ifndef __GNUC__
771 /*#define __alignof__(a) sizeof(a)*/
772 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
773 #endif
/* Number of integer argument registers (r0-r3) in the ARM calling convention. */
775 #define PARAM_REGS 4
/*
 * add_general:
 *
 *   Assign the next location for one argument. GR is the next free
 * argument register, STACK_SIZE the bytes of stack used so far; both
 * are updated. SIMPLE selects a single-word argument; otherwise a
 * 64-bit value is placed, honouring the platform's 8-byte alignment
 * rules (EABI) and possibly splitting between r3 and the stack.
 */
777 static void inline
778 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
780 if (simple) {
781 if (*gr > ARMREG_R3) {
/* Registers exhausted: the value lives in the caller's outgoing area. */
782 ainfo->offset = *stack_size;
783 ainfo->reg = ARMREG_SP; /* in the caller */
784 ainfo->storage = RegTypeBase;
785 *stack_size += 4;
786 } else {
787 ainfo->storage = RegTypeGeneral;
788 ainfo->reg = *gr;
790 } else {
791 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
792 int i8_align = 4;
793 #else
794 int i8_align = __alignof__ (gint64);
795 #endif
/* With 4-byte i8 alignment (or pre-EABI), a 64-bit value may be split
 * between the last register and the stack. */
797 #if __ARM_EABI__
798 gboolean split = i8_align == 4;
799 #else
800 gboolean split = TRUE;
801 #endif
803 if (*gr == ARMREG_R3 && split) {
804 /* first word in r3 and the second on the stack */
805 ainfo->offset = *stack_size;
806 ainfo->reg = ARMREG_SP; /* in the caller */
807 ainfo->storage = RegTypeBaseGen;
808 *stack_size += 4;
809 } else if (*gr >= ARMREG_R3) {
810 #ifdef __ARM_EABI__
811 /* darwin aligns longs to 4 byte only */
812 if (i8_align == 8) {
813 *stack_size += 7;
814 *stack_size &= ~7;
816 #endif
817 ainfo->offset = *stack_size;
818 ainfo->reg = ARMREG_SP; /* in the caller */
819 ainfo->storage = RegTypeBase;
820 *stack_size += 8;
821 } else {
822 #ifdef __ARM_EABI__
/* EABI: 64-bit values go in an even/odd register pair. */
823 if (i8_align == 8 && ((*gr) & 1))
824 (*gr) ++;
825 #endif
826 ainfo->storage = RegTypeIRegPair;
827 ainfo->reg = *gr;
/* Second register of the pair. */
829 (*gr) ++;
831 (*gr) ++;
/*
 * get_call_info:
 *
 *   Compute the ARM calling-convention layout (register/stack placement
 * of each argument, the return value and the total stack usage) for
 * SIG. The result is allocated from MP when given, otherwise from the
 * heap; IS_PINVOKE selects native rather than managed struct sizing.
 */
834 static CallInfo*
835 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
837 guint i, gr;
838 int n = sig->hasthis + sig->param_count;
839 MonoType *simpletype;
840 guint32 stack_size = 0;
841 CallInfo *cinfo;
843 if (mp)
844 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
845 else
846 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
848 cinfo->nargs = n;
849 gr = ARMREG_R0;
851 /* FIXME: handle returning a struct */
852 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
853 guint32 align;
/* Small pinvoke structs come back in registers; anything else uses a
 * hidden return-address argument consuming r0. */
855 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
856 cinfo->ret.storage = RegTypeStructByVal;
857 } else {
858 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
859 cinfo->struct_ret = ARMREG_R0;
860 cinfo->vtype_retaddr = TRUE;
864 n = 0;
865 if (sig->hasthis) {
866 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
867 n++;
869 DEBUG(printf("params: %d\n", sig->param_count));
870 for (i = 0; i < sig->param_count; ++i) {
871 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
872 /* Prevent implicit arguments and sig_cookie from
873 being passed in registers */
874 gr = ARMREG_R3 + 1;
875 /* Emit the signature cookie just before the implicit arguments */
876 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
878 DEBUG(printf("param %d: ", i));
879 if (sig->params [i]->byref) {
880 DEBUG(printf("byref\n"));
881 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
882 n++;
883 continue;
885 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
/* Classify each parameter by its underlying type. */
886 switch (simpletype->type) {
887 case MONO_TYPE_BOOLEAN:
888 case MONO_TYPE_I1:
889 case MONO_TYPE_U1:
890 cinfo->args [n].size = 1;
891 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
892 n++;
893 break;
894 case MONO_TYPE_CHAR:
895 case MONO_TYPE_I2:
896 case MONO_TYPE_U2:
897 cinfo->args [n].size = 2;
898 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
899 n++;
900 break;
901 case MONO_TYPE_I4:
902 case MONO_TYPE_U4:
903 cinfo->args [n].size = 4;
904 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
905 n++;
906 break;
907 case MONO_TYPE_I:
908 case MONO_TYPE_U:
909 case MONO_TYPE_PTR:
910 case MONO_TYPE_FNPTR:
911 case MONO_TYPE_CLASS:
912 case MONO_TYPE_OBJECT:
913 case MONO_TYPE_STRING:
914 case MONO_TYPE_SZARRAY:
915 case MONO_TYPE_ARRAY:
916 case MONO_TYPE_R4:
917 cinfo->args [n].size = sizeof (gpointer);
918 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
919 n++;
920 break;
921 case MONO_TYPE_GENERICINST:
922 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
923 cinfo->args [n].size = sizeof (gpointer);
924 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
925 n++;
926 break;
928 /* Fall through */
929 case MONO_TYPE_TYPEDBYREF:
930 case MONO_TYPE_VALUETYPE: {
931 gint size;
932 int align_size;
933 int nwords;
934 guint32 align;
936 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
937 size = sizeof (MonoTypedRef);
938 align = sizeof (gpointer);
939 } else {
940 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
941 if (is_pinvoke)
942 size = mono_class_native_size (klass, &align);
943 else
944 size = mono_class_value_size (klass, &align);
946 DEBUG(printf ("load %d bytes struct\n",
947 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Value types are passed by value: round the size up to whole words,
 * fill the remaining argument registers, then spill to the stack. */
948 align_size = size;
949 nwords = 0;
950 align_size += (sizeof (gpointer) - 1);
951 align_size &= ~(sizeof (gpointer) - 1);
952 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
953 cinfo->args [n].storage = RegTypeStructByVal;
954 /* FIXME: align stack_size if needed */
955 #ifdef __ARM_EABI__
956 if (align >= 8 && (gr & 1))
957 gr ++;
958 #endif
959 if (gr > ARMREG_R3) {
960 cinfo->args [n].size = 0;
961 cinfo->args [n].vtsize = nwords;
962 } else {
963 int rest = ARMREG_R3 - gr + 1;
964 int n_in_regs = rest >= nwords? nwords: rest;
966 cinfo->args [n].size = n_in_regs;
967 cinfo->args [n].vtsize = nwords - n_in_regs;
968 cinfo->args [n].reg = gr;
969 gr += n_in_regs;
970 nwords -= n_in_regs;
972 cinfo->args [n].offset = stack_size;
973 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
974 stack_size += nwords * sizeof (gpointer);
975 n++;
976 break;
978 case MONO_TYPE_U8:
979 case MONO_TYPE_I8:
980 case MONO_TYPE_R8:
981 cinfo->args [n].size = 8;
982 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
983 n++;
984 break;
985 default:
986 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
990 /* Handle the case where there are no implicit arguments */
991 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
992 /* Prevent implicit arguments and sig_cookie from
993 being passed in registers */
994 gr = ARMREG_R3 + 1;
995 /* Emit the signature cookie just before the implicit arguments */
996 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value location. */
1000 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
1001 switch (simpletype->type) {
1002 case MONO_TYPE_BOOLEAN:
1003 case MONO_TYPE_I1:
1004 case MONO_TYPE_U1:
1005 case MONO_TYPE_I2:
1006 case MONO_TYPE_U2:
1007 case MONO_TYPE_CHAR:
1008 case MONO_TYPE_I4:
1009 case MONO_TYPE_U4:
1010 case MONO_TYPE_I:
1011 case MONO_TYPE_U:
1012 case MONO_TYPE_PTR:
1013 case MONO_TYPE_FNPTR:
1014 case MONO_TYPE_CLASS:
1015 case MONO_TYPE_OBJECT:
1016 case MONO_TYPE_SZARRAY:
1017 case MONO_TYPE_ARRAY:
1018 case MONO_TYPE_STRING:
1019 cinfo->ret.storage = RegTypeGeneral;
1020 cinfo->ret.reg = ARMREG_R0;
1021 break;
1022 case MONO_TYPE_U8:
1023 case MONO_TYPE_I8:
1024 cinfo->ret.storage = RegTypeIRegPair;
1025 cinfo->ret.reg = ARMREG_R0;
1026 break;
1027 case MONO_TYPE_R4:
1028 case MONO_TYPE_R8:
1029 cinfo->ret.storage = RegTypeFP;
1030 cinfo->ret.reg = ARMREG_R0;
1031 /* FIXME: cinfo->ret.reg = ???;
1032 cinfo->ret.storage = RegTypeFP;*/
1033 break;
1034 case MONO_TYPE_GENERICINST:
1035 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1036 cinfo->ret.storage = RegTypeGeneral;
1037 cinfo->ret.reg = ARMREG_R0;
1038 break;
1040 /* Fall through */
1041 case MONO_TYPE_VALUETYPE:
1042 case MONO_TYPE_TYPEDBYREF:
1043 if (cinfo->ret.storage != RegTypeStructByVal)
1044 cinfo->ret.storage = RegTypeStructByAddr;
1045 break;
1046 case MONO_TYPE_VOID:
1047 break;
1048 default:
1049 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1053 /* align stack size to 8 */
1054 DEBUG (printf ("      stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1055 stack_size = (stack_size + 7) & ~7;
1057 cinfo->stack_usage = stack_size;
1058 return cinfo;
1061 #ifndef DISABLE_JIT
1064 * Set var information according to the calling convention. arm version.
1065 * The locals var stuff should most likely be split in another method.
1067 void
1068 mono_arch_allocate_vars (MonoCompile *cfg)
1070 MonoMethodSignature *sig;
1071 MonoMethodHeader *header;
1072 MonoInst *ins;
1073 int i, offset, size, align, curinst;
1074 int frame_reg = ARMREG_FP;
1075 CallInfo *cinfo;
1076 guint32 ualign;
1078 sig = mono_method_signature (cfg->method);
1080 if (!cfg->arch.cinfo)
1081 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1082 cinfo = cfg->arch.cinfo;
1084 /* FIXME: this will change when we use FP as gcc does */
1085 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1087 /* allow room for the vararg method args: void* and long/double */
1088 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1089 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1091 header = cfg->header;
1094 * We use the frame register also for any method that has
1095 * exception clauses. This way, when the handlers are called,
1096 * the code will reference local variables using the frame reg instead of
1097 * the stack pointer: if we had to restore the stack pointer, we'd
1098 * corrupt the method frames that are already on the stack (since
1099 * filters get called before stack unwinding happens) when the filter
1100 * code would call any method (this also applies to finally etc.).
1102 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1103 frame_reg = ARMREG_FP;
1104 cfg->frame_reg = frame_reg;
1105 if (frame_reg != ARMREG_SP) {
1106 cfg->used_int_regs |= 1 << frame_reg;
1109 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1110 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1111 cfg->used_int_regs |= (1 << ARMREG_V5);
1113 offset = 0;
1114 curinst = 0;
1115 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1116 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1117 case MONO_TYPE_VOID:
1118 break;
1119 default:
1120 cfg->ret->opcode = OP_REGVAR;
1121 cfg->ret->inst_c0 = ARMREG_R0;
1122 break;
1125 /* local vars are at a positive offset from the stack pointer */
1127 * also note that if the function uses alloca, we use FP
1128 * to point at the local variables.
1130 offset = 0; /* linkage area */
1131 /* align the offset to 16 bytes: not sure this is needed here */
1132 //offset += 8 - 1;
1133 //offset &= ~(8 - 1);
1135 /* add parameter area size for called functions */
1136 offset += cfg->param_area;
1137 offset += 8 - 1;
1138 offset &= ~(8 - 1);
1139 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1140 offset += 8;
1142 /* allow room to save the return value */
1143 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1144 offset += 8;
1146 /* the MonoLMF structure is stored just below the stack pointer */
1147 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1148 if (cinfo->ret.storage == RegTypeStructByVal) {
1149 cfg->ret->opcode = OP_REGOFFSET;
1150 cfg->ret->inst_basereg = cfg->frame_reg;
1151 offset += sizeof (gpointer) - 1;
1152 offset &= ~(sizeof (gpointer) - 1);
1153 cfg->ret->inst_offset = - offset;
1154 } else {
1155 ins = cfg->vret_addr;
1156 offset += sizeof(gpointer) - 1;
1157 offset &= ~(sizeof(gpointer) - 1);
1158 ins->inst_offset = offset;
1159 ins->opcode = OP_REGOFFSET;
1160 ins->inst_basereg = frame_reg;
1161 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1162 printf ("vret_addr =");
1163 mono_print_ins (cfg->vret_addr);
1166 offset += sizeof(gpointer);
1169 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1170 if (cfg->arch.seq_point_info_var) {
1171 MonoInst *ins;
1173 ins = cfg->arch.seq_point_info_var;
1175 size = 4;
1176 align = 4;
1177 offset += align - 1;
1178 offset &= ~(align - 1);
1179 ins->opcode = OP_REGOFFSET;
1180 ins->inst_basereg = frame_reg;
1181 ins->inst_offset = offset;
1182 offset += size;
1184 ins = cfg->arch.ss_trigger_page_var;
1185 size = 4;
1186 align = 4;
1187 offset += align - 1;
1188 offset &= ~(align - 1);
1189 ins->opcode = OP_REGOFFSET;
1190 ins->inst_basereg = frame_reg;
1191 ins->inst_offset = offset;
1192 offset += size;
1195 curinst = cfg->locals_start;
1196 for (i = curinst; i < cfg->num_varinfo; ++i) {
1197 ins = cfg->varinfo [i];
1198 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1199 continue;
1201 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1202 * pinvoke wrappers when they call functions returning structure */
1203 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1204 size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
1205 align = ualign;
1207 else
1208 size = mono_type_size (ins->inst_vtype, &align);
1210 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1211 * since it loads/stores misaligned words, which don't do the right thing.
1213 if (align < 4 && size >= 4)
1214 align = 4;
1215 offset += align - 1;
1216 offset &= ~(align - 1);
1217 ins->opcode = OP_REGOFFSET;
1218 ins->inst_offset = offset;
1219 ins->inst_basereg = frame_reg;
1220 offset += size;
1221 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1224 curinst = 0;
1225 if (sig->hasthis) {
1226 ins = cfg->args [curinst];
1227 if (ins->opcode != OP_REGVAR) {
1228 ins->opcode = OP_REGOFFSET;
1229 ins->inst_basereg = frame_reg;
1230 offset += sizeof (gpointer) - 1;
1231 offset &= ~(sizeof (gpointer) - 1);
1232 ins->inst_offset = offset;
1233 offset += sizeof (gpointer);
1235 curinst++;
1238 if (sig->call_convention == MONO_CALL_VARARG) {
1239 size = 4;
1240 align = 4;
1242 /* Allocate a local slot to hold the sig cookie address */
1243 offset += align - 1;
1244 offset &= ~(align - 1);
1245 cfg->sig_cookie = offset;
1246 offset += size;
1249 for (i = 0; i < sig->param_count; ++i) {
1250 ins = cfg->args [curinst];
1252 if (ins->opcode != OP_REGVAR) {
1253 ins->opcode = OP_REGOFFSET;
1254 ins->inst_basereg = frame_reg;
1255 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1256 align = ualign;
1257 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1258 * since it loads/stores misaligned words, which don't do the right thing.
1260 if (align < 4 && size >= 4)
1261 align = 4;
1262 /* The code in the prolog () stores words when storing vtypes received in a register */
1263 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1264 align = 4;
1265 offset += align - 1;
1266 offset &= ~(align - 1);
1267 ins->inst_offset = offset;
1268 offset += size;
1270 curinst++;
1273 /* align the offset to 8 bytes */
1274 offset += 8 - 1;
1275 offset &= ~(8 - 1);
1277 /* change sign? */
1278 cfg->stack_offset = offset;
1281 void
1282 mono_arch_create_vars (MonoCompile *cfg)
1284 MonoMethodSignature *sig;
1285 CallInfo *cinfo;
1287 sig = mono_method_signature (cfg->method);
1289 if (!cfg->arch.cinfo)
1290 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1291 cinfo = cfg->arch.cinfo;
1293 if (cinfo->ret.storage == RegTypeStructByVal)
1294 cfg->ret_var_is_local = TRUE;
1296 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1297 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1298 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1299 printf ("vret_addr = ");
1300 mono_print_ins (cfg->vret_addr);
1304 if (cfg->gen_seq_points && cfg->compile_aot) {
1305 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1306 ins->flags |= MONO_INST_VOLATILE;
1307 cfg->arch.seq_point_info_var = ins;
1309 /* Allocate a separate variable for this to save 1 load per seq point */
1310 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1311 ins->flags |= MONO_INST_VOLATILE;
1312 cfg->arch.ss_trigger_page_var = ins;
1316 static void
1317 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1319 MonoMethodSignature *tmp_sig;
1320 MonoInst *sig_arg;
1322 if (call->tail_call)
1323 NOT_IMPLEMENTED;
1325 /* FIXME: Add support for signature tokens to AOT */
1326 cfg->disable_aot = TRUE;
1328 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1331 * mono_ArgIterator_Setup assumes the signature cookie is
1332 * passed first and all the arguments which were before it are
1333 * passed on the stack after the signature. So compensate by
1334 * passing a different signature.
1336 tmp_sig = mono_metadata_signature_dup (call->signature);
1337 tmp_sig->param_count -= call->signature->sentinelpos;
1338 tmp_sig->sentinelpos = 0;
1339 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1341 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1342 sig_arg->dreg = mono_alloc_ireg (cfg);
1343 sig_arg->inst_p0 = tmp_sig;
1344 MONO_ADD_INS (cfg->cbb, sig_arg);
1346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
#ifdef ENABLE_LLVM
/*
 * Translate our CallInfo into the LLVMCallInfo the LLVM backend consumes,
 * disabling LLVM compilation for conventions it cannot express.
 */
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
	int idx, nargs;
	CallInfo *cinfo;
	LLVMCallInfo *linfo;

	nargs = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * nargs));

	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
	if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
		cfg->exception_message = g_strdup ("unknown ret conv");
		cfg->disable_llvm = TRUE;
		return linfo;
	}

	for (idx = 0; idx < nargs; ++idx) {
		ArgInfo *ainfo = cinfo->args + idx;

		linfo->args [idx].storage = LLVMArgNone;

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
		case RegTypeBase:
			linfo->args [idx].storage = LLVMArgInIReg;
			break;
		default:
			cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
			cfg->disable_llvm = TRUE;
			break;
		}
	}

	return linfo;
}
#endif
/*
 * mono_arch_emit_call:
 *
 *   Lower the arguments of CALL into arch specific out-argument instructions,
 * following the per-argument storage classes computed by get_call_info ():
 * integer registers, register pairs, the stack, a register/stack split, or
 * by-value structs.  Also emits the vararg signature cookie and wires up the
 * hidden vtype return address when needed.
 */
void
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
{
	MonoInst *in, *ins;
	MonoMethodSignature *sig;
	int i, n;
	CallInfo *cinfo;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	/* Allocated with g_malloc (mempool == NULL), freed at the end */
	cinfo = get_call_info (NULL, sig, sig->pinvoke);

	for (i = 0; i < n; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		MonoType *t;

		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
		else
			t = &mono_defaults.int_class->byval_arg;
		t = mini_type_get_underlying_type (NULL, t);

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);
		}

		in = call->args [i];

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				/* 64 bit value: move the two component vregs into a register pair */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 2;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
			} else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
#ifndef MONO_ARCH_SOFT_FLOAT
				int creg;
#endif

				if (ainfo->size == 4) {
#ifdef MONO_ARCH_SOFT_FLOAT
					/* mono_emit_call_args () have already done the r8->r4 conversion */
					/* The converted value is in an int vreg */
					MONO_INST_NEW (cfg, ins, OP_MOVE);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
#else
					/* Spill the fp value to the param area, then reload it into an int reg */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
#endif
				} else {
#ifdef MONO_ARCH_SOFT_FLOAT
					/* Split the double into its two 32 bit halves */
					MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

					MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
#else
					/* Spill the double, reload each word into the register pair */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
#endif
				}
				cfg->flags |= MONO_CFG_HAS_FPOUT;
			} else {
				/* Plain word-sized argument in a single register */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
			}
			break;
		case RegTypeStructByAddr:
			NOT_IMPLEMENTED;
#if 0
			/* FIXME: where si the data allocated? */
			arg->backend.reg3 = ainfo->reg;
			call->used_iregs |= 1 << ainfo->reg;
			g_assert_not_reached ();
#endif
			break;
		case RegTypeStructByVal:
			/* Lowered later by mono_arch_emit_outarg_vt (); ArgInfo is copied
			 * to the mempool since cinfo is freed below. */
			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
			ins->opcode = OP_OUTARG_VT;
			ins->sreg1 = in->dreg;
			ins->klass = in->klass;
			ins->inst_p0 = call;
			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
			MONO_ADD_INS (cfg->cbb, ins);
			break;
		case RegTypeBase:
			/* Argument passed entirely on the stack */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			} else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
				if (t->type == MONO_TYPE_R8) {
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
				} else {
#ifdef MONO_ARCH_SOFT_FLOAT
					/* The r4 value is already in an int vreg */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
#else
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
#endif
				}
			} else {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			}
			break;
		case RegTypeBaseGen:
			/* 64 bit value split between the last param reg (r3) and the stack */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
			} else if (!t->byref && (t->type == MONO_TYPE_R8)) {
				int creg;

#ifdef MONO_ARCH_SOFT_FLOAT
				g_assert_not_reached ();
#endif

				/* Spill the double: low word goes to r3, high word to the stack slot */
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
				creg = mono_alloc_ireg (cfg);
				mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
				creg = mono_alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
			} else {
				g_assert_not_reached ();
			}
			break;
		case RegTypeFP: {
			/* FIXME: */
			NOT_IMPLEMENTED;
#if 0
			arg->backend.reg3 = ainfo->reg;
			/* FP args are passed in int regs */
			call->used_iregs |= 1 << ainfo->reg;
			if (ainfo->size == 8) {
				arg->opcode = OP_OUTARG_R8;
				call->used_iregs |= 1 << (ainfo->reg + 1);
			} else {
				arg->opcode = OP_OUTARG_R4;
			}
#endif
			cfg->flags |= MONO_CFG_HAS_FPOUT;
			break;
		}
		default:
			g_assert_not_reached ();
		}
	}

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		MonoInst *vtarg;

		if (cinfo->ret.storage == RegTypeStructByVal) {
			/* The JIT will transform this into a normal call */
			call->vret_in_reg = TRUE;
		} else {
			/* Pass the address of the return buffer as a hidden argument */
			MONO_INST_NEW (cfg, vtarg, OP_MOVE);
			vtarg->sreg1 = call->vret_var->dreg;
			vtarg->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, vtarg);

			mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
		}
	}

	call->stack_usage = cinfo->stack_usage;

	g_free (cinfo);
}
1606 void
1607 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1609 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1610 ArgInfo *ainfo = ins->inst_p1;
1611 int ovf_size = ainfo->vtsize;
1612 int doffset = ainfo->offset;
1613 int i, soffset, dreg;
1615 soffset = 0;
1616 for (i = 0; i < ainfo->size; ++i) {
1617 dreg = mono_alloc_ireg (cfg);
1618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1619 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1620 soffset += sizeof (gpointer);
1622 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1623 if (ovf_size != 0)
1624 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the instructions which move VAL into the return location expected
 * by the ARM calling convention: OP_SETLRET for 64 bit values, fp-specific
 * handling depending on the FPU configuration, OP_MOVE otherwise.
 */
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
	MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);

	if (!ret->byref) {
		if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
			MonoInst *ins;

			if (COMPILE_LLVM (cfg)) {
				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
			} else {
				/* 64 bit values live in a vreg pair (dreg + 1, dreg + 2) */
				MONO_INST_NEW (cfg, ins, OP_SETLRET);
				ins->sreg1 = val->dreg + 1;
				ins->sreg2 = val->dreg + 2;
				MONO_ADD_INS (cfg->cbb, ins);
			}
			return;
		}
#ifdef MONO_ARCH_SOFT_FLOAT
		if (ret->type == MONO_TYPE_R8) {
			MonoInst *ins;

			MONO_INST_NEW (cfg, ins, OP_SETFRET);
			ins->dreg = cfg->ret->dreg;
			ins->sreg1 = val->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
			return;
		}
		if (ret->type == MONO_TYPE_R4) {
			/* Already converted to an int in method_to_ir () */
			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
			return;
		}
#elif defined(ARM_FPU_VFP)
		if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
			MonoInst *ins;

			MONO_INST_NEW (cfg, ins, OP_SETFRET);
			ins->dreg = cfg->ret->dreg;
			ins->sreg1 = val->dreg;
			MONO_ADD_INS (cfg->cbb, ins);
			return;
		}
#else
		/* FPA: the fp return register can be set with a plain fp move */
		if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
			return;
		}
#endif
	}

	/* FIXME: */
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
}
1683 #endif /* #ifndef DISABLE_JIT */
1685 gboolean
1686 mono_arch_is_inst_imm (gint64 imm)
1688 return TRUE;
/* Number of stack argument slots supported by the dyn call code */
#define DYN_CALL_STACK_ARGS 6

/* Precomputed info for one dynamically-callable signature */
typedef struct {
	MonoMethodSignature *sig;
	CallInfo *cinfo;
} ArchDynCallInfo;

/* Argument/result buffer used by start_dyn_call/finish_dyn_call:
 * regs holds the PARAM_REGS register values followed by the stack slots. */
typedef struct {
	mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
	mgreg_t res, res2;
	guint8 *ret;
} DynCallArgs;
/*
 * dyn_call_supported:
 *
 *   Return whether SIG can be invoked through the dyn call mechanism, i.e.
 * whether every argument and the return value fit the storage classes that
 * start_dyn_call () / finish_dyn_call () know how to marshal.
 */
static gboolean
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
{
	int i;

	/* Everything must fit in the register slots plus the supported stack slots */
	if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
		return FALSE;

	switch (cinfo->ret.storage) {
	case RegTypeNone:
	case RegTypeGeneral:
	case RegTypeIRegPair:
	case RegTypeStructByAddr:
		break;
	case RegTypeFP:
		/* fp returns are only handled on VFP (see finish_dyn_call) */
#ifdef ARM_FPU_FPA
		return FALSE;
#elif defined(ARM_FPU_VFP)
		break;
#else
		return FALSE;
#endif
	default:
		return FALSE;
	}

	for (i = 0; i < cinfo->nargs; ++i) {
		switch (cinfo->args [i].storage) {
		case RegTypeGeneral:
			break;
		case RegTypeIRegPair:
			break;
		case RegTypeBase:
			/* Stack arguments must fall inside the supported stack slot area */
			if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
				return FALSE;
			break;
		case RegTypeStructByVal:
			if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
				return FALSE;
			break;
		default:
			return FALSE;
		}
	}

	// FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
	for (i = 0; i < sig->param_count; ++i) {
		MonoType *t = sig->params [i];

		if (t->byref)
			continue;

		switch (t->type) {
		case MONO_TYPE_R4:
		case MONO_TYPE_R8:
#ifdef MONO_ARCH_SOFT_FLOAT
			return FALSE;
#else
			break;
#endif
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			return FALSE;
		default:
			break;
		}
	}

	return TRUE;
}
1777 MonoDynCallInfo*
1778 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1780 ArchDynCallInfo *info;
1781 CallInfo *cinfo;
1783 cinfo = get_call_info (NULL, sig, FALSE);
1785 if (!dyn_call_supported (cinfo, sig)) {
1786 g_free (cinfo);
1787 return NULL;
1790 info = g_new0 (ArchDynCallInfo, 1);
1791 // FIXME: Preprocess the info to speed up start_dyn_call ()
1792 info->sig = sig;
1793 info->cinfo = cinfo;
1795 return (MonoDynCallInfo*)info;
1798 void
1799 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1801 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1803 g_free (ainfo->cinfo);
1804 g_free (ainfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the boxed arguments in ARGS into BUF (a DynCallArgs structure),
 * placing each value into the register or stack slot assigned to it by
 * get_call_info ().  RET receives the address where finish_dyn_call ()
 * should store the result.
 */
void
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
{
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	DynCallArgs *p = (DynCallArgs*)buf;
	int arg_index, greg, i, j;
	MonoMethodSignature *sig = dinfo->sig;

	g_assert (buf_len >= sizeof (DynCallArgs));

	p->res = 0;
	p->ret = ret;

	arg_index = 0;
	greg = 0;

	/* A vtype return is passed as a hidden first argument pointing at RET */
	if (dinfo->cinfo->vtype_retaddr)
		p->regs [greg ++] = (mgreg_t)ret;

	if (sig->hasthis)
		p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);

	for (i = 0; i < sig->param_count; i++) {
		MonoType *t = mono_type_get_underlying_type (sig->params [i]);
		gpointer *arg = args [arg_index ++];
		ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
		int slot = -1;

		/* Stack arguments go after the PARAM_REGS register slots in p->regs */
		if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
			slot = ainfo->reg;
		else if (ainfo->storage == RegTypeBase)
			slot = PARAM_REGS + (ainfo->offset / 4);
		else
			g_assert_not_reached ();

		if (t->byref) {
			p->regs [slot] = (mgreg_t)*arg;
			continue;
		}

		switch (t->type) {
		case MONO_TYPE_STRING:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_PTR:
		case MONO_TYPE_I:
		case MONO_TYPE_U:
			p->regs [slot] = (mgreg_t)*arg;
			break;
		case MONO_TYPE_BOOLEAN:
		case MONO_TYPE_U1:
			p->regs [slot] = *(guint8*)arg;
			break;
		case MONO_TYPE_I1:
			p->regs [slot] = *(gint8*)arg;
			break;
		case MONO_TYPE_I2:
			p->regs [slot] = *(gint16*)arg;
			break;
		case MONO_TYPE_U2:
		case MONO_TYPE_CHAR:
			p->regs [slot] = *(guint16*)arg;
			break;
		case MONO_TYPE_I4:
			p->regs [slot] = *(gint32*)arg;
			break;
		case MONO_TYPE_U4:
			p->regs [slot] = *(guint32*)arg;
			break;
		case MONO_TYPE_I8:
		case MONO_TYPE_U8:
			/* 64 bit values occupy two consecutive slots */
			p->regs [slot ++] = (mgreg_t)arg [0];
			p->regs [slot] = (mgreg_t)arg [1];
			break;
		case MONO_TYPE_R4:
			p->regs [slot] = *(mgreg_t*)arg;
			break;
		case MONO_TYPE_R8:
			p->regs [slot ++] = (mgreg_t)arg [0];
			p->regs [slot] = (mgreg_t)arg [1];
			break;
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [slot] = (mgreg_t)*arg;
				break;
			} else {
				/* Fall through to the valuetype case */
			}
		case MONO_TYPE_VALUETYPE:
			g_assert (ainfo->storage == RegTypeStructByVal);

			/* size == 0 means the struct is passed entirely on the stack */
			if (ainfo->size == 0)
				slot = PARAM_REGS + (ainfo->offset / 4);
			else
				slot = ainfo->reg;

			for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
				p->regs [slot ++] = ((mgreg_t*)arg) [j];
			break;
		default:
			g_assert_not_reached ();
		}
	}
}
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the result of a dyn call (left in BUF by the invoke trampoline as
 * res/res2, i.e. the r0/r1 register values) into the return buffer recorded
 * by start_dyn_call (), converting according to the return type.
 */
void
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
{
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
	MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
	guint8 *ret = ((DynCallArgs*)buf)->ret;
	mgreg_t res = ((DynCallArgs*)buf)->res;
	mgreg_t res2 = ((DynCallArgs*)buf)->res2;

	switch (mono_type_get_underlying_type (sig->ret)->type) {
	case MONO_TYPE_VOID:
		*(gpointer*)ret = NULL;
		break;
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_PTR:
		*(gpointer*)ret = (gpointer)res;
		break;
	case MONO_TYPE_I1:
		*(gint8*)ret = res;
		break;
	case MONO_TYPE_U1:
	case MONO_TYPE_BOOLEAN:
		*(guint8*)ret = res;
		break;
	case MONO_TYPE_I2:
		*(gint16*)ret = res;
		break;
	case MONO_TYPE_U2:
	case MONO_TYPE_CHAR:
		*(guint16*)ret = res;
		break;
	case MONO_TYPE_I4:
		*(gint32*)ret = res;
		break;
	case MONO_TYPE_U4:
		*(guint32*)ret = res;
		break;
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
		/* This handles endianness as well */
		((gint32*)ret) [0] = res;
		((gint32*)ret) [1] = res2;
		break;
	case MONO_TYPE_GENERICINST:
		if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
			*(gpointer*)ret = (gpointer)res;
			break;
		} else {
			/* Fall through to the valuetype case */
		}
	case MONO_TYPE_VALUETYPE:
		/* The result was written through the hidden return address argument */
		g_assert (ainfo->cinfo->vtype_retaddr);
		/* Nothing to do */
		break;
#if defined(ARM_FPU_VFP)
	case MONO_TYPE_R4:
		*(float*)ret = *(float*)&res;
		break;
	case MONO_TYPE_R8: {
		mgreg_t regs [2];

		regs [0] = res;
		regs [1] = res2;

		*(double*)ret = *(double*)&regs;
		break;
	}
#endif
	default:
		g_assert_not_reached ();
	}
}
1993 #ifndef DISABLE_JIT
1996 * Allow tracing to work with this interface (with an optional argument)
/*
 * Emit the prolog-side trace call: load the method into r0, a placeholder
 * frame pointer into r1, and call FUNC.  Returns the updated code pointer.
 */
void*
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
	guchar *code = p;

	code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
	ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
	code = emit_call_reg (code, ARMREG_R2);
	return code;
}
/* How the return value must be preserved around the epilog trace call */
enum {
	SAVE_NONE,	/* nothing to save (void) */
	SAVE_STRUCT,	/* vtype return */
	SAVE_ONE,	/* single word in r0 */
	SAVE_TWO,	/* word pair in r0/r1 */
	SAVE_FP		/* floating point return */
};
2019 void*
2020 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2022 guchar *code = p;
2023 int save_mode = SAVE_NONE;
2024 int offset;
2025 MonoMethod *method = cfg->method;
2026 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2027 int save_offset = cfg->param_area;
2028 save_offset += 7;
2029 save_offset &= ~7;
2031 offset = code - cfg->native_code;
2032 /* we need about 16 instructions */
2033 if (offset > (cfg->code_size - 16 * 4)) {
2034 cfg->code_size *= 2;
2035 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2036 code = cfg->native_code + offset;
2038 switch (rtype) {
2039 case MONO_TYPE_VOID:
2040 /* special case string .ctor icall */
2041 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2042 save_mode = SAVE_ONE;
2043 else
2044 save_mode = SAVE_NONE;
2045 break;
2046 case MONO_TYPE_I8:
2047 case MONO_TYPE_U8:
2048 save_mode = SAVE_TWO;
2049 break;
2050 case MONO_TYPE_R4:
2051 case MONO_TYPE_R8:
2052 save_mode = SAVE_FP;
2053 break;
2054 case MONO_TYPE_VALUETYPE:
2055 save_mode = SAVE_STRUCT;
2056 break;
2057 default:
2058 save_mode = SAVE_ONE;
2059 break;
2062 switch (save_mode) {
2063 case SAVE_TWO:
2064 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2065 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2066 if (enable_arguments) {
2067 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2068 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2070 break;
2071 case SAVE_ONE:
2072 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2073 if (enable_arguments) {
2074 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2076 break;
2077 case SAVE_FP:
2078 /* FIXME: what reg? */
2079 if (enable_arguments) {
2080 /* FIXME: what reg? */
2082 break;
2083 case SAVE_STRUCT:
2084 if (enable_arguments) {
2085 /* FIXME: get the actual address */
2086 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2088 break;
2089 case SAVE_NONE:
2090 default:
2091 break;
2094 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2095 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2096 code = emit_call_reg (code, ARMREG_IP);
2098 switch (save_mode) {
2099 case SAVE_TWO:
2100 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2101 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2102 break;
2103 case SAVE_ONE:
2104 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2105 break;
2106 case SAVE_FP:
2107 /* FIXME */
2108 break;
2109 case SAVE_NONE:
2110 default:
2111 break;
2114 return code;
/*
 * The immediate field for cond branches is big enough for all reasonable methods
 */
/* Emit a conditional branch to INS's true basic block.  The direct-offset
 * path is disabled (if (0 && ...)); a patch info entry is always added and
 * resolved later. */
#define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
if (0 && ins->inst_true_bb->native_offset) { \
	ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
} else { \
	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
	ARM_B_COND (code, (condcode), 0); \
}

#define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])

/* emit an exception if condition is fail
 *
 * We assign the extra code used to throw the implicit exceptions
 * to cfg->bb_exit as far as the big branch handling is concerned
 */
#define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
	do { \
		mono_add_patch_info (cfg, code - cfg->native_code, \
				MONO_PATCH_INFO_EXC, exc_name); \
		ARM_BL_COND (code, (condcode), 0); \
	} while (0);

#define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* No first-pass arch-specific peephole optimizations on ARM */
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
}
/*
 * mono_arch_peephole_pass_2:
 *
 *   Remove or simplify redundant load/store pairs inside BB: a load from a
 * slot just stored becomes a register move or is deleted, narrow loads after
 * narrow stores become sign/zero extensions, and trivial/inverse moves are
 * removed.
 */
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* Already done by an arch-independent pass */
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
			/*
			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					/* Loading back the value just stored: drop the load */
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;
				}

			/*
			 * Note: reg1 must be different from the basereg in the second load
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * -->
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			 */
			/* NOTE(review): the missing `else` before this `if` means both
			 * branches can be tested; preserved as-is. */
			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
					   || last_ins->opcode == OP_LOAD_MEMBASE) &&
			      ins->inst_basereg != last_ins->dreg &&
			      ins->inst_basereg == last_ins->inst_basereg &&
			      ins->inst_offset == last_ins->inst_offset) {

				if (ins->dreg == last_ins->dreg) {
					MONO_DELETE_INS (bb, ins);
					continue;
				} else {
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->dreg;
				}

				//g_assert_not_reached ();

#if 0
			/*
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * -->
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			 */
			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
						|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
				   ins->inst_basereg == last_ins->inst_destbasereg &&
				   ins->inst_offset == last_ins->inst_offset) {
				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
				ins->opcode = OP_ICONST;
				ins->inst_c0 = last_ins->inst_imm;
				g_assert_not_reached (); // check this rule
#endif
			}
			break;
		case OP_LOADU1_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* A byte load after a byte store to the same slot becomes an extension */
			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		case OP_LOADU2_MEMBASE:
		case OP_LOADI2_MEMBASE:
			/* Same as above, for 16 bit values */
			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
				ins->sreg1 = last_ins->sreg1;
			}
			break;
		case OP_MOVE:
			ins->opcode = OP_MOVE;
			/*
			 * OP_MOVE reg, reg
			 */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			/*
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			 */
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
				continue;
			}
			break;
		}
		last_ins = ins;
		ins = ins->next;
	}
	bb->last_ins = last_ins;
}
/*
 * the branch_cc_table should maintain the order of these
 * opcodes.
case CEE_BEQ:
case CEE_BGE:
case CEE_BGT:
case CEE_BLE:
case CEE_BLT:
case CEE_BNE_UN:
case CEE_BGE_UN:
case CEE_BGT_UN:
case CEE_BLE_UN:
case CEE_BLT_UN:
*/
/* ARM condition codes indexed by (branch opcode - CEE_BEQ); the first five
 * entries are the signed compares, the last five the unsigned ones. */
static const guchar 
branch_cc_table [] = {
	ARMCOND_EQ, 
	ARMCOND_GE, 
	ARMCOND_GT, 
	ARMCOND_LE,
	ARMCOND_LT, 
	
	ARMCOND_NE, 
	ARMCOND_HS, 
	ARMCOND_HI, 
	ARMCOND_LS,
	ARMCOND_LO
};
/* Allocate a new instruction of opcode 'op' into 'dest' and insert it before
 * the instruction currently being lowered. NOTE: captures 'bb' and 'ins' from
 * the enclosing scope, so it is only usable inside mono_arch_lowering_pass. */
#define NEW_INS(cfg,dest,op) do {       \
		MONO_INST_NEW ((cfg), (dest), (op)); \
		mono_bblock_insert_before_ins (bb, ins, (dest)); \
	} while (0)
2301 static int
2302 map_to_reg_reg_op (int op)
2304 switch (op) {
2305 case OP_ADD_IMM:
2306 return OP_IADD;
2307 case OP_SUB_IMM:
2308 return OP_ISUB;
2309 case OP_AND_IMM:
2310 return OP_IAND;
2311 case OP_COMPARE_IMM:
2312 return OP_COMPARE;
2313 case OP_ICOMPARE_IMM:
2314 return OP_ICOMPARE;
2315 case OP_ADDCC_IMM:
2316 return OP_ADDCC;
2317 case OP_ADC_IMM:
2318 return OP_ADC;
2319 case OP_SUBCC_IMM:
2320 return OP_SUBCC;
2321 case OP_SBB_IMM:
2322 return OP_SBB;
2323 case OP_OR_IMM:
2324 return OP_IOR;
2325 case OP_XOR_IMM:
2326 return OP_IXOR;
2327 case OP_LOAD_MEMBASE:
2328 return OP_LOAD_MEMINDEX;
2329 case OP_LOADI4_MEMBASE:
2330 return OP_LOADI4_MEMINDEX;
2331 case OP_LOADU4_MEMBASE:
2332 return OP_LOADU4_MEMINDEX;
2333 case OP_LOADU1_MEMBASE:
2334 return OP_LOADU1_MEMINDEX;
2335 case OP_LOADI2_MEMBASE:
2336 return OP_LOADI2_MEMINDEX;
2337 case OP_LOADU2_MEMBASE:
2338 return OP_LOADU2_MEMINDEX;
2339 case OP_LOADI1_MEMBASE:
2340 return OP_LOADI1_MEMINDEX;
2341 case OP_STOREI1_MEMBASE_REG:
2342 return OP_STOREI1_MEMINDEX;
2343 case OP_STOREI2_MEMBASE_REG:
2344 return OP_STOREI2_MEMINDEX;
2345 case OP_STOREI4_MEMBASE_REG:
2346 return OP_STOREI4_MEMINDEX;
2347 case OP_STORE_MEMBASE_REG:
2348 return OP_STORE_MEMINDEX;
2349 case OP_STORER4_MEMBASE_REG:
2350 return OP_STORER4_MEMINDEX;
2351 case OP_STORER8_MEMBASE_REG:
2352 return OP_STORER8_MEMINDEX;
2353 case OP_STORE_MEMBASE_IMM:
2354 return OP_STORE_MEMBASE_REG;
2355 case OP_STOREI1_MEMBASE_IMM:
2356 return OP_STOREI1_MEMBASE_REG;
2357 case OP_STOREI2_MEMBASE_IMM:
2358 return OP_STOREI2_MEMBASE_REG;
2359 case OP_STOREI4_MEMBASE_IMM:
2360 return OP_STOREI4_MEMBASE_REG;
2362 g_assert_not_reached ();
/*
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;
	int rot_amount, imm8, low_imm;

	MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
		switch (ins->opcode) {
		case OP_ADD_IMM:
		case OP_SUB_IMM:
		case OP_AND_IMM:
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_ADDCC_IMM:
		case OP_ADC_IMM:
		case OP_SUBCC_IMM:
		case OP_SBB_IMM:
		case OP_OR_IMM:
		case OP_XOR_IMM:
		case OP_IADD_IMM:
		case OP_ISUB_IMM:
		case OP_IAND_IMM:
		case OP_IADC_IMM:
		case OP_ISBB_IMM:
		case OP_IOR_IMM:
		case OP_IXOR_IMM:
			/* ARM data-processing immediates are an 8-bit value rotated
			 * right by an even amount; anything else must be loaded into
			 * a register and the opcode switched to its reg-reg form. */
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			}
			/* the SBB/SUBCC cases below need a further fixup of the
			 * following conditional-exception opcode, so re-dispatch */
			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
				goto loop_start;
			else
				break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* strength-reduce multiplications by 1, 0 and powers of two */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
				ins->inst_c0 = 0;
				break;
			}
			imm8 = mono_is_power_of_two (ins->inst_imm);
			if (imm8 > 0) {
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm8;
				break;
			}
			/* general case: materialize the constant and use IMUL */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = OP_IMUL;
			break;
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC:
			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
			break;
		case OP_LOCALLOC_IMM:
			/* no immediate form: load the size into a register first */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADU1_MEMBASE:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an ad_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			 */
			if (arm_is_imm12 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* halfword/signed-byte loads only have an 8-bit offset */
			if (arm_is_imm8 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			/* split the offset into a rotated-imm8 base adjustment plus
			 * a small remainder that fits the FP load encoding */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_basereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = temp->dreg;
				ins->inst_offset = low_imm;
				break;
			}
			/* VFP/FPA doesn't have indexed load instructions */
			g_assert_not_reached ();
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
			if (arm_is_imm12 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STOREI2_MEMBASE_REG:
			/* halfword stores only have an 8-bit offset */
			if (arm_is_imm8 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_destbasereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_destbasereg = temp->dreg;
				ins->inst_offset = low_imm;
				break;
			}
			/*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
			/* VFP/FPA doesn't have indexed store instructions */
			g_assert_not_reached ();
			break;
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			/* load the stored value into a register, then re-dispatch as
			 * a _REG store so its offset gets checked too */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			last_ins = temp;
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
		case OP_FCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/* Some fp compares require swapped operands */
			switch (ins->next->opcode) {
			case OP_FBGT:
				ins->next->opcode = OP_FBLT;
				swap = TRUE;
				break;
			case OP_FBGT_UN:
				ins->next->opcode = OP_FBLT_UN;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			case OP_FBLE_UN:
				ins->next->opcode = OP_FBGE_UN;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg;
			}
			break;
		}
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
2586 void
2587 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2589 MonoInst *ins;
2591 if (long_ins->opcode == OP_LNEG) {
2592 ins = long_ins;
2593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
2595 NULLIFY_INS (ins);
/*
 * emit_float_to_int:
 *
 *   Emit code converting the FP value in SREG to an integer in DREG, then
 * truncate the result in place to SIZE bytes (1 or 2; other sizes are left
 * as a full word), sign- or zero-extending according to IS_SIGNED.
 * Returns the updated code pointer.
 */
static guchar*
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
{
	/* sreg is a float, dreg is an integer reg  */
#ifdef ARM_FPU_FPA
	/* NOTE: the FPA path uses FIXZ for both signed and unsigned —
	 * is_signed only affects the extension code below */
	ARM_FIXZ (code, dreg, sreg);
#elif defined(ARM_FPU_VFP)
	/* convert into scratch FP reg F0, then move the bits to dreg */
	if (is_signed)
		ARM_TOSIZD (code, ARM_VFP_F0, sreg);
	else
		ARM_TOUIZD (code, ARM_VFP_F0, sreg);
	ARM_FMRS (code, dreg, ARM_VFP_F0);
#endif
	if (!is_signed) {
		/* zero-extend: mask for bytes, shift pair for halfwords */
		if (size == 1)
			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
		else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SHR_IMM (code, dreg, dreg, 16);
		}
	} else {
		/* sign-extend via shift-left then arithmetic shift-right */
		if (size == 1) {
			ARM_SHL_IMM (code, dreg, dreg, 24);
			ARM_SAR_IMM (code, dreg, dreg, 24);
		} else if (size == 2) {
			ARM_SHL_IMM (code, dreg, dreg, 16);
			ARM_SAR_IMM (code, dreg, dreg, 16);
		}
	}
	return code;
}
2631 #endif /* #ifndef DISABLE_JIT */
/*
 * State shared between handle_thunk () and the search_thunk_slot ()
 * iterator while patching the call at 'code' to reach 'target' through
 * a branch thunk.
 */
typedef struct {
	guchar *code;         /* address of the call/branch being patched */
	const guchar *target; /* final destination the thunk must branch to */
	int absolute;
	int found;            /* 0: not yet; 1: patched; 2: allow any free slot (second pass) */
} PatchData;

/* True if diff fits the signed 26-bit (24-bit word offset) range of ARM B/BL: +/-32MB */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   Callback for mono_domain_code_foreach (): scan one code chunk (DATA,
 * BSIZE bytes total, CSIZE of them presumably in use — confirm against the
 * iterator) for an existing thunk branching to pdata->target, or for a free
 * 3-word slot in which to emit a new one. On success, patches the call at
 * pdata->code to branch to the thunk, sets pdata->found to 1 and returns 1
 * to stop the iteration; returns 0 to keep scanning other chunks.
 */
static int
search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
	PatchData *pdata = (PatchData*)user_data;
	guchar *code = data;
	guint32 *thunks = data;
	guint32 *endthunks = (guint32*)(code + bsize);
	int count = 0;
	int difflow, diffhigh;

	/* always ensure a call from pdata->code can reach to the thunks without further thunks */
	difflow = (char*)pdata->code - (char*)thunks;
	diffhigh = (char*)pdata->code - (char*)endthunks;
	if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
		return 0;

	/*
	 * The thunk is composed of 3 words:
	 * load constant from thunks [2] into ARM_IP
	 * bx to ARM_IP
	 * address constant
	 * Note that the LR register is already setup
	 */
	//g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
	/* first pass (found != 2) only considers the chunk containing the caller */
	if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
		while (thunks < endthunks) {
			//g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
			if (thunks [2] == (guint32)pdata->target) {
				/* reuse an existing thunk for this target */
				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
				/* found a free slot instead: emit thunk */
				/* ARMREG_IP is fine to use since this can't be an IMT call
				 * which is indirect
				 */
				code = (guchar*)thunks;
				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
				if (thumb_supported)
					ARM_BX (code, ARMREG_IP);
				else
					ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
				thunks [2] = (guint32)pdata->target;
				mono_arch_flush_icache ((guchar*)thunks, 12);

				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			}
			/* skip 12 bytes, the size of the thunk */
			thunks += 3;
			count++;
		}
		//g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
	}
	return 0;
}
2701 static void
2702 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2704 PatchData pdata;
2706 if (!domain)
2707 domain = mono_domain_get ();
2709 pdata.code = code;
2710 pdata.target = target;
2711 pdata.absolute = absolute;
2712 pdata.found = 0;
2714 mono_domain_lock (domain);
2715 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2717 if (!pdata.found) {
2718 /* this uses the first available slot */
2719 pdata.found = 2;
2720 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2722 mono_domain_unlock (domain);
2724 if (pdata.found != 1)
2725 g_print ("thunk failed for %p from %p\n", target, code);
2726 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the instruction (sequence) at CODE so control transfers to TARGET.
 * Handles direct B/BL (rewriting the 24-bit displacement, turning BL into
 * BLX when TARGET has the Thumb bit set) and the several load-constant call
 * sequences emitted by this backend; falls back to a branch thunk when the
 * target is out of direct-branch range.
 */
static void
arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
{
	guint32 *code32 = (void*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7;
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b: B/BL */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tbits;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			if (diff & 2) {
				tbits |= 1 << 24;
			}
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		} else {
			tbits = 0;
		}
		if (diff >= 0) {
			if (diff <= 33554431) {
				diff >>= 2;
				ins = (ins & 0xff000000) | diff;
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		} else {
			/* diff between 0 and -33554432 */
			if (diff >= -33554432) {
				diff >>= 2;
				ins = (ins & 0xff000000) | (diff & ~0xff000000);
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		}

		/* out of range for a direct branch: go through a thunk */
		handle_thunk (domain, TRUE, code, target);
		return;
	}

	/*
	 * The alternative call sequences looks like this:
	 *
	 * 	ldr ip, [pc] // loads the address constant
	 * 	b 1f         // jumps around the constant
	 * 	address constant embedded in the code
	 * 1f:
	 * 	mov lr, pc
	 * 	mov pc, ip
	 *
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 *    of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 *    to the mov pc, ip instruction
	 *
	 * We have to handle also the thunk jump code sequence:
	 *
	 * 	ldr ip, [pc]
	 * 	mov pc, ip
	 * 	address constant // execution never reaches here
	 */
	if ((ins & 0x0ffffff0) == 0x12fff10) { /* BX encoding */
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		 *  b       0xc
		 *  .word code_ptr
		 *  mov     lr, pc
		 *  bx      ips
		 * */
		/* re-emit the expected sequence into ccode so the words at the
		 * patch site can be compared against it */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)target;
			return;
		}
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) { /* BLX encoding */
		/*
		 * ldr ip, [pc, #0]
		 * b 0xc
		 * .word code_ptr
		 * blx ip
		 */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		/* the address constant sits right before the blx */
		code32 [-1] = (guint32)target;
	} else {
		/* the mov lr, pc / mov pc, ip style sequence described above */
		guint32 ccode [4];
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}
/*
 * arm_patch:
 *
 *   Patch the call/branch at CODE to transfer to TARGET; thunks, if needed,
 * are allocated in the current domain (NULL is resolved by handle_thunk ()).
 */
void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, code, target);
}
2883 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2884 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2885 * to be used with the emit macros.
2886 * Return -1 otherwise.
2889 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2891 guint32 res, i;
2892 for (i = 0; i < 31; i+= 2) {
2893 res = (val << (32 - i)) | (val >> i);
2894 if (res & ~0xff)
2895 continue;
2896 *rot_amount = i? 32 - i: 0;
2897 return res;
2899 return -1;
/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 * Returns the updated code pointer.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		/* single MOV with a rotated 8-bit immediate */
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		/* the complement fits: use MVN */
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			/* ARMv7: MOVW low halfword, MOVT high halfword if non-zero */
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		/* pre-v7 fallback: build the value byte by byte, starting from the
		 * lowest non-zero byte (MOV) and ORing in the rest via rotated ADDs */
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}
/* Returns the cached thumb_supported flag (set during CPU detection,
 * elsewhere in this file). */
gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}
2965 #ifndef DISABLE_JIT
/*
 * emit_load_volatile_arguments:
 *
 *  Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 * Mirrors the argument-save code in emit_prolog; several storage kinds are
 * still NOT_IMPLEMENTED. Returns the updated code pointer.
 */
static guint8*
emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *inst;
	CallInfo *cinfo;
	guint32 i, pos;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	/* This is the opposite of the code in emit_prolog */

	pos = 0;

	cinfo = get_call_info (NULL, sig, sig->pinvoke);

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* reload the hidden valuetype-return address argument */
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	}
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
		if (inst->opcode == OP_REGVAR) {
			/* argument lives in a register */
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				// FIXME:
				NOT_IMPLEMENTED;
				/*
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				}
				*/
			} else
				g_assert_not_reached ();
		} else {
			/* argument was spilled to the stack: reload into its register(s) */
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
				switch (ainfo->size) {
				case 1:
				case 2:
					// FIXME:
					NOT_IMPLEMENTED;
					break;
				case 8:
					/* 64-bit argument: reload both words of the register pair */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
			} else if (ainfo->storage == RegTypeBaseGen) {
				// FIXME:
				NOT_IMPLEMENTED;
			} else if (ainfo->storage == RegTypeBase) {
				/* Nothing to do */
			} else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeStructByVal) {
				/* struct passed (partially) in registers: reload each word */
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				if (mono_class_from_mono_type (inst->inst_vtype))
					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					}
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				}
				if (ainfo->vtsize)
					// FIXME:
					NOT_IMPLEMENTED;
			} else if (ainfo->storage == RegTypeStructByAddr) {
			} else {
				// FIXME:
				NOT_IMPLEMENTED;
			}
		}
		pos ++;
	}

	g_free (cinfo);

	return code;
}
3086 void
3087 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3089 MonoInst *ins;
3090 MonoCallInst *call;
3091 guint offset;
3092 guint8 *code = cfg->native_code + cfg->code_len;
3093 MonoInst *last_ins = NULL;
3094 guint last_offset = 0;
3095 int max_len, cpos;
3096 int imm8, rot_amount;
3098 /* we don't align basic blocks of loops on arm */
3100 if (cfg->verbose_level > 2)
3101 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3103 cpos = bb->max_offset;
3105 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3106 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3107 //g_assert (!mono_compile_aot);
3108 //cpos += 6;
3109 //if (bb->cil_code)
3110 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3111 /* this is not thread save, but good enough */
3112 /* fixme: howto handle overflows? */
3113 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3116 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3117 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3118 (gpointer)"mono_break");
3119 code = emit_call_seq (cfg, code);
3122 MONO_BB_FOR_EACH_INS (bb, ins) {
3123 offset = code - cfg->native_code;
3125 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3127 if (offset > (cfg->code_size - max_len - 16)) {
3128 cfg->code_size *= 2;
3129 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3130 code = cfg->native_code + offset;
3132 // if (ins->cil_code)
3133 // g_print ("cil code\n");
3134 mono_debug_record_line_number (cfg, ins, offset);
3136 switch (ins->opcode) {
3137 case OP_MEMORY_BARRIER:
3138 break;
3139 case OP_TLS_GET:
3140 #ifdef HAVE_AEABI_READ_TP
3141 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3142 (gpointer)"__aeabi_read_tp");
3143 code = emit_call_seq (cfg, code);
3145 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3146 #else
3147 g_assert_not_reached ();
3148 #endif
3149 break;
3150 /*case OP_BIGMUL:
3151 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3152 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3153 break;
3154 case OP_BIGMUL_UN:
3155 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3156 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3157 break;*/
3158 case OP_STOREI1_MEMBASE_IMM:
3159 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3160 g_assert (arm_is_imm12 (ins->inst_offset));
3161 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3162 break;
3163 case OP_STOREI2_MEMBASE_IMM:
3164 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3165 g_assert (arm_is_imm8 (ins->inst_offset));
3166 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3167 break;
3168 case OP_STORE_MEMBASE_IMM:
3169 case OP_STOREI4_MEMBASE_IMM:
3170 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3171 g_assert (arm_is_imm12 (ins->inst_offset));
3172 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3173 break;
3174 case OP_STOREI1_MEMBASE_REG:
3175 g_assert (arm_is_imm12 (ins->inst_offset));
3176 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3177 break;
3178 case OP_STOREI2_MEMBASE_REG:
3179 g_assert (arm_is_imm8 (ins->inst_offset));
3180 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3181 break;
3182 case OP_STORE_MEMBASE_REG:
3183 case OP_STOREI4_MEMBASE_REG:
3184 /* this case is special, since it happens for spill code after lowering has been called */
3185 if (arm_is_imm12 (ins->inst_offset)) {
3186 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3187 } else {
3188 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3189 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3191 break;
3192 case OP_STOREI1_MEMINDEX:
3193 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3194 break;
3195 case OP_STOREI2_MEMINDEX:
3196 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3197 break;
3198 case OP_STORE_MEMINDEX:
3199 case OP_STOREI4_MEMINDEX:
3200 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3201 break;
3202 case OP_LOADU4_MEM:
3203 g_assert_not_reached ();
3204 break;
3205 case OP_LOAD_MEMINDEX:
3206 case OP_LOADI4_MEMINDEX:
3207 case OP_LOADU4_MEMINDEX:
3208 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3209 break;
3210 case OP_LOADI1_MEMINDEX:
3211 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3212 break;
3213 case OP_LOADU1_MEMINDEX:
3214 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3215 break;
3216 case OP_LOADI2_MEMINDEX:
3217 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3218 break;
3219 case OP_LOADU2_MEMINDEX:
3220 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3221 break;
3222 case OP_LOAD_MEMBASE:
3223 case OP_LOADI4_MEMBASE:
3224 case OP_LOADU4_MEMBASE:
3225 /* this case is special, since it happens for spill code after lowering has been called */
3226 if (arm_is_imm12 (ins->inst_offset)) {
3227 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3228 } else {
3229 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3230 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3232 break;
3233 case OP_LOADI1_MEMBASE:
3234 g_assert (arm_is_imm8 (ins->inst_offset));
3235 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3236 break;
3237 case OP_LOADU1_MEMBASE:
3238 g_assert (arm_is_imm12 (ins->inst_offset));
3239 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3240 break;
3241 case OP_LOADU2_MEMBASE:
3242 g_assert (arm_is_imm8 (ins->inst_offset));
3243 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3244 break;
3245 case OP_LOADI2_MEMBASE:
3246 g_assert (arm_is_imm8 (ins->inst_offset));
3247 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3248 break;
3249 case OP_ICONV_TO_I1:
3250 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3251 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3252 break;
3253 case OP_ICONV_TO_I2:
3254 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3255 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3256 break;
3257 case OP_ICONV_TO_U1:
3258 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3259 break;
3260 case OP_ICONV_TO_U2:
3261 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3262 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3263 break;
3264 case OP_COMPARE:
3265 case OP_ICOMPARE:
3266 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3267 break;
3268 case OP_COMPARE_IMM:
3269 case OP_ICOMPARE_IMM:
3270 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3271 g_assert (imm8 >= 0);
3272 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3273 break;
3274 case OP_BREAK:
3276 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3277 * So instead of emitting a trap, we emit a call a C function and place a
3278 * breakpoint there.
3280 //*(int*)code = 0xef9f0001;
3281 //code += 4;
3282 //ARM_DBRK (code);
3283 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3284 (gpointer)"mono_break");
3285 code = emit_call_seq (cfg, code);
3286 break;
3287 case OP_RELAXED_NOP:
3288 ARM_NOP (code);
3289 break;
3290 case OP_NOP:
3291 case OP_DUMMY_USE:
3292 case OP_DUMMY_STORE:
3293 case OP_NOT_REACHED:
3294 case OP_NOT_NULL:
3295 break;
3296 case OP_SEQ_POINT: {
3297 int i;
3298 MonoInst *info_var = cfg->arch.seq_point_info_var;
3299 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3300 MonoInst *var;
3301 int dreg = ARMREG_LR;
3304 * For AOT, we use one got slot per method, which will point to a
3305 * SeqPointInfo structure, containing all the information required
3306 * by the code below.
3308 if (cfg->compile_aot) {
3309 g_assert (info_var);
3310 g_assert (info_var->opcode == OP_REGOFFSET);
3311 g_assert (arm_is_imm12 (info_var->inst_offset));
3315 * Read from the single stepping trigger page. This will cause a
3316 * SIGSEGV when single stepping is enabled.
3317 * We do this _before_ the breakpoint, so single stepping after
3318 * a breakpoint is hit will step to the next IL offset.
3320 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3322 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3323 if (cfg->compile_aot) {
3324 /* Load the trigger page addr from the variable initialized in the prolog */
3325 var = ss_trigger_page_var;
3326 g_assert (var);
3327 g_assert (var->opcode == OP_REGOFFSET);
3328 g_assert (arm_is_imm12 (var->inst_offset));
3329 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3330 } else {
3331 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3332 ARM_B (code, 0);
3333 *(int*)code = (int)ss_trigger_page;
3334 code += 4;
3336 ARM_LDR_IMM (code, dreg, dreg, 0);
3339 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3341 if (cfg->compile_aot) {
3342 guint32 offset = code - cfg->native_code;
3343 guint32 val;
3345 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3346 /* Add the offset */
3347 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3348 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3350 * Have to emit nops to keep the difference between the offset
3351 * stored in seq_points and breakpoint instruction constant,
3352 * mono_arch_get_ip_for_breakpoint () depends on this.
3354 if (val & 0xFF00)
3355 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3356 else
3357 ARM_NOP (code);
3358 if (val & 0xFF0000)
3359 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3360 else
3361 ARM_NOP (code);
3362 g_assert (!(val & 0xFF000000));
3363 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3364 ARM_LDR_IMM (code, dreg, dreg, 0);
3366 /* What is faster, a branch or a load ? */
3367 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3368 /* The breakpoint instruction */
3369 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3370 } else {
3372 * A placeholder for a possible breakpoint inserted by
3373 * mono_arch_set_breakpoint ().
3375 for (i = 0; i < 4; ++i)
3376 ARM_NOP (code);
3378 break;
3380 case OP_ADDCC:
3381 case OP_IADDCC:
3382 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3383 break;
3384 case OP_IADD:
3385 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3386 break;
3387 case OP_ADC:
3388 case OP_IADC:
3389 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3390 break;
3391 case OP_ADDCC_IMM:
3392 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3393 g_assert (imm8 >= 0);
3394 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3395 break;
3396 case OP_ADD_IMM:
3397 case OP_IADD_IMM:
3398 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3399 g_assert (imm8 >= 0);
3400 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3401 break;
3402 case OP_ADC_IMM:
3403 case OP_IADC_IMM:
3404 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3405 g_assert (imm8 >= 0);
3406 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3407 break;
3408 case OP_IADD_OVF:
3409 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3410 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3411 break;
3412 case OP_IADD_OVF_UN:
3413 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3414 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3415 break;
3416 case OP_ISUB_OVF:
3417 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3418 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3419 break;
3420 case OP_ISUB_OVF_UN:
3421 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3422 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3423 break;
3424 case OP_ADD_OVF_CARRY:
3425 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3426 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3427 break;
3428 case OP_ADD_OVF_UN_CARRY:
3429 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3430 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3431 break;
3432 case OP_SUB_OVF_CARRY:
3433 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3434 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3435 break;
3436 case OP_SUB_OVF_UN_CARRY:
3437 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3438 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3439 break;
3440 case OP_SUBCC:
3441 case OP_ISUBCC:
3442 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3443 break;
3444 case OP_SUBCC_IMM:
3445 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3446 g_assert (imm8 >= 0);
3447 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3448 break;
3449 case OP_ISUB:
3450 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3451 break;
3452 case OP_SBB:
3453 case OP_ISBB:
3454 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3455 break;
3456 case OP_SUB_IMM:
3457 case OP_ISUB_IMM:
3458 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3459 g_assert (imm8 >= 0);
3460 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3461 break;
3462 case OP_SBB_IMM:
3463 case OP_ISBB_IMM:
3464 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3465 g_assert (imm8 >= 0);
3466 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3467 break;
3468 case OP_ARM_RSBS_IMM:
3469 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3470 g_assert (imm8 >= 0);
3471 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3472 break;
3473 case OP_ARM_RSC_IMM:
3474 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3475 g_assert (imm8 >= 0);
3476 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3477 break;
3478 case OP_IAND:
3479 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3480 break;
3481 case OP_AND_IMM:
3482 case OP_IAND_IMM:
3483 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3484 g_assert (imm8 >= 0);
3485 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3486 break;
3487 case OP_IDIV:
3488 case OP_IDIV_UN:
3489 case OP_DIV_IMM:
3490 case OP_IREM:
3491 case OP_IREM_UN:
3492 case OP_REM_IMM:
3493 /* crappy ARM arch doesn't have a DIV instruction */
3494 g_assert_not_reached ();
3495 case OP_IOR:
3496 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3497 break;
3498 case OP_OR_IMM:
3499 case OP_IOR_IMM:
3500 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3501 g_assert (imm8 >= 0);
3502 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3503 break;
3504 case OP_IXOR:
3505 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3506 break;
3507 case OP_XOR_IMM:
3508 case OP_IXOR_IMM:
3509 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3510 g_assert (imm8 >= 0);
3511 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3512 break;
3513 case OP_ISHL:
3514 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3515 break;
3516 case OP_SHL_IMM:
3517 case OP_ISHL_IMM:
3518 if (ins->inst_imm)
3519 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3520 else if (ins->dreg != ins->sreg1)
3521 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3522 break;
3523 case OP_ISHR:
3524 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3525 break;
3526 case OP_SHR_IMM:
3527 case OP_ISHR_IMM:
3528 if (ins->inst_imm)
3529 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3530 else if (ins->dreg != ins->sreg1)
3531 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3532 break;
3533 case OP_SHR_UN_IMM:
3534 case OP_ISHR_UN_IMM:
3535 if (ins->inst_imm)
3536 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3537 else if (ins->dreg != ins->sreg1)
3538 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3539 break;
3540 case OP_ISHR_UN:
3541 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3542 break;
3543 case OP_INOT:
3544 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3545 break;
3546 case OP_INEG:
3547 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3548 break;
3549 case OP_IMUL:
3550 if (ins->dreg == ins->sreg2)
3551 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3552 else
3553 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3554 break;
3555 case OP_MUL_IMM:
3556 g_assert_not_reached ();
3557 break;
3558 case OP_IMUL_OVF:
3559 /* FIXME: handle ovf/ sreg2 != dreg */
3560 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3561 /* FIXME: MUL doesn't set the C/O flags on ARM */
3562 break;
3563 case OP_IMUL_OVF_UN:
3564 /* FIXME: handle ovf/ sreg2 != dreg */
3565 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3566 /* FIXME: MUL doesn't set the C/O flags on ARM */
3567 break;
3568 case OP_ICONST:
3569 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3570 break;
3571 case OP_AOTCONST:
3572 /* Load the GOT offset */
3573 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3574 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3575 ARM_B (code, 0);
3576 *(gpointer*)code = NULL;
3577 code += 4;
3578 /* Load the value from the GOT */
3579 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3580 break;
3581 case OP_ICONV_TO_I4:
3582 case OP_ICONV_TO_U4:
3583 case OP_MOVE:
3584 if (ins->dreg != ins->sreg1)
3585 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3586 break;
3587 case OP_SETLRET: {
3588 int saved = ins->sreg2;
3589 if (ins->sreg2 == ARM_LSW_REG) {
3590 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3591 saved = ARMREG_LR;
3593 if (ins->sreg1 != ARM_LSW_REG)
3594 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3595 if (saved != ARM_MSW_REG)
3596 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3597 break;
3599 case OP_FMOVE:
3600 #ifdef ARM_FPU_FPA
3601 ARM_MVFD (code, ins->dreg, ins->sreg1);
3602 #elif defined(ARM_FPU_VFP)
3603 ARM_CPYD (code, ins->dreg, ins->sreg1);
3604 #endif
3605 break;
3606 case OP_FCONV_TO_R4:
3607 #ifdef ARM_FPU_FPA
3608 ARM_MVFS (code, ins->dreg, ins->sreg1);
3609 #elif defined(ARM_FPU_VFP)
3610 ARM_CVTD (code, ins->dreg, ins->sreg1);
3611 ARM_CVTS (code, ins->dreg, ins->dreg);
3612 #endif
3613 break;
3614 case OP_JMP:
3616 * Keep in sync with mono_arch_emit_epilog
3618 g_assert (!cfg->method->save_lmf);
3620 code = emit_load_volatile_arguments (cfg, code);
3622 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3623 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3624 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3625 if (cfg->compile_aot) {
3626 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3627 ARM_B (code, 0);
3628 *(gpointer*)code = NULL;
3629 code += 4;
3630 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3631 } else {
3632 ARM_B (code, 0);
3634 break;
3635 case OP_CHECK_THIS:
3636 /* ensure ins->sreg1 is not NULL */
3637 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3638 break;
3639 case OP_ARGLIST: {
3640 g_assert (cfg->sig_cookie < 128);
3641 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3642 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3643 break;
3645 case OP_FCALL:
3646 case OP_LCALL:
3647 case OP_VCALL:
3648 case OP_VCALL2:
3649 case OP_VOIDCALL:
3650 case OP_CALL:
3651 call = (MonoCallInst*)ins;
3652 if (ins->flags & MONO_INST_HAS_METHOD)
3653 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3654 else
3655 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3656 code = emit_call_seq (cfg, code);
3657 code = emit_move_return_value (cfg, ins, code);
3658 break;
3659 case OP_FCALL_REG:
3660 case OP_LCALL_REG:
3661 case OP_VCALL_REG:
3662 case OP_VCALL2_REG:
3663 case OP_VOIDCALL_REG:
3664 case OP_CALL_REG:
3665 code = emit_call_reg (code, ins->sreg1);
3666 code = emit_move_return_value (cfg, ins, code);
3667 break;
3668 case OP_FCALL_MEMBASE:
3669 case OP_LCALL_MEMBASE:
3670 case OP_VCALL_MEMBASE:
3671 case OP_VCALL2_MEMBASE:
3672 case OP_VOIDCALL_MEMBASE:
3673 case OP_CALL_MEMBASE:
3674 g_assert (arm_is_imm12 (ins->inst_offset));
3675 g_assert (ins->sreg1 != ARMREG_LR);
3676 call = (MonoCallInst*)ins;
3677 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3678 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3679 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3681 * We can't embed the method in the code stream in PIC code, or
3682 * in gshared code.
3683 * Instead, we put it in V5 in code emitted by
3684 * mono_arch_emit_imt_argument (), and embed NULL here to
3685 * signal the IMT thunk that the value is in V5.
3687 if (call->dynamic_imt_arg)
3688 *((gpointer*)code) = NULL;
3689 else
3690 *((gpointer*)code) = (gpointer)call->method;
3691 code += 4;
3692 } else {
3693 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3694 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3696 code = emit_move_return_value (cfg, ins, code);
3697 break;
3698 case OP_LOCALLOC: {
3699 /* keep alignment */
3700 int alloca_waste = cfg->param_area;
3701 alloca_waste += 7;
3702 alloca_waste &= ~7;
3703 /* round the size to 8 bytes */
3704 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3705 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3706 if (alloca_waste)
3707 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3708 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3709 /* memzero the area: dreg holds the size, sp is the pointer */
3710 if (ins->flags & MONO_INST_INIT) {
3711 guint8 *start_loop, *branch_to_cond;
3712 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3713 branch_to_cond = code;
3714 ARM_B (code, 0);
3715 start_loop = code;
3716 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3717 arm_patch (branch_to_cond, code);
3718 /* decrement by 4 and set flags */
3719 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3720 ARM_B_COND (code, ARMCOND_GE, 0);
3721 arm_patch (code - 4, start_loop);
3723 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3724 break;
3726 case OP_DYN_CALL: {
3727 int i;
3728 MonoInst *var = cfg->dyn_call_var;
3730 g_assert (var->opcode == OP_REGOFFSET);
3731 g_assert (arm_is_imm12 (var->inst_offset));
3733 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3734 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3735 /* ip = ftn */
3736 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3738 /* Save args buffer */
3739 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3741 /* Set stack slots using R0 as scratch reg */
3742 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3743 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3744 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3745 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3748 /* Set argument registers */
3749 for (i = 0; i < PARAM_REGS; ++i)
3750 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3752 /* Make the call */
3753 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3754 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3756 /* Save result */
3757 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3758 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3759 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3760 break;
3762 case OP_THROW: {
3763 if (ins->sreg1 != ARMREG_R0)
3764 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3765 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3766 (gpointer)"mono_arch_throw_exception");
3767 code = emit_call_seq (cfg, code);
3768 break;
3770 case OP_RETHROW: {
3771 if (ins->sreg1 != ARMREG_R0)
3772 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3773 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3774 (gpointer)"mono_arch_rethrow_exception");
3775 code = emit_call_seq (cfg, code);
3776 break;
3778 case OP_START_HANDLER: {
3779 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3781 if (arm_is_imm12 (spvar->inst_offset)) {
3782 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3783 } else {
3784 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3785 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3787 break;
3789 case OP_ENDFILTER: {
3790 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3792 if (ins->sreg1 != ARMREG_R0)
3793 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3794 if (arm_is_imm12 (spvar->inst_offset)) {
3795 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3796 } else {
3797 g_assert (ARMREG_IP != spvar->inst_basereg);
3798 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3799 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3801 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3802 break;
3804 case OP_ENDFINALLY: {
3805 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3807 if (arm_is_imm12 (spvar->inst_offset)) {
3808 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3809 } else {
3810 g_assert (ARMREG_IP != spvar->inst_basereg);
3811 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3812 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3814 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3815 break;
3817 case OP_CALL_HANDLER:
3818 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3819 ARM_BL (code, 0);
3820 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3821 break;
3822 case OP_LABEL:
3823 ins->inst_c0 = code - cfg->native_code;
3824 break;
3825 case OP_BR:
3826 /*if (ins->inst_target_bb->native_offset) {
3827 ARM_B (code, 0);
3828 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3829 } else*/ {
3830 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3831 ARM_B (code, 0);
3833 break;
3834 case OP_BR_REG:
3835 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3836 break;
3837 case OP_SWITCH:
3839 * In the normal case we have:
3840 * ldr pc, [pc, ins->sreg1 << 2]
3841 * nop
3842 * If aot, we have:
3843 * ldr lr, [pc, ins->sreg1 << 2]
3844 * add pc, pc, lr
3845 * After follows the data.
3846 * FIXME: add aot support.
3848 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3849 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3850 if (offset > (cfg->code_size - max_len - 16)) {
3851 cfg->code_size += max_len;
3852 cfg->code_size *= 2;
3853 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3854 code = cfg->native_code + offset;
3856 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3857 ARM_NOP (code);
3858 code += 4 * GPOINTER_TO_INT (ins->klass);
3859 break;
3860 case OP_CEQ:
3861 case OP_ICEQ:
3862 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3863 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3864 break;
3865 case OP_CLT:
3866 case OP_ICLT:
3867 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3868 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3869 break;
3870 case OP_CLT_UN:
3871 case OP_ICLT_UN:
3872 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3873 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3874 break;
3875 case OP_CGT:
3876 case OP_ICGT:
3877 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3878 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3879 break;
3880 case OP_CGT_UN:
3881 case OP_ICGT_UN:
3882 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3883 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3884 break;
3885 case OP_COND_EXC_EQ:
3886 case OP_COND_EXC_NE_UN:
3887 case OP_COND_EXC_LT:
3888 case OP_COND_EXC_LT_UN:
3889 case OP_COND_EXC_GT:
3890 case OP_COND_EXC_GT_UN:
3891 case OP_COND_EXC_GE:
3892 case OP_COND_EXC_GE_UN:
3893 case OP_COND_EXC_LE:
3894 case OP_COND_EXC_LE_UN:
3895 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3896 break;
3897 case OP_COND_EXC_IEQ:
3898 case OP_COND_EXC_INE_UN:
3899 case OP_COND_EXC_ILT:
3900 case OP_COND_EXC_ILT_UN:
3901 case OP_COND_EXC_IGT:
3902 case OP_COND_EXC_IGT_UN:
3903 case OP_COND_EXC_IGE:
3904 case OP_COND_EXC_IGE_UN:
3905 case OP_COND_EXC_ILE:
3906 case OP_COND_EXC_ILE_UN:
3907 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3908 break;
3909 case OP_COND_EXC_C:
3910 case OP_COND_EXC_IC:
3911 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3912 break;
3913 case OP_COND_EXC_OV:
3914 case OP_COND_EXC_IOV:
3915 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3916 break;
3917 case OP_COND_EXC_NC:
3918 case OP_COND_EXC_INC:
3919 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3920 break;
3921 case OP_COND_EXC_NO:
3922 case OP_COND_EXC_INO:
3923 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3924 break;
3925 case OP_IBEQ:
3926 case OP_IBNE_UN:
3927 case OP_IBLT:
3928 case OP_IBLT_UN:
3929 case OP_IBGT:
3930 case OP_IBGT_UN:
3931 case OP_IBGE:
3932 case OP_IBGE_UN:
3933 case OP_IBLE:
3934 case OP_IBLE_UN:
3935 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3936 break;
3938 /* floating point opcodes */
3939 #ifdef ARM_FPU_FPA
3940 case OP_R8CONST:
3941 if (cfg->compile_aot) {
3942 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3943 ARM_B (code, 1);
3944 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3945 code += 4;
3946 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3947 code += 4;
3948 } else {
3949 /* FIXME: we can optimize the imm load by dealing with part of
3950 * the displacement in LDFD (aligning to 512).
3952 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3953 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3955 break;
3956 case OP_R4CONST:
3957 if (cfg->compile_aot) {
3958 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3959 ARM_B (code, 0);
3960 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3961 code += 4;
3962 } else {
3963 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3964 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3966 break;
3967 case OP_STORER8_MEMBASE_REG:
3968 /* This is generated by the local regalloc pass which runs after the lowering pass */
3969 if (!arm_is_fpimm8 (ins->inst_offset)) {
3970 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3971 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3972 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3973 } else {
3974 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3976 break;
3977 case OP_LOADR8_MEMBASE:
3978 /* This is generated by the local regalloc pass which runs after the lowering pass */
3979 if (!arm_is_fpimm8 (ins->inst_offset)) {
3980 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3981 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3982 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3983 } else {
3984 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3986 break;
3987 case OP_STORER4_MEMBASE_REG:
3988 g_assert (arm_is_fpimm8 (ins->inst_offset));
3989 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3990 break;
3991 case OP_LOADR4_MEMBASE:
3992 g_assert (arm_is_fpimm8 (ins->inst_offset));
3993 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3994 break;
3995 case OP_ICONV_TO_R_UN: {
3996 int tmpreg;
3997 tmpreg = ins->dreg == 0? 1: 0;
3998 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3999 ARM_FLTD (code, ins->dreg, ins->sreg1);
4000 ARM_B_COND (code, ARMCOND_GE, 8);
4001 /* save the temp register */
4002 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4003 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
4004 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
4005 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
4006 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
4007 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4008 /* skip the constant pool */
4009 ARM_B (code, 8);
4010 code += 4;
4011 *(int*)code = 0x41f00000;
4012 code += 4;
4013 *(int*)code = 0;
4014 code += 4;
4015 /* FIXME: adjust:
4016 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4017 * adfltd fdest, fdest, ftemp
4019 break;
4021 case OP_ICONV_TO_R4:
4022 ARM_FLTS (code, ins->dreg, ins->sreg1);
4023 break;
4024 case OP_ICONV_TO_R8:
4025 ARM_FLTD (code, ins->dreg, ins->sreg1);
4026 break;
4028 #elif defined(ARM_FPU_VFP)
4030 case OP_R8CONST:
4031 if (cfg->compile_aot) {
4032 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4033 ARM_B (code, 1);
4034 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4035 code += 4;
4036 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4037 code += 4;
4038 } else {
4039 /* FIXME: we can optimize the imm load by dealing with part of
4040 * the displacement in LDFD (aligning to 512).
4042 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4043 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4045 break;
4046 case OP_R4CONST:
4047 if (cfg->compile_aot) {
4048 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4049 ARM_B (code, 0);
4050 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4051 code += 4;
4052 ARM_CVTS (code, ins->dreg, ins->dreg);
4053 } else {
4054 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4055 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4056 ARM_CVTS (code, ins->dreg, ins->dreg);
4058 break;
4059 case OP_STORER8_MEMBASE_REG:
4060 /* This is generated by the local regalloc pass which runs after the lowering pass */
4061 if (!arm_is_fpimm8 (ins->inst_offset)) {
4062 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4063 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4064 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4065 } else {
4066 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4068 break;
4069 case OP_LOADR8_MEMBASE:
4070 /* This is generated by the local regalloc pass which runs after the lowering pass */
4071 if (!arm_is_fpimm8 (ins->inst_offset)) {
4072 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4073 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4074 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4075 } else {
4076 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4078 break;
4079 case OP_STORER4_MEMBASE_REG:
4080 g_assert (arm_is_fpimm8 (ins->inst_offset));
4081 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4082 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4083 break;
4084 case OP_LOADR4_MEMBASE:
4085 g_assert (arm_is_fpimm8 (ins->inst_offset));
4086 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4087 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4088 break;
4089 case OP_ICONV_TO_R_UN: {
4090 g_assert_not_reached ();
4091 break;
4093 case OP_ICONV_TO_R4:
4094 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4095 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4096 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4097 break;
4098 case OP_ICONV_TO_R8:
4099 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4100 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4101 break;
4103 case OP_SETFRET:
4104 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4105 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4106 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4107 } else {
4108 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4110 break;
4112 #endif
4114 case OP_FCONV_TO_I1:
4115 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4116 break;
4117 case OP_FCONV_TO_U1:
4118 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4119 break;
4120 case OP_FCONV_TO_I2:
4121 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4122 break;
4123 case OP_FCONV_TO_U2:
4124 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4125 break;
4126 case OP_FCONV_TO_I4:
4127 case OP_FCONV_TO_I:
4128 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4129 break;
4130 case OP_FCONV_TO_U4:
4131 case OP_FCONV_TO_U:
4132 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4133 break;
4134 case OP_FCONV_TO_I8:
4135 case OP_FCONV_TO_U8:
4136 g_assert_not_reached ();
4137 /* Implemented as helper calls */
4138 break;
4139 case OP_LCONV_TO_R_UN:
4140 g_assert_not_reached ();
4141 /* Implemented as helper calls */
4142 break;
4143 case OP_LCONV_TO_OVF_I4_2: {
4144 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4146 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4149 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4150 high_bit_not_set = code;
4151 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4153 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4154 valid_negative = code;
4155 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4156 invalid_negative = code;
4157 ARM_B_COND (code, ARMCOND_AL, 0);
4159 arm_patch (high_bit_not_set, code);
4161 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4162 valid_positive = code;
4163 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4165 arm_patch (invalid_negative, code);
4166 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4168 arm_patch (valid_negative, code);
4169 arm_patch (valid_positive, code);
4171 if (ins->dreg != ins->sreg1)
4172 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4173 break;
4175 #ifdef ARM_FPU_FPA
4176 case OP_FADD:
4177 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4178 break;
4179 case OP_FSUB:
4180 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4181 break;
4182 case OP_FMUL:
4183 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4184 break;
4185 case OP_FDIV:
4186 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4187 break;
4188 case OP_FNEG:
4189 ARM_MNFD (code, ins->dreg, ins->sreg1);
4190 break;
4191 #elif defined(ARM_FPU_VFP)
4192 case OP_FADD:
4193 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4194 break;
4195 case OP_FSUB:
4196 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4197 break;
4198 case OP_FMUL:
4199 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4200 break;
4201 case OP_FDIV:
4202 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4203 break;
4204 case OP_FNEG:
4205 ARM_NEGD (code, ins->dreg, ins->sreg1);
4206 break;
4207 #endif
4208 case OP_FREM:
4209 /* emulated */
4210 g_assert_not_reached ();
4211 break;
4212 case OP_FCOMPARE:
4213 #ifdef ARM_FPU_FPA
4214 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4215 #elif defined(ARM_FPU_VFP)
4216 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4217 ARM_FMSTAT (code);
4218 #endif
4219 break;
4220 case OP_FCEQ:
4221 #ifdef ARM_FPU_FPA
4222 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4223 #elif defined(ARM_FPU_VFP)
4224 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4225 ARM_FMSTAT (code);
4226 #endif
4227 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4228 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4229 break;
4230 case OP_FCLT:
4231 #ifdef ARM_FPU_FPA
4232 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4233 #elif defined(ARM_FPU_VFP)
4234 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4235 ARM_FMSTAT (code);
4236 #endif
4237 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4238 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4239 break;
4240 case OP_FCLT_UN:
4241 #ifdef ARM_FPU_FPA
4242 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4243 #elif defined(ARM_FPU_VFP)
4244 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4245 ARM_FMSTAT (code);
4246 #endif
4247 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4248 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4249 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4250 break;
4251 case OP_FCGT:
4252 /* swapped */
4253 #ifdef ARM_FPU_FPA
4254 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4255 #elif defined(ARM_FPU_VFP)
4256 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4257 ARM_FMSTAT (code);
4258 #endif
4259 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4260 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4261 break;
4262 case OP_FCGT_UN:
4263 /* swapped */
4264 #ifdef ARM_FPU_FPA
4265 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4266 #elif defined(ARM_FPU_VFP)
4267 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4268 ARM_FMSTAT (code);
4269 #endif
4270 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4271 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4272 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4273 break;
4274 /* ARM FPA flags table:
4275 * N Less than ARMCOND_MI
4276 * Z Equal ARMCOND_EQ
4277 * C Greater Than or Equal ARMCOND_CS
4278 * V Unordered ARMCOND_VS
4280 case OP_FBEQ:
4281 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4282 break;
4283 case OP_FBNE_UN:
4284 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4285 break;
4286 case OP_FBLT:
4287 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4288 break;
4289 case OP_FBLT_UN:
4290 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4291 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4292 break;
4293 case OP_FBGT:
4294 case OP_FBGT_UN:
4295 case OP_FBLE:
4296 case OP_FBLE_UN:
4297 g_assert_not_reached ();
4298 break;
4299 case OP_FBGE:
4300 #ifdef ARM_FPU_VFP
4301 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4302 #else
4303 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4304 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4305 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4306 #endif
4307 break;
4308 case OP_FBGE_UN:
4309 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4310 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4311 break;
4313 case OP_CKFINITE: {
4314 #ifdef ARM_FPU_FPA
4315 if (ins->dreg != ins->sreg1)
4316 ARM_MVFD (code, ins->dreg, ins->sreg1);
4317 #elif defined(ARM_FPU_VFP)
4318 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4319 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4320 ARM_B (code, 1);
4321 *(guint32*)code = 0xffffffff;
4322 code += 4;
4323 *(guint32*)code = 0x7fefffff;
4324 code += 4;
4325 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4326 ARM_FMSTAT (code);
4327 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4328 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4329 ARM_FMSTAT (code);
4330 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4332 ARM_CPYD (code, ins->dreg, ins->sreg1);
4333 #endif
4334 break;
4336 default:
4337 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4338 g_assert_not_reached ();
4341 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4342 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4343 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4344 g_assert_not_reached ();
4347 cpos += max_len;
4349 last_ins = ins;
4350 last_offset = offset;
4353 cfg->code_len = code - cfg->native_code;
4356 #endif /* DISABLE_JIT */
4358 #ifdef HAVE_AEABI_READ_TP
4359 void __aeabi_read_tp (void);
4360 #endif
/*
 * mono_arch_register_lowlevel_calls:
 * Register the ARM specific low-level runtime helpers as JIT icalls so the
 * code generator can emit direct calls to them. The signatures are dummies
 * ("void"): these helpers are only ever reached through hand-built calling
 * sequences, never through managed signature marshalling.
 */
void
mono_arch_register_lowlevel_calls (void)
{
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);

#ifndef MONO_CROSS_COMPILE
#ifdef HAVE_AEABI_READ_TP
	/* EABI TLS helper, used by the fast LMF-address lookup in the prolog */
	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
#endif
#endif
}
/*
 * patch_lis_ori:
 * Rewrite the immediate halves of a lis/ori two-instruction constant load
 * at IP with VAL. NOTE(review): lis/ori are PowerPC mnemonics — this macro
 * was inherited from the ppc backend and every use in mono_arch_patch_code ()
 * sits behind g_assert_not_reached (), so it is effectively dead on ARM.
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
	} while (0)
/*
 * mono_arch_patch_code:
 * Walk the jump-info list JI and apply each patch to the native code
 * starting at CODE. RUN_CCTORS is FALSE when called from the AOT
 * compiler; in that case only basic-block/label patches are applied
 * here, everything else is resolved at AOT image load time.
 */
void
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
{
	MonoJumpInfo *patch_info;
	gboolean compile_aot = !run_cctors;

	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		const unsigned char *target;

		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
			gpointer *jt = (gpointer*)(ip + 8);
			int i;
			/* jt is the inlined jump table, 2 instructions after ip
			 * In the normal case we store the absolute addresses,
			 * otherwise the displacements.
			 */
			for (i = 0; i < patch_info->data.table->table_size; i++)
				jt [i] = code + (int)patch_info->data.table->table [i];
			continue;
		}
		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		if (compile_aot) {
			switch (patch_info->type) {
			case MONO_PATCH_INFO_BB:
			case MONO_PATCH_INFO_LABEL:
				break;
			default:
				/* No need to patch these */
				continue;
			}
		}

		switch (patch_info->type) {
		/* NOTE(review): all the g_assert_not_reached () cases below are
		 * dead code inherited from the ppc backend; on ARM these patch
		 * kinds never reach this function. */
		case MONO_PATCH_INFO_IP:
			g_assert_not_reached ();
			patch_lis_ori (ip, ip);
			continue;
		case MONO_PATCH_INFO_METHOD_REL:
			g_assert_not_reached ();
			*((gpointer *)(ip)) = code + patch_info->data.offset;
			continue;
		case MONO_PATCH_INFO_METHODCONST:
		case MONO_PATCH_INFO_CLASS:
		case MONO_PATCH_INFO_IMAGE:
		case MONO_PATCH_INFO_FIELD:
		case MONO_PATCH_INFO_VTABLE:
		case MONO_PATCH_INFO_IID:
		case MONO_PATCH_INFO_SFLDA:
		case MONO_PATCH_INFO_LDSTR:
		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		case MONO_PATCH_INFO_LDTOKEN:
			g_assert_not_reached ();
			/* from OP_AOTCONST : lis + ori */
			patch_lis_ori (ip, target);
			continue;
		case MONO_PATCH_INFO_R4:
		case MONO_PATCH_INFO_R8:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
			continue;
		case MONO_PATCH_INFO_EXC_NAME:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
			continue;
		case MONO_PATCH_INFO_NONE:
		case MONO_PATCH_INFO_BB_OVF:
		case MONO_PATCH_INFO_EXC_OVF:
			/* everything is dealt with at epilog output time */
			continue;
		default:
			break;
		}
		/* Common case: patch the branch/call/constant load in place */
		arm_patch_general (domain, ip, target);
	}
}
4460 #ifndef DISABLE_JIT
4463 * Stack frame layout:
4465 * ------------------- fp
4466 * MonoLMF structure or saved registers
4467 * -------------------
4468 * locals
4469 * -------------------
4470 * spilled regs
4471 * -------------------
4472 * optional 8 bytes for tracing
4473 * -------------------
4474 * param area size is cfg->param_area
4475 * ------------------- sp
4477 guint8 *
4478 mono_arch_emit_prolog (MonoCompile *cfg)
4480 MonoMethod *method = cfg->method;
4481 MonoBasicBlock *bb;
4482 MonoMethodSignature *sig;
4483 MonoInst *inst;
4484 int alloc_size, pos, max_offset, i, rot_amount;
4485 guint8 *code;
4486 CallInfo *cinfo;
4487 int tracing = 0;
4488 int lmf_offset = 0;
4489 int prev_sp_offset, reg_offset;
4491 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4492 tracing = 1;
4494 sig = mono_method_signature (method);
4495 cfg->code_size = 256 + sig->param_count * 20;
4496 code = cfg->native_code = g_malloc (cfg->code_size);
4498 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4500 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4502 alloc_size = cfg->stack_offset;
4503 pos = 0;
4505 if (!method->save_lmf) {
4506 /* We save SP by storing it into IP and saving IP */
4507 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4508 prev_sp_offset = 8; /* ip and lr */
4509 for (i = 0; i < 16; ++i) {
4510 if (cfg->used_int_regs & (1 << i))
4511 prev_sp_offset += 4;
4513 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4514 reg_offset = 0;
4515 for (i = 0; i < 16; ++i) {
4516 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4517 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4518 reg_offset += 4;
4521 } else {
4522 ARM_PUSH (code, 0x5ff0);
4523 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4524 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4525 reg_offset = 0;
4526 for (i = 0; i < 16; ++i) {
4527 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4528 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4529 reg_offset += 4;
4532 pos += sizeof (MonoLMF) - prev_sp_offset;
4533 lmf_offset = pos;
4535 alloc_size += pos;
4536 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4537 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4538 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4539 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4542 /* the stack used in the pushed regs */
4543 if (prev_sp_offset & 4)
4544 alloc_size += 4;
4545 cfg->stack_usage = alloc_size;
4546 if (alloc_size) {
4547 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4548 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4549 } else {
4550 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4551 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4553 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4555 if (cfg->frame_reg != ARMREG_SP) {
4556 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4557 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4559 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4560 prev_sp_offset += alloc_size;
4562 /* compute max_offset in order to use short forward jumps
4563 * we could skip do it on arm because the immediate displacement
4564 * for jumps is large enough, it may be useful later for constant pools
4566 max_offset = 0;
4567 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4568 MonoInst *ins = bb->code;
4569 bb->max_offset = max_offset;
4571 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4572 max_offset += 6;
4574 MONO_BB_FOR_EACH_INS (bb, ins)
4575 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4578 /* store runtime generic context */
4579 if (cfg->rgctx_var) {
4580 MonoInst *ins = cfg->rgctx_var;
4582 g_assert (ins->opcode == OP_REGOFFSET);
4584 if (arm_is_imm12 (ins->inst_offset)) {
4585 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4586 } else {
4587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4588 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4592 /* load arguments allocated to register from the stack */
4593 pos = 0;
4595 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4597 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4598 ArgInfo *ainfo = &cinfo->ret;
4599 inst = cfg->vret_addr;
4600 g_assert (arm_is_imm12 (inst->inst_offset));
4601 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4604 if (sig->call_convention == MONO_CALL_VARARG) {
4605 ArgInfo *cookie = &cinfo->sig_cookie;
4607 /* Save the sig cookie address */
4608 g_assert (cookie->storage == RegTypeBase);
4610 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4611 g_assert (arm_is_imm12 (cfg->sig_cookie));
4612 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4613 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4616 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4617 ArgInfo *ainfo = cinfo->args + i;
4618 inst = cfg->args [pos];
4620 if (cfg->verbose_level > 2)
4621 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4622 if (inst->opcode == OP_REGVAR) {
4623 if (ainfo->storage == RegTypeGeneral)
4624 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4625 else if (ainfo->storage == RegTypeFP) {
4626 g_assert_not_reached ();
4627 } else if (ainfo->storage == RegTypeBase) {
4628 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4629 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4630 } else {
4631 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4632 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4634 } else
4635 g_assert_not_reached ();
4637 if (cfg->verbose_level > 2)
4638 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4639 } else {
4640 /* the argument should be put on the stack: FIXME handle size != word */
4641 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4642 switch (ainfo->size) {
4643 case 1:
4644 if (arm_is_imm12 (inst->inst_offset))
4645 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4646 else {
4647 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4648 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4650 break;
4651 case 2:
4652 if (arm_is_imm8 (inst->inst_offset)) {
4653 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4654 } else {
4655 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4656 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4658 break;
4659 case 8:
4660 g_assert (arm_is_imm12 (inst->inst_offset));
4661 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4662 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4663 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4664 break;
4665 default:
4666 if (arm_is_imm12 (inst->inst_offset)) {
4667 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4668 } else {
4669 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4670 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4672 break;
4674 } else if (ainfo->storage == RegTypeBaseGen) {
4675 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4676 g_assert (arm_is_imm12 (inst->inst_offset));
4677 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4678 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4679 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4680 } else if (ainfo->storage == RegTypeBase) {
4681 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4682 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4683 } else {
4684 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4685 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4688 switch (ainfo->size) {
4689 case 1:
4690 if (arm_is_imm8 (inst->inst_offset)) {
4691 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4692 } else {
4693 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4694 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4696 break;
4697 case 2:
4698 if (arm_is_imm8 (inst->inst_offset)) {
4699 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4700 } else {
4701 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4702 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4704 break;
4705 case 8:
4706 if (arm_is_imm12 (inst->inst_offset)) {
4707 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4708 } else {
4709 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4710 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4712 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4713 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4714 } else {
4715 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4716 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4718 if (arm_is_imm12 (inst->inst_offset + 4)) {
4719 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4720 } else {
4721 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4722 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4724 break;
4725 default:
4726 if (arm_is_imm12 (inst->inst_offset)) {
4727 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4728 } else {
4729 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4730 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4732 break;
4734 } else if (ainfo->storage == RegTypeFP) {
4735 g_assert_not_reached ();
4736 } else if (ainfo->storage == RegTypeStructByVal) {
4737 int doffset = inst->inst_offset;
4738 int soffset = 0;
4739 int cur_reg;
4740 int size = 0;
4741 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4742 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4743 if (arm_is_imm12 (doffset)) {
4744 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4745 } else {
4746 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4747 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4749 soffset += sizeof (gpointer);
4750 doffset += sizeof (gpointer);
4752 if (ainfo->vtsize) {
4753 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4754 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4755 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4757 } else if (ainfo->storage == RegTypeStructByAddr) {
4758 g_assert_not_reached ();
4759 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4760 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4761 } else
4762 g_assert_not_reached ();
4764 pos++;
4767 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4768 if (cfg->compile_aot)
4769 /* AOT code is only used in the root domain */
4770 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4771 else
4772 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4773 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4774 (gpointer)"mono_jit_thread_attach");
4775 code = emit_call_seq (cfg, code);
4778 if (method->save_lmf) {
4779 gboolean get_lmf_fast = FALSE;
4781 #ifdef HAVE_AEABI_READ_TP
4782 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4784 if (lmf_addr_tls_offset != -1) {
4785 get_lmf_fast = TRUE;
4787 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4788 (gpointer)"__aeabi_read_tp");
4789 code = emit_call_seq (cfg, code);
4791 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4792 get_lmf_fast = TRUE;
4794 #endif
4795 if (!get_lmf_fast) {
4796 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4797 (gpointer)"mono_get_lmf_addr");
4798 code = emit_call_seq (cfg, code);
4800 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4801 /* lmf_offset is the offset from the previous stack pointer,
4802 * alloc_size is the total stack space allocated, so the offset
4803 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4804 * The pointer to the struct is put in r1 (new_lmf).
4805 * r2 is used as scratch
4806 * The callee-saved registers are already in the MonoLMF structure
4808 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4809 /* r0 is the result from mono_get_lmf_addr () */
4810 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4811 /* new_lmf->previous_lmf = *lmf_addr */
4812 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4813 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4814 /* *(lmf_addr) = r1 */
4815 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4816 /* Skip method (only needed for trampoline LMF frames) */
4817 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4818 /* save the current IP */
4819 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4820 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4823 if (tracing)
4824 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4826 if (cfg->arch.seq_point_info_var) {
4827 MonoInst *ins = cfg->arch.seq_point_info_var;
4829 /* Initialize the variable from a GOT slot */
4830 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4831 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4832 ARM_B (code, 0);
4833 *(gpointer*)code = NULL;
4834 code += 4;
4835 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4837 g_assert (ins->opcode == OP_REGOFFSET);
4839 if (arm_is_imm12 (ins->inst_offset)) {
4840 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4841 } else {
4842 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4843 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4847 /* Initialize ss_trigger_page_var */
4849 MonoInst *info_var = cfg->arch.seq_point_info_var;
4850 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4851 int dreg = ARMREG_LR;
4853 if (info_var) {
4854 g_assert (info_var->opcode == OP_REGOFFSET);
4855 g_assert (arm_is_imm12 (info_var->inst_offset));
4857 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4858 /* Load the trigger page addr */
4859 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4860 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4864 cfg->code_len = code - cfg->native_code;
4865 g_assert (cfg->code_len < cfg->code_size);
4866 g_free (cinfo);
4868 return code;
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog: optionally call the leave-method tracer, load a
 * struct returned by value into r0, unlink the MonoLMF if one was pushed
 * in the prolog, then restore the callee-saved registers and return by
 * popping the saved lr into pc. Must be kept in sync with
 * mono_arch_emit_prolog () and the OP_JMP implementation.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
{
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;
	CallInfo *cinfo;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	/* Grow the code buffer until the epilog is guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	/*
	 * Keep in sync with OP_JMP
	 */
	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
	}
	pos = 0;

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == RegTypeStructByVal) {
		MonoInst *ins = cfg->ret;

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
		}
	}

	if (method->save_lmf) {
		int lmf_offset;
		/* all but r0-r3, sp and pc */
		pos += sizeof (MonoLMF) - (4 * 10);
		lmf_offset = pos;
		/* r2 contains the pointer to the current LMF */
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
		/* ip = previous_lmf */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* lr = lmf_addr */
		ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* *(lmf_addr) = previous_lmf */
		ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* FIXME: speedup: there is no actual need to restore the registers if
		 * we didn't actually change them (idea from Zoltan).
		 */
		/* restore iregs */
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
		ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
	} else {
		/* Deallocate the frame, then pop the saved regs (lr into pc) */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		}
		/* FIXME: add v4 thumb interworking support */
		ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
4955 /* remove once throw_exception_by_name is eliminated */
4956 static int
4957 exception_id_by_name (const char *name)
4959 if (strcmp (name, "IndexOutOfRangeException") == 0)
4960 return MONO_EXC_INDEX_OUT_OF_RANGE;
4961 if (strcmp (name, "OverflowException") == 0)
4962 return MONO_EXC_OVERFLOW;
4963 if (strcmp (name, "ArithmeticException") == 0)
4964 return MONO_EXC_ARITHMETIC;
4965 if (strcmp (name, "DivideByZeroException") == 0)
4966 return MONO_EXC_DIVIDE_BY_ZERO;
4967 if (strcmp (name, "InvalidCastException") == 0)
4968 return MONO_EXC_INVALID_CAST;
4969 if (strcmp (name, "NullReferenceException") == 0)
4970 return MONO_EXC_NULL_REF;
4971 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4972 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4973 g_error ("Unknown intrinsic exception %s\n", name);
4974 return -1;
/*
 * mono_arch_emit_exceptions:
 * Emit the out-of-line exception throwing code at the end of the method.
 * Identical MONO_PATCH_INFO_EXC throws share one throw sequence: the first
 * occurrence emits it and later ones are just branch-patched to it. Each
 * sequence loads the return address in r1, loads the exception type token
 * (stored inline right after the bl) into r0 and calls
 * mono_arch_throw_corlib_exception.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
{
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				/* Only the first throw of each kind emits code */
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
			}
		}
	}

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;
	}

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the throw sequence emitted earlier */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			}
			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* r1 = return address of the faulting site */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			/* r0 = type token, stored inline right after the bl below */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* Repurpose this patch to target the throw helper */
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			*(guint32*)(gpointer)code = exc_class->type_token;
			code += 4;
			break;
		}
		default:
			/* do nothing */
			break;
		}
	}

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
}
5052 #endif /* #ifndef DISABLE_JIT */
/* TRUE once the lmf/lmf_addr TLS offsets below have been cached. */
static gboolean tls_offset_inited = FALSE;
5056 void
5057 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5059 if (!tls_offset_inited) {
5060 tls_offset_inited = TRUE;
5062 lmf_tls_offset = mono_get_lmf_tls_offset ();
5063 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/*
 * mono_arch_free_jit_tls_data:
 * Per-thread teardown hook; nothing to free on ARM since setup only
 * caches global TLS offsets.
 */
void
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
{
}
/*
 * mono_arch_emit_inst_for_method:
 * Hook for replacing well-known method calls with inline instruction
 * sequences (intrinsics). Not implemented on ARM yet: returning NULL
 * makes the JIT emit a normal call.
 */
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
	/* FIXME: */
	return NULL;
}
/*
 * mono_arch_print_tree:
 * Arch hook for pretty-printing arch specific opcodes in IR dumps.
 * Returning 0 tells the generic printer no opcode was handled here.
 */
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
	return 0;
}
/*
 * mono_arch_get_domain_intrinsic:
 * No ARM specific fast path for fetching the current MonoDomain; defer
 * to the generic helper.
 */
MonoInst*
mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
	return mono_get_domain_intrinsic (cfg);
}
/*
 * mono_arch_get_patch_offset:
 * Return the offset, in bytes from CODE, of the location a patch applies
 * to. 8 bytes == 2 instructions into the OP_AOTCONST sequence.
 */
guint32
mono_arch_get_patch_offset (guint8 *code)
{
	/* OP_AOTCONST */
	return 8;
}
/*
 * mono_arch_flush_register_windows:
 * Register windows are a SPARC concept; nothing to do on ARM.
 */
void
mono_arch_flush_register_windows (void)
{
}
5103 #ifdef MONO_ARCH_HAVE_IMT
5105 #ifndef DISABLE_JIT
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method argument of CALL to be passed in V5 so
 * interface thunks can find it. When no explicit IMT_ARG is supplied,
 * the method itself is materialized into a fresh vreg: via OP_AOTCONST
 * under AOT (so the value comes from the GOT), via OP_PCONST otherwise.
 * Only done for AOT or generic-sharing compiles; in the remaining cases
 * the IMT value is instead embedded in the code stream (see
 * mono_arch_find_imt_method).
 */
void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
{
	if (cfg->compile_aot) {
		int method_reg = mono_alloc_ireg (cfg);
		MonoInst *ins;

		call->dynamic_imt_arg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MONO_INST_NEW (cfg, ins, OP_AOTCONST);
			ins->dreg = method_reg;
			ins->inst_p0 = call->method;
			ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	} else if (cfg->generic_context) {

		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MonoInst *ins;
			int method_reg = mono_alloc_preg (cfg);

			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
		}
	}
}
5150 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method for an interface call. CODE points just past the
 * call site; two words back there must be an "ldr pc" followed by the
 * inline IMT value. A zero inline value means the caller was AOT compiled
 * and the method was passed in V5 instead (see mono_arch_emit_imt_argument).
 */
MonoMethod*
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
{
	guint32 *code_ptr = (guint32*)code;
	code_ptr -= 2;
	/* The IMT value is stored in the code stream right after the LDC instruction. */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));
	}
	if (code_ptr [1] == 0)
		/* This is AOTed code, the IMT method is in V5 */
		return (MonoMethod*)regs [ARMREG_V5];
	else
		return (MonoMethod*) code_ptr [1];
}
/*
 * mono_arch_find_static_call_vtable:
 * For static rgctx calls the vtable is passed in the dedicated RGCTX
 * register; just read it back from the saved register state.
 */
MonoVTable*
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
{
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
5175 #define ENABLE_WRONG_METHOD_CHECK 0
5176 #define BASE_SIZE (6 * 4)
5177 #define BSEARCH_ENTRY_SIZE (4 * 4)
5178 #define CMP_SIZE (3 * 4)
5179 #define BRANCH_SIZE (1 * 4)
5180 #define CALL_SIZE (2 * 4)
5181 #define WMC_SIZE (5 * 4)
5182 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5184 static arminstr_t *
5185 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5187 guint32 delta = DISTANCE (target, code);
5188 delta -= 8;
5189 g_assert (delta >= 0 && delta <= 0xFFF);
5190 *target = *target | delta;
5191 *code = value;
5192 return code + 1;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the dispatch thunk for an IMT slot: a sequence of compares against
 * the IMT method (in R0, or V5 for AOT callers) ending in an indirect jump
 * through the vtable. Two passes: first size every entry, then emit code.
 * Inline literals (IMT keys, vtable address) are emitted after unconditional
 * branches and patched into the preceding PC-relative LDRs via
 * arm_emit_value_and_patch_ldr ().
 */
gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	int size, i, extra_space = 0;
	arminstr_t *code, *start, *vtable_target = NULL;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	/*
	 * We might be called with a fail_tramp from the IMT builder code even if
	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
	 */
	//g_assert (!fail_tramp);

	/* Pass 1: compute the chunk size of every entry so the total code size is
	 * known before reserving memory. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			/* Slots whose offset does not fit an LDR imm12 need the long
			 * (push/LDM) dispatch sequence. */
			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			item->chunk_size += CALL_SIZE;
		} else {
			/* Binary-search node: compare + branch; the target entry will not
			 * need its own compare. */
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	start = code = mono_domain_code_reserve (domain, size);

#if DEBUG_IMT
	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Prologue: save scratch registers (plus IP/PC when the long dispatch
	 * sequence may be used), load the IMT method from the call site, and load
	 * the vtable address from a literal patched in below. */
	if (large_offsets)
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	else
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);

	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* Pass 2: emit the entries. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0); /* patched in the fixup loop */
			} else {
				/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_NE, 1);

				ARM_DBRK (code);
#endif
			}

			vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
			} else {
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				if (large_offsets)
					/* Drop the IP/PC words pushed by the prologue. */
					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			/* Binary-search node: its key literal is emitted later into the
			 * constant pool reserved after the next is_equals entry. */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_GE, 0);
			++extra_space;
		}
	}

	/* Fixup pass: patch forward branches and fill the reserved constant pools
	 * with the keys of the preceding bsearch nodes. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
			}
		}
	}

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
	return start;
}
5374 #endif
5376 gpointer
5377 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5379 if (reg == ARMREG_SP)
5380 return (gpointer)ctx->esp;
5381 else
5382 return (gpointer)ctx->regs [reg];
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/* AOT code cannot be patched: record the trigger page in the method's
		 * per-instruction table instead, which the sequence-point code reads. */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		/* Overwrite the 16-byte OP_SEQ_POINT area: load the trigger page
		 * address from an inline literal (the B skips over the data word),
		 * then load through it to fault. */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
5427 * mono_arch_clear_breakpoint:
5429 * Clear the breakpoint at IP.
5431 void
5432 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5434 guint8 *code = ip;
5435 int i;
5437 if (ji->from_aot) {
5438 guint32 native_offset = ip - (guint8*)ji->code_start;
5439 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5441 g_assert (native_offset % 4 == 0);
5442 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5443 info->bp_addrs [native_offset / 4] = 0;
5444 } else {
5445 for (i = 0; i < 4; ++i)
5446 ARM_NOP (code);
5448 mono_arch_flush_icache (ip, code - ip);
5453 * mono_arch_start_single_stepping:
5455 * Start single stepping.
5457 void
5458 mono_arch_start_single_stepping (void)
5460 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5464 * mono_arch_stop_single_stepping:
5466 * Stop single stepping.
5468 void
5469 mono_arch_stop_single_stepping (void)
5471 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a protected trigger page is accessed:
 * SIGBUS on Darwin, SIGSEGV elsewhere. */
#if __APPLE__
#define DBG_SIGNAL SIGBUS
#else
#define DBG_SIGNAL SIGSEGV
#endif
5481 * mono_arch_is_single_step_event:
5483 * Return whenever the machine state in SIGCTX corresponds to a single
5484 * step event.
5486 gboolean
5487 mono_arch_is_single_step_event (void *info, void *sigctx)
5489 siginfo_t *sinfo = info;
5491 /* Sometimes the address is off by 4 */
5492 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5493 return TRUE;
5494 else
5495 return FALSE;
5499 * mono_arch_is_breakpoint_event:
5501 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5503 gboolean
5504 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5506 siginfo_t *sinfo = info;
5508 if (sinfo->si_signo == DBG_SIGNAL) {
5509 /* Sometimes the address is off by 4 */
5510 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5511 return TRUE;
5512 else
5513 return FALSE;
5514 } else {
5515 return FALSE;
5519 guint8*
5520 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5522 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5524 if (ji->from_aot)
5525 ip -= 6 * 4;
5526 else
5527 ip -= 12;
5529 return ip;
5532 guint8*
5533 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5535 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5537 ip += 4;
5539 return ip;
5543 * mono_arch_skip_breakpoint:
5545 * See mini-amd64.c for docs.
5547 void
5548 mono_arch_skip_breakpoint (MonoContext *ctx)
5550 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5554 * mono_arch_skip_single_step:
5556 * See mini-amd64.c for docs.
5558 void
5559 mono_arch_skip_single_step (MonoContext *ctx)
5561 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
/*
 * mono_arch_get_seq_point_info:
 *
 *   Return (creating on first use) the per-method SeqPointInfo used by AOTed
 * sequence-point code, cached in the domain's arch_seq_points hash keyed by
 * the method's code start. See mini-amd64.c for docs.
 */
gpointer
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
	SeqPointInfo *info;
	MonoJitInfo *ji;

	// FIXME: Add a free function

	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
								code);
	mono_domain_unlock (domain);

	if (!info) {
		ji = mono_jit_info_table_find (domain, (char*)code);
		g_assert (ji);

		/* One bp_addrs slot per code byte is over-allocated; only every
		 * fourth slot (one per instruction) is used by set/clear_breakpoint. */
		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

		info->ss_trigger_page = ss_trigger_page;
		info->bp_trigger_page = bp_trigger_page;

		/* NOTE(review): lookup and insert happen under separate lock
		 * acquisitions, so two racing threads could both allocate an info for
		 * the same code; the loser's allocation would leak and overwrite the
		 * winner's entry — confirm whether callers serialize this path. */
		mono_domain_lock (domain);
		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
							 code, info);
		mono_domain_unlock (domain);
	}

	return info;
}