2010-06-21 Rodrigo Kumpera <rkumpera@novell.com>
[mono.git] / mono / mini / mini-arm.c
1 /*
2 * mini-arm.c: ARM backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 */
10 #include "mini.h"
11 #include <string.h>
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
17 #include "mini-arm.h"
18 #include "cpu-arm.h"
19 #include "trace.h"
20 #include "ir-emit.h"
21 #ifdef ARM_FPU_FPA
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
25 #endif
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
29 #endif
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
53 typedef struct {
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
57 } SeqPointInfo;
60 * TODO:
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which is binary-incompatible with the others.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
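/*
 * These guard the immediate fields of the instructions emitted below:
 * plain LDR/STR take a 12-bit offset, the halfword/signed-byte forms
 * only an 8-bit one, and VFP loads/stores use an 8-bit immediate scaled
 * by 4 (hence the +-1020 range; the offset must also be word-aligned).
 */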
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
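/*
 * LDR_MASK/LDR_PC_VAL match a load into PC ("ldr pc, [rX, #imm]"), while
 * ADD_LR_PC_4 ("add lr, pc, #4") and MOV_LR_PC ("mov lr, pc") are the
 * exact instruction words emitted at call sites, so mono_arch_get_vcall_slot ()
 * can walk backwards from a return address and recognize which call
 * sequence produced it.
 */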
85 #define DEBUG_IMT 0
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
91 } else { \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
95 } \
96 } while (0)
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
101 } else { \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
105 } \
106 } while (0)
108 const char*
109 mono_arch_regname (int reg)
111 static const char * rnames[] = {
112 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
113 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
114 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
115 "arm_pc"
117 if (reg >= 0 && reg < 16)
118 return rnames [reg];
119 return "unknown";
122 const char*
123 mono_arch_fregname (int reg)
125 static const char * rnames[] = {
126 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
127 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
128 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
129 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
130 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
131 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
132 "arm_f30", "arm_f31"
134 if (reg >= 0 && reg < 32)
135 return rnames [reg];
136 return "unknown";
139 #ifndef DISABLE_JIT
141 static guint8*
142 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
144 int imm8, rot_amount;
145 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
146 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
147 return code;
149 g_assert (dreg != sreg);
150 code = mono_arm_emit_load_imm (code, dreg, imm);
151 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
152 return code;
155 static guint8*
156 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size > sizeof (gpointer) * 4) {
160 guint8 *start_loop;
161 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
162 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
163 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
164 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
165 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
166 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
167 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
168 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
169 ARM_B_COND (code, ARMCOND_NE, 0);
170 arm_patch (code - 4, start_loop);
171 return code;
173 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
174 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
175 while (size >= 4) {
176 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
177 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
178 doffset += 4;
179 soffset += 4;
180 size -= 4;
182 } else if (size) {
183 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
184 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
185 doffset = soffset = 0;
186 while (size >= 4) {
187 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
188 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
189 doffset += 4;
190 soffset += 4;
191 size -= 4;
194 g_assert (size == 0);
195 return code;
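/*
 * Emit an indirect call through 'reg'.  On ARMv5+ a single BLX both sets
 * LR and branches (switching to Thumb if needed); on older cores we copy
 * PC into LR ourselves (PC reads as the address of the current
 * instruction + 8, i.e. the instruction after the branch) and then
 * branch, using BX when Thumb interworking is available or a plain MOV
 * to PC otherwise.
 */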
198 static guint8*
199 emit_call_reg (guint8 *code, int reg)
201 if (v5_supported) {
202 ARM_BLX_REG (code, reg);
203 } else {
204 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
205 if (thumb_supported)
206 ARM_BX (code, reg);
207 else
208 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
210 return code;
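/*
 * Emit a call whose target is filled in later by the patcher.  For
 * dynamic methods the target is kept as a literal word right after a
 * branch that skips it and is loaded into IP, presumably because the
 * +-32MB reach of a direct BL cannot be guaranteed for dynamically
 * allocated code; everything else uses a BL that gets patched.
 */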
213 static guint8*
214 emit_call_seq (MonoCompile *cfg, guint8 *code)
216 if (cfg->method->dynamic) {
217 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
218 ARM_B (code, 0);
219 *(gpointer*)code = NULL;
220 code += 4;
221 code = emit_call_reg (code, ARMREG_IP);
222 } else {
223 ARM_BL (code, 0);
225 return code;
228 static guint8*
229 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
231 switch (ins->opcode) {
232 case OP_FCALL:
233 case OP_FCALL_REG:
234 case OP_FCALL_MEMBASE:
235 #ifdef ARM_FPU_FPA
236 if (ins->dreg != ARM_FPA_F0)
237 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
238 #elif defined(ARM_FPU_VFP)
239 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
240 ARM_FMSR (code, ins->dreg, ARMREG_R0);
241 ARM_CVTS (code, ins->dreg, ins->dreg);
242 } else {
243 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
245 #endif
246 break;
249 return code;
252 #endif /* #ifndef DISABLE_JIT */
255 * mono_arch_get_argument_info:
256 * @csig: a method signature
257 * @param_count: the number of parameters to consider
258 * @arg_info: an array to store the result infos
260 * Gathers information on parameters such as size, alignment and
261 * padding. arg_info should be large enough to hold param_count + 1 entries.
263 * Returns the size of the activation frame.
266 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
268 int k, frame_size = 0;
269 guint32 size, align, pad;
270 int offset = 8;
272 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
273 frame_size += sizeof (gpointer);
274 offset += 4;
277 arg_info [0].offset = offset;
279 if (csig->hasthis) {
280 frame_size += sizeof (gpointer);
281 offset += 4;
284 arg_info [0].size = frame_size;
286 for (k = 0; k < param_count; k++) {
287 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
289 /* ignore alignment for now */
290 align = 1;
292 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
293 arg_info [k].pad = pad;
294 frame_size += size;
295 arg_info [k + 1].pad = 0;
296 arg_info [k + 1].size = size;
297 offset += pad;
298 arg_info [k + 1].offset = offset;
299 offset += size;
302 align = MONO_ARCH_FRAME_ALIGNMENT;
303 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
304 arg_info [k].pad = pad;
306 return frame_size;
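/*
 * Given the 32-bit encoding of an "ldr rD, [rN, #imm]" instruction,
 * extract the base register (bits 19:16) and the 12-bit offset, apply
 * the sign from the U bit, and return the runtime value of the base
 * register with the signed offset stored in *displacement, so the caller
 * can locate the vtable slot that was loaded from.
 */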
309 static gpointer
310 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
312 char *o = NULL;
313 int reg, offset = 0;
314 reg = (ldr >> 16 ) & 0xf;
315 offset = ldr & 0xfff;
316 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
317 offset = -offset;
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
319 o = (gpointer)regs [reg];
321 *displacement = offset;
322 return o;
325 gpointer
326 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
328 guint32* code = (guint32*)code_ptr;
330 /* Locate the address of the method-specific trampoline. The call using
331 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
332 looks something like this:
334 ldr rA, rX, #offset
335 mov lr, pc
336 mov pc, rA
337 or better:
338 mov lr, pc
339 ldr pc, rX, #offset
341 The call sequence could be also:
342 ldr ip, pc, 0
343 b skip
344 function pointer literal
345 skip:
346 mov lr, pc
347 mov pc, ip
348 Note that on ARM5+ we can use one instruction instead of the last two.
349 Therefore, we need to locate the 'ldr rA' instruction to know which
350 register was used to hold the method address.
353 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx"; it could be either the IMT value or some other instruction */
354 --code;
356 /* Three possible code sequences can happen here:
357 * interface call:
359 * add lr, [pc + #4]
360 * ldr pc, [rX - #offset]
361 * .word IMT value
363 * virtual call:
365 * mov lr, pc
366 * ldr pc, [rX - #offset]
368 * direct branch with bl:
370 * bl #offset
372 * direct branch with mov:
374 * mov pc, rX
376 * We only need to identify interface and virtual calls, the others can be ignored.
379 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
380 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
382 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
383 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
385 return NULL;
388 #define MAX_ARCH_DELEGATE_PARAMS 3
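/*
 * Two thunk shapes are generated here: the has_target one loads
 * method_ptr into IP, replaces the "this" argument with delegate->target
 * and jumps; the no-target one loads method_ptr and shifts the first
 * param_count arguments down one register so the delegate itself
 * disappears from the argument list.
 */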
390 static gpointer
391 get_delegate_invoke_impl (gboolean has_target, int param_count, guint32 *code_size)
393 guint8 *code, *start;
395 if (has_target) {
396 start = code = mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
400 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= 12);
405 mono_arch_flush_icache (start, 12);
406 } else {
407 int size, i;
409 size = 8 + param_count * 4;
410 start = code = mono_global_codeman_reserve (size);
412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
413 /* slide down the arguments */
414 for (i = 0; i < param_count; ++i) {
415 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
417 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
419 g_assert ((code - start) <= size);
421 mono_arch_flush_icache (start, size);
424 if (code_size)
425 *code_size = code - start;
427 return start;
431 * mono_arch_get_delegate_invoke_impls:
433 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
434 * trampolines.
436 GSList*
437 mono_arch_get_delegate_invoke_impls (void)
439 GSList *res = NULL;
440 guint8 *code;
441 guint32 code_len;
442 int i;
444 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
445 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
447 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
448 code = get_delegate_invoke_impl (FALSE, i, &code_len);
449 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
452 return res;
455 gpointer
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
458 guint8 *code, *start;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig->ret))
462 return NULL;
464 if (has_target) {
465 static guint8* cached = NULL;
466 mono_mini_arch_lock ();
467 if (cached) {
468 mono_mini_arch_unlock ();
469 return cached;
472 if (mono_aot_only)
473 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
474 else
475 start = get_delegate_invoke_impl (TRUE, 0, NULL);
476 cached = start;
477 mono_mini_arch_unlock ();
478 return cached;
479 } else {
480 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
481 int i;
483 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
484 return NULL;
485 for (i = 0; i < sig->param_count; ++i)
486 if (!mono_is_regsize_var (sig->params [i]))
487 return NULL;
489 mono_mini_arch_lock ();
490 code = cache [sig->param_count];
491 if (code) {
492 mono_mini_arch_unlock ();
493 return code;
496 if (mono_aot_only) {
497 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
498 start = mono_aot_get_trampoline (name);
499 g_free (name);
500 } else {
501 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
503 cache [sig->param_count] = start;
504 mono_mini_arch_unlock ();
505 return start;
508 return NULL;
511 gpointer
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
514 /* FIXME: handle returning a struct */
515 if (MONO_TYPE_ISSTRUCT (sig->ret))
516 return (gpointer)regs [ARMREG_R1];
517 return (gpointer)regs [ARMREG_R0];
521 * Initialize the cpu to execute managed code.
523 void
524 mono_arch_cpu_init (void)
529 * Initialize architecture specific code.
531 void
532 mono_arch_init (void)
534 InitializeCriticalSection (&mini_arch_mutex);
536 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
537 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
538 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
540 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
541 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
545 * Cleanup architecture specific code.
547 void
548 mono_arch_cleanup (void)
553 * This function returns the optimizations supported on this cpu.
555 guint32
556 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
558 guint32 opts = 0;
559 const char *cpu_arch = getenv ("MONO_CPU_ARCH");
560 if (cpu_arch != NULL) {
561 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
562 if (strncmp (cpu_arch, "armv", 4) == 0) {
563 v5_supported = cpu_arch [4] >= '5';
564 v7_supported = cpu_arch [4] >= '7';
566 } else {
567 #if __APPLE__
568 thumb_supported = TRUE;
569 v5_supported = TRUE;
570 #else
571 char buf [512];
572 char *line;
573 FILE *file = fopen ("/proc/cpuinfo", "r");
574 if (file) {
575 while ((line = fgets (buf, 512, file))) {
576 if (strncmp (line, "Processor", 9) == 0) {
577 char *ver = strstr (line, "(v");
578 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
579 v5_supported = TRUE;
580 if (ver && (ver [2] == '7'))
581 v7_supported = TRUE;
582 continue;
584 if (strncmp (line, "Features", 8) == 0) {
585 char *th = strstr (line, "thumb");
586 if (th) {
587 thumb_supported = TRUE;
588 if (v5_supported)
589 break;
591 continue;
594 fclose (file);
595 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
597 #endif
600 /* no arm-specific optimizations yet */
601 *exclude_mask = 0;
602 return opts;
605 #ifndef DISABLE_JIT
607 static gboolean
608 is_regsize_var (MonoType *t) {
609 if (t->byref)
610 return TRUE;
611 t = mini_type_get_underlying_type (NULL, t);
612 switch (t->type) {
613 case MONO_TYPE_I4:
614 case MONO_TYPE_U4:
615 case MONO_TYPE_I:
616 case MONO_TYPE_U:
617 case MONO_TYPE_PTR:
618 case MONO_TYPE_FNPTR:
619 return TRUE;
620 case MONO_TYPE_OBJECT:
621 case MONO_TYPE_STRING:
622 case MONO_TYPE_CLASS:
623 case MONO_TYPE_SZARRAY:
624 case MONO_TYPE_ARRAY:
625 return TRUE;
626 case MONO_TYPE_GENERICINST:
627 if (!mono_type_generic_inst_is_valuetype (t))
628 return TRUE;
629 return FALSE;
630 case MONO_TYPE_VALUETYPE:
631 return FALSE;
633 return FALSE;
636 GList *
637 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
639 GList *vars = NULL;
640 int i;
642 for (i = 0; i < cfg->num_varinfo; i++) {
643 MonoInst *ins = cfg->varinfo [i];
644 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
646 /* unused vars */
647 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
648 continue;
650 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
651 continue;
653 /* we can only allocate 32 bit values */
654 if (is_regsize_var (ins->inst_vtype)) {
655 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
656 g_assert (i == vmv->idx);
657 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
661 return vars;
664 #define USE_EXTRA_TEMPS 0
666 GList *
667 mono_arch_get_global_int_regs (MonoCompile *cfg)
669 GList *regs = NULL;
672 * FIXME: Interface calls might go through a static rgctx trampoline which
673 * sets V5, but it doesn't save it, so we need to save it ourselves, and
674 * avoid using it.
676 if (cfg->flags & MONO_CFG_HAS_CALLS)
677 cfg->uses_rgctx_reg = TRUE;
679 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
680 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
681 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
682 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
683 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
684 /* V5 is reserved for passing the vtable/rgctx/IMT method */
685 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
686 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
687 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
689 return regs;
693 * mono_arch_regalloc_cost:
695 * Return the cost, in number of memory references, of the action of
696 * allocating the variable VMV into a register during global register
697 * allocation.
699 guint32
700 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
702 /* FIXME: */
703 return 2;
706 #endif /* #ifndef DISABLE_JIT */
708 #ifndef __GNUC_PREREQ
709 #define __GNUC_PREREQ(maj, min) (0)
710 #endif
712 void
713 mono_arch_flush_icache (guint8 *code, gint size)
715 #if __APPLE__
716 sys_icache_invalidate (code, size);
717 #elif __GNUC_PREREQ(4, 1)
718 __clear_cache (code, code + size);
719 #elif defined(PLATFORM_ANDROID)
720 const int syscall = 0xf0002;
721 __asm __volatile (
722 "mov r0, %0\n"
723 "mov r1, %1\n"
724 "mov r7, %2\n"
725 "mov r2, #0x0\n"
726 "svc 0x00000000\n"
728 : "r" (code), "r" (code + size), "r" (syscall)
729 : "r0", "r1", "r7", "r2"
731 #else
732 __asm __volatile ("mov r0, %0\n"
733 "mov r1, %1\n"
734 "mov r2, %2\n"
735 "swi 0x9f0002 @ sys_cacheflush"
736 : /* no outputs */
737 : "r" (code), "r" (code + size), "r" (0)
738 : "r0", "r1", "r2" );
739 #endif
742 typedef enum {
743 RegTypeNone,
744 RegTypeGeneral,
745 RegTypeIRegPair,
746 RegTypeBase,
747 RegTypeBaseGen,
748 RegTypeFP,
749 RegTypeStructByVal,
750 RegTypeStructByAddr
751 } ArgStorage;
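/*
 * RegTypeGeneral: a single core register; RegTypeIRegPair: two
 * consecutive core registers holding a 64-bit value; RegTypeBase: on the
 * caller's stack at 'offset' from SP; RegTypeBaseGen: first word in r3,
 * second word on the stack; RegTypeStructByVal: struct passed (partly)
 * in registers, with 'vtsize' words spilling to the param area;
 * RegTypeStructByAddr: passed by address.
 */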
753 typedef struct {
754 gint32 offset;
755 guint16 vtsize; /* in param area */
756 guint8 reg;
757 ArgStorage storage;
758 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
759 } ArgInfo;
761 typedef struct {
762 int nargs;
763 guint32 stack_usage;
764 guint32 struct_ret;
765 gboolean vtype_retaddr;
766 ArgInfo ret;
767 ArgInfo sig_cookie;
768 ArgInfo args [1];
769 } CallInfo;
771 #define DEBUG(a)
773 #ifndef __GNUC__
774 /*#define __alignof__(a) sizeof(a)*/
775 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
776 #endif
778 #define PARAM_REGS 4
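/*
 * Arguments are passed in r0-r3 and then on the caller's stack.  64-bit
 * values take a register pair; when gint64 is 8-byte aligned (linux
 * EABI) the pair starts at an even register and is never split between
 * r3 and the stack, otherwise the r3/stack split is represented by
 * RegTypeBaseGen.
 */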
780 static void inline
781 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
783 if (simple) {
784 if (*gr > ARMREG_R3) {
785 ainfo->offset = *stack_size;
786 ainfo->reg = ARMREG_SP; /* in the caller */
787 ainfo->storage = RegTypeBase;
788 *stack_size += 4;
789 } else {
790 ainfo->storage = RegTypeGeneral;
791 ainfo->reg = *gr;
793 } else {
794 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
795 int i8_align = 4;
796 #else
797 int i8_align = __alignof__ (gint64);
798 #endif
800 #if __ARM_EABI__
801 gboolean split = i8_align == 4;
802 #else
803 gboolean split = TRUE;
804 #endif
806 if (*gr == ARMREG_R3 && split) {
807 /* first word in r3 and the second on the stack */
808 ainfo->offset = *stack_size;
809 ainfo->reg = ARMREG_SP; /* in the caller */
810 ainfo->storage = RegTypeBaseGen;
811 *stack_size += 4;
812 } else if (*gr >= ARMREG_R3) {
813 #ifdef __ARM_EABI__
814 /* darwin aligns longs to 4 byte only */
815 if (i8_align == 8) {
816 *stack_size += 7;
817 *stack_size &= ~7;
819 #endif
820 ainfo->offset = *stack_size;
821 ainfo->reg = ARMREG_SP; /* in the caller */
822 ainfo->storage = RegTypeBase;
823 *stack_size += 8;
824 } else {
825 #ifdef __ARM_EABI__
826 if (i8_align == 8 && ((*gr) & 1))
827 (*gr) ++;
828 #endif
829 ainfo->storage = RegTypeIRegPair;
830 ainfo->reg = *gr;
832 (*gr) ++;
834 (*gr) ++;
837 static CallInfo*
838 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
840 guint i, gr;
841 int n = sig->hasthis + sig->param_count;
842 MonoType *simpletype;
843 guint32 stack_size = 0;
844 CallInfo *cinfo;
846 if (mp)
847 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
848 else
849 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
851 cinfo->nargs = n;
852 gr = ARMREG_R0;
854 /* FIXME: handle returning a struct */
855 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
856 guint32 align;
858 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
859 cinfo->ret.storage = RegTypeStructByVal;
860 } else {
861 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
862 cinfo->struct_ret = ARMREG_R0;
863 cinfo->vtype_retaddr = TRUE;
867 n = 0;
868 if (sig->hasthis) {
869 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
870 n++;
872 DEBUG(printf("params: %d\n", sig->param_count));
873 for (i = 0; i < sig->param_count; ++i) {
874 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
875 /* Prevent implicit arguments and sig_cookie from
876 being passed in registers */
877 gr = ARMREG_R3 + 1;
878 /* Emit the signature cookie just before the implicit arguments */
879 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
881 DEBUG(printf("param %d: ", i));
882 if (sig->params [i]->byref) {
883 DEBUG(printf("byref\n"));
884 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
885 n++;
886 continue;
888 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
889 switch (simpletype->type) {
890 case MONO_TYPE_BOOLEAN:
891 case MONO_TYPE_I1:
892 case MONO_TYPE_U1:
893 cinfo->args [n].size = 1;
894 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
895 n++;
896 break;
897 case MONO_TYPE_CHAR:
898 case MONO_TYPE_I2:
899 case MONO_TYPE_U2:
900 cinfo->args [n].size = 2;
901 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
902 n++;
903 break;
904 case MONO_TYPE_I4:
905 case MONO_TYPE_U4:
906 cinfo->args [n].size = 4;
907 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
908 n++;
909 break;
910 case MONO_TYPE_I:
911 case MONO_TYPE_U:
912 case MONO_TYPE_PTR:
913 case MONO_TYPE_FNPTR:
914 case MONO_TYPE_CLASS:
915 case MONO_TYPE_OBJECT:
916 case MONO_TYPE_STRING:
917 case MONO_TYPE_SZARRAY:
918 case MONO_TYPE_ARRAY:
919 case MONO_TYPE_R4:
920 cinfo->args [n].size = sizeof (gpointer);
921 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
922 n++;
923 break;
924 case MONO_TYPE_GENERICINST:
925 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
926 cinfo->args [n].size = sizeof (gpointer);
927 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
928 n++;
929 break;
931 /* Fall through */
932 case MONO_TYPE_TYPEDBYREF:
933 case MONO_TYPE_VALUETYPE: {
934 gint size;
935 int align_size;
936 int nwords;
937 guint32 align;
939 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
940 size = sizeof (MonoTypedRef);
941 align = sizeof (gpointer);
942 } else {
943 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
944 if (is_pinvoke)
945 size = mono_class_native_size (klass, &align);
946 else
947 size = mono_class_value_size (klass, &align);
949 DEBUG(printf ("load %d bytes struct\n",
950 mono_class_native_size (sig->params [i]->data.klass, NULL)));
951 align_size = size;
952 nwords = 0;
953 align_size += (sizeof (gpointer) - 1);
954 align_size &= ~(sizeof (gpointer) - 1);
955 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
956 cinfo->args [n].storage = RegTypeStructByVal;
957 /* FIXME: align stack_size if needed */
958 #ifdef __ARM_EABI__
959 if (align >= 8 && (gr & 1))
960 gr ++;
961 #endif
962 if (gr > ARMREG_R3) {
963 cinfo->args [n].size = 0;
964 cinfo->args [n].vtsize = nwords;
965 } else {
966 int rest = ARMREG_R3 - gr + 1;
967 int n_in_regs = rest >= nwords? nwords: rest;
969 cinfo->args [n].size = n_in_regs;
970 cinfo->args [n].vtsize = nwords - n_in_regs;
971 cinfo->args [n].reg = gr;
972 gr += n_in_regs;
973 nwords -= n_in_regs;
975 cinfo->args [n].offset = stack_size;
976 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
977 stack_size += nwords * sizeof (gpointer);
978 n++;
979 break;
981 case MONO_TYPE_U8:
982 case MONO_TYPE_I8:
983 case MONO_TYPE_R8:
984 cinfo->args [n].size = 8;
985 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
986 n++;
987 break;
988 default:
989 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
993 /* Handle the case where there are no implicit arguments */
994 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
995 /* Prevent implicit arguments and sig_cookie from
996 being passed in registers */
997 gr = ARMREG_R3 + 1;
998 /* Emit the signature cookie just before the implicit arguments */
999 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1003 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
1004 switch (simpletype->type) {
1005 case MONO_TYPE_BOOLEAN:
1006 case MONO_TYPE_I1:
1007 case MONO_TYPE_U1:
1008 case MONO_TYPE_I2:
1009 case MONO_TYPE_U2:
1010 case MONO_TYPE_CHAR:
1011 case MONO_TYPE_I4:
1012 case MONO_TYPE_U4:
1013 case MONO_TYPE_I:
1014 case MONO_TYPE_U:
1015 case MONO_TYPE_PTR:
1016 case MONO_TYPE_FNPTR:
1017 case MONO_TYPE_CLASS:
1018 case MONO_TYPE_OBJECT:
1019 case MONO_TYPE_SZARRAY:
1020 case MONO_TYPE_ARRAY:
1021 case MONO_TYPE_STRING:
1022 cinfo->ret.storage = RegTypeGeneral;
1023 cinfo->ret.reg = ARMREG_R0;
1024 break;
1025 case MONO_TYPE_U8:
1026 case MONO_TYPE_I8:
1027 cinfo->ret.storage = RegTypeIRegPair;
1028 cinfo->ret.reg = ARMREG_R0;
1029 break;
1030 case MONO_TYPE_R4:
1031 case MONO_TYPE_R8:
1032 cinfo->ret.storage = RegTypeFP;
1033 cinfo->ret.reg = ARMREG_R0;
1034 /* FIXME: cinfo->ret.reg = ???;
1035 cinfo->ret.storage = RegTypeFP;*/
1036 break;
1037 case MONO_TYPE_GENERICINST:
1038 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1039 cinfo->ret.storage = RegTypeGeneral;
1040 cinfo->ret.reg = ARMREG_R0;
1041 break;
1043 /* Fall through */
1044 case MONO_TYPE_VALUETYPE:
1045 case MONO_TYPE_TYPEDBYREF:
1046 if (cinfo->ret.storage != RegTypeStructByVal)
1047 cinfo->ret.storage = RegTypeStructByAddr;
1048 break;
1049 case MONO_TYPE_VOID:
1050 break;
1051 default:
1052 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1056 /* align stack size to 8 */
1057 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1058 stack_size = (stack_size + 7) & ~7;
1060 cinfo->stack_usage = stack_size;
1061 return cinfo;
1064 #ifndef DISABLE_JIT
1067 * Set var information according to the calling convention. arm version.
1068 * The locals var stuff should most likely be split in another method.
1070 void
1071 mono_arch_allocate_vars (MonoCompile *cfg)
1073 MonoMethodSignature *sig;
1074 MonoMethodHeader *header;
1075 MonoInst *ins;
1076 int i, offset, size, align, curinst;
1077 int frame_reg = ARMREG_FP;
1078 CallInfo *cinfo;
1079 guint32 ualign;
1081 sig = mono_method_signature (cfg->method);
1083 if (!cfg->arch.cinfo)
1084 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1085 cinfo = cfg->arch.cinfo;
1087 /* FIXME: this will change when we use FP as gcc does */
1088 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1090 /* allow room for the vararg method args: void* and long/double */
1091 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1092 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1094 header = cfg->header;
1097 * We use the frame register also for any method that has
1098 * exception clauses. This way, when the handlers are called,
1099 * the code will reference local variables using the frame reg instead of
1100 * the stack pointer: if we had to restore the stack pointer, we'd
1101 * corrupt the method frames that are already on the stack (since
1102 * filters get called before stack unwinding happens) when the filter
1103 * code would call any method (this also applies to finally etc.).
1105 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1106 frame_reg = ARMREG_FP;
1107 cfg->frame_reg = frame_reg;
1108 if (frame_reg != ARMREG_SP) {
1109 cfg->used_int_regs |= 1 << frame_reg;
1112 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1113 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1114 cfg->used_int_regs |= (1 << ARMREG_V5);
1116 offset = 0;
1117 curinst = 0;
1118 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1119 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1120 case MONO_TYPE_VOID:
1121 break;
1122 default:
1123 cfg->ret->opcode = OP_REGVAR;
1124 cfg->ret->inst_c0 = ARMREG_R0;
1125 break;
1128 /* local vars are at a positive offset from the stack pointer */
1130 * also note that if the function uses alloca, we use FP
1131 * to point at the local variables.
1133 offset = 0; /* linkage area */
1134 /* align the offset to 16 bytes: not sure this is needed here */
1135 //offset += 8 - 1;
1136 //offset &= ~(8 - 1);
1138 /* add parameter area size for called functions */
1139 offset += cfg->param_area;
1140 offset += 8 - 1;
1141 offset &= ~(8 - 1);
1142 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1143 offset += 8;
1145 /* allow room to save the return value */
1146 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1147 offset += 8;
1149 /* the MonoLMF structure is stored just below the stack pointer */
1150 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1151 if (cinfo->ret.storage == RegTypeStructByVal) {
1152 cfg->ret->opcode = OP_REGOFFSET;
1153 cfg->ret->inst_basereg = cfg->frame_reg;
1154 offset += sizeof (gpointer) - 1;
1155 offset &= ~(sizeof (gpointer) - 1);
1156 cfg->ret->inst_offset = - offset;
1157 } else {
1158 ins = cfg->vret_addr;
1159 offset += sizeof(gpointer) - 1;
1160 offset &= ~(sizeof(gpointer) - 1);
1161 ins->inst_offset = offset;
1162 ins->opcode = OP_REGOFFSET;
1163 ins->inst_basereg = frame_reg;
1164 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1165 printf ("vret_addr =");
1166 mono_print_ins (cfg->vret_addr);
1169 offset += sizeof(gpointer);
1172 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1173 if (cfg->arch.seq_point_info_var) {
1174 MonoInst *ins;
1176 ins = cfg->arch.seq_point_info_var;
1178 size = 4;
1179 align = 4;
1180 offset += align - 1;
1181 offset &= ~(align - 1);
1182 ins->opcode = OP_REGOFFSET;
1183 ins->inst_basereg = frame_reg;
1184 ins->inst_offset = offset;
1185 offset += size;
1187 ins = cfg->arch.ss_trigger_page_var;
1188 size = 4;
1189 align = 4;
1190 offset += align - 1;
1191 offset &= ~(align - 1);
1192 ins->opcode = OP_REGOFFSET;
1193 ins->inst_basereg = frame_reg;
1194 ins->inst_offset = offset;
1195 offset += size;
1198 curinst = cfg->locals_start;
1199 for (i = curinst; i < cfg->num_varinfo; ++i) {
1200 ins = cfg->varinfo [i];
1201 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1202 continue;
1204 /* inst->backend.is_pinvoke indicates native-sized value types; this is used by the
1205 * pinvoke wrappers when they call functions returning structures */
1206 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1207 size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
1208 align = ualign;
1210 else
1211 size = mono_type_size (ins->inst_vtype, &align);
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
1216 if (align < 4 && size >= 4)
1217 align = 4;
1218 offset += align - 1;
1219 offset &= ~(align - 1);
1220 ins->opcode = OP_REGOFFSET;
1221 ins->inst_offset = offset;
1222 ins->inst_basereg = frame_reg;
1223 offset += size;
1224 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1227 curinst = 0;
1228 if (sig->hasthis) {
1229 ins = cfg->args [curinst];
1230 if (ins->opcode != OP_REGVAR) {
1231 ins->opcode = OP_REGOFFSET;
1232 ins->inst_basereg = frame_reg;
1233 offset += sizeof (gpointer) - 1;
1234 offset &= ~(sizeof (gpointer) - 1);
1235 ins->inst_offset = offset;
1236 offset += sizeof (gpointer);
1238 curinst++;
1241 if (sig->call_convention == MONO_CALL_VARARG) {
1242 size = 4;
1243 align = 4;
1245 /* Allocate a local slot to hold the sig cookie address */
1246 offset += align - 1;
1247 offset &= ~(align - 1);
1248 cfg->sig_cookie = offset;
1249 offset += size;
1252 for (i = 0; i < sig->param_count; ++i) {
1253 ins = cfg->args [curinst];
1255 if (ins->opcode != OP_REGVAR) {
1256 ins->opcode = OP_REGOFFSET;
1257 ins->inst_basereg = frame_reg;
1258 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1259 align = ualign;
1260 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1261 * since it loads/stores misaligned words, which don't do the right thing.
1263 if (align < 4 && size >= 4)
1264 align = 4;
1265 /* The code in the prolog () stores words when storing vtypes received in a register */
1266 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1267 align = 4;
1268 offset += align - 1;
1269 offset &= ~(align - 1);
1270 ins->inst_offset = offset;
1271 offset += size;
1273 curinst++;
1276 /* align the offset to 8 bytes */
1277 offset += 8 - 1;
1278 offset &= ~(8 - 1);
1280 /* change sign? */
1281 cfg->stack_offset = offset;
1284 void
1285 mono_arch_create_vars (MonoCompile *cfg)
1287 MonoMethodSignature *sig;
1288 CallInfo *cinfo;
1290 sig = mono_method_signature (cfg->method);
1292 if (!cfg->arch.cinfo)
1293 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1294 cinfo = cfg->arch.cinfo;
1296 if (cinfo->ret.storage == RegTypeStructByVal)
1297 cfg->ret_var_is_local = TRUE;
1299 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1300 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1301 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1302 printf ("vret_addr = ");
1303 mono_print_ins (cfg->vret_addr);
1307 if (cfg->gen_seq_points && cfg->compile_aot) {
1308 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1309 ins->flags |= MONO_INST_VOLATILE;
1310 cfg->arch.seq_point_info_var = ins;
1312 /* Allocate a separate variable for this to save 1 load per seq point */
1313 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1314 ins->flags |= MONO_INST_VOLATILE;
1315 cfg->arch.ss_trigger_page_var = ins;
1319 static void
1320 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1322 MonoMethodSignature *tmp_sig;
1323 MonoInst *sig_arg;
1325 if (call->tail_call)
1326 NOT_IMPLEMENTED;
1328 /* FIXME: Add support for signature tokens to AOT */
1329 cfg->disable_aot = TRUE;
1331 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1334 * mono_ArgIterator_Setup assumes the signature cookie is
1335 * passed first and all the arguments which were before it are
1336 * passed on the stack after the signature. So compensate by
1337 * passing a different signature.
1339 tmp_sig = mono_metadata_signature_dup (call->signature);
1340 tmp_sig->param_count -= call->signature->sentinelpos;
1341 tmp_sig->sentinelpos = 0;
1342 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1344 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1345 sig_arg->dreg = mono_alloc_ireg (cfg);
1346 sig_arg->inst_p0 = tmp_sig;
1347 MONO_ADD_INS (cfg->cbb, sig_arg);
1349 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
1352 #ifdef ENABLE_LLVM
1353 LLVMCallInfo*
1354 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1356 int i, n;
1357 CallInfo *cinfo;
1358 ArgInfo *ainfo;
1359 LLVMCallInfo *linfo;
1361 n = sig->param_count + sig->hasthis;
1363 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1365 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1368 * LLVM always uses the native ABI while we use our own ABI, the
1369 * only difference is the handling of vtypes:
1370 * - we only pass/receive them in registers in some cases, and only
1371 * in 1 or 2 integer registers.
1373 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1374 cfg->exception_message = g_strdup ("unknown ret conv");
1375 cfg->disable_llvm = TRUE;
1376 return linfo;
1379 for (i = 0; i < n; ++i) {
1380 ainfo = cinfo->args + i;
1382 linfo->args [i].storage = LLVMArgNone;
1384 switch (ainfo->storage) {
1385 case RegTypeGeneral:
1386 case RegTypeIRegPair:
1387 case RegTypeBase:
1388 linfo->args [i].storage = LLVMArgInIReg;
1389 break;
1390 default:
1391 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1392 cfg->disable_llvm = TRUE;
1393 break;
1397 return linfo;
1399 #endif
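/*
 * Lower the outgoing arguments of a call according to the ArgInfo
 * computed by get_call_info (): register arguments become OP_MOVEs into
 * the fixed hard registers (two moves for a 64-bit pair), floating point
 * arguments end up in core registers either via OP_FGETLOW32/OP_FGETHIGH32
 * under soft-float or by spilling to the param area and reloading
 * otherwise, vtypes go through OP_OUTARG_VT, and stack arguments are
 * stored relative to SP.
 */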
1401 void
1402 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1404 MonoInst *in, *ins;
1405 MonoMethodSignature *sig;
1406 int i, n;
1407 CallInfo *cinfo;
1409 sig = call->signature;
1410 n = sig->param_count + sig->hasthis;
1412 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1414 for (i = 0; i < n; ++i) {
1415 ArgInfo *ainfo = cinfo->args + i;
1416 MonoType *t;
1418 if (i >= sig->hasthis)
1419 t = sig->params [i - sig->hasthis];
1420 else
1421 t = &mono_defaults.int_class->byval_arg;
1422 t = mini_type_get_underlying_type (NULL, t);
1424 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1425 /* Emit the signature cookie just before the implicit arguments */
1426 emit_sig_cookie (cfg, call, cinfo);
1429 in = call->args [i];
1431 switch (ainfo->storage) {
1432 case RegTypeGeneral:
1433 case RegTypeIRegPair:
1434 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1435 MONO_INST_NEW (cfg, ins, OP_MOVE);
1436 ins->dreg = mono_alloc_ireg (cfg);
1437 ins->sreg1 = in->dreg + 1;
1438 MONO_ADD_INS (cfg->cbb, ins);
1439 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1441 MONO_INST_NEW (cfg, ins, OP_MOVE);
1442 ins->dreg = mono_alloc_ireg (cfg);
1443 ins->sreg1 = in->dreg + 2;
1444 MONO_ADD_INS (cfg->cbb, ins);
1445 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1446 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1447 #ifndef MONO_ARCH_SOFT_FLOAT
1448 int creg;
1449 #endif
1451 if (ainfo->size == 4) {
1452 #ifdef MONO_ARCH_SOFT_FLOAT
1453 /* mono_emit_call_args () has already done the r8->r4 conversion */
1454 /* The converted value is in an int vreg */
1455 MONO_INST_NEW (cfg, ins, OP_MOVE);
1456 ins->dreg = mono_alloc_ireg (cfg);
1457 ins->sreg1 = in->dreg;
1458 MONO_ADD_INS (cfg->cbb, ins);
1459 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1460 #else
1461 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1462 creg = mono_alloc_ireg (cfg);
1463 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1464 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1465 #endif
1466 } else {
1467 #ifdef MONO_ARCH_SOFT_FLOAT
1468 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1469 ins->dreg = mono_alloc_ireg (cfg);
1470 ins->sreg1 = in->dreg;
1471 MONO_ADD_INS (cfg->cbb, ins);
1472 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1474 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1475 ins->dreg = mono_alloc_ireg (cfg);
1476 ins->sreg1 = in->dreg;
1477 MONO_ADD_INS (cfg->cbb, ins);
1478 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1479 #else
1480 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1481 creg = mono_alloc_ireg (cfg);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1483 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1484 creg = mono_alloc_ireg (cfg);
1485 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1486 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1487 #endif
1489 cfg->flags |= MONO_CFG_HAS_FPOUT;
1490 } else {
1491 MONO_INST_NEW (cfg, ins, OP_MOVE);
1492 ins->dreg = mono_alloc_ireg (cfg);
1493 ins->sreg1 = in->dreg;
1494 MONO_ADD_INS (cfg->cbb, ins);
1496 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1498 break;
1499 case RegTypeStructByAddr:
1500 NOT_IMPLEMENTED;
1501 #if 0
1502 /* FIXME: where is the data allocated? */
1503 arg->backend.reg3 = ainfo->reg;
1504 call->used_iregs |= 1 << ainfo->reg;
1505 g_assert_not_reached ();
1506 #endif
1507 break;
1508 case RegTypeStructByVal:
1509 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1510 ins->opcode = OP_OUTARG_VT;
1511 ins->sreg1 = in->dreg;
1512 ins->klass = in->klass;
1513 ins->inst_p0 = call;
1514 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1515 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1516 MONO_ADD_INS (cfg->cbb, ins);
1517 break;
1518 case RegTypeBase:
1519 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1521 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1522 if (t->type == MONO_TYPE_R8) {
1523 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1524 } else {
1525 #ifdef MONO_ARCH_SOFT_FLOAT
1526 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1527 #else
1528 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1529 #endif
1531 } else {
1532 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1534 break;
1535 case RegTypeBaseGen:
1536 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1537 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1538 MONO_INST_NEW (cfg, ins, OP_MOVE);
1539 ins->dreg = mono_alloc_ireg (cfg);
1540 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1541 MONO_ADD_INS (cfg->cbb, ins);
1542 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1543 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1544 int creg;
1546 #ifdef MONO_ARCH_SOFT_FLOAT
1547 g_assert_not_reached ();
1548 #endif
1550 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1551 creg = mono_alloc_ireg (cfg);
1552 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1553 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1554 creg = mono_alloc_ireg (cfg);
1555 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1556 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1557 cfg->flags |= MONO_CFG_HAS_FPOUT;
1558 } else {
1559 g_assert_not_reached ();
1561 break;
1562 case RegTypeFP: {
1563 /* FIXME: */
1564 NOT_IMPLEMENTED;
1565 #if 0
1566 arg->backend.reg3 = ainfo->reg;
1567 /* FP args are passed in int regs */
1568 call->used_iregs |= 1 << ainfo->reg;
1569 if (ainfo->size == 8) {
1570 arg->opcode = OP_OUTARG_R8;
1571 call->used_iregs |= 1 << (ainfo->reg + 1);
1572 } else {
1573 arg->opcode = OP_OUTARG_R4;
1575 #endif
1576 cfg->flags |= MONO_CFG_HAS_FPOUT;
1577 break;
1579 default:
1580 g_assert_not_reached ();
1584 /* Handle the case where there are no implicit arguments */
1585 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1586 emit_sig_cookie (cfg, call, cinfo);
1588 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1589 MonoInst *vtarg;
1591 if (cinfo->ret.storage == RegTypeStructByVal) {
1592 /* The JIT will transform this into a normal call */
1593 call->vret_in_reg = TRUE;
1594 } else {
1595 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1596 vtarg->sreg1 = call->vret_var->dreg;
1597 vtarg->dreg = mono_alloc_preg (cfg);
1598 MONO_ADD_INS (cfg->cbb, vtarg);
1600 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1604 call->stack_usage = cinfo->stack_usage;
1606 g_free (cinfo);
1609 void
1610 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1612 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1613 ArgInfo *ainfo = ins->inst_p1;
1614 int ovf_size = ainfo->vtsize;
1615 int doffset = ainfo->offset;
1616 int i, soffset, dreg;
1618 soffset = 0;
1619 for (i = 0; i < ainfo->size; ++i) {
1620 dreg = mono_alloc_ireg (cfg);
1621 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1622 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1623 soffset += sizeof (gpointer);
1625 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1626 if (ovf_size != 0)
1627 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
1630 void
1631 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1633 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1635 if (!ret->byref) {
1636 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1637 MonoInst *ins;
1639 if (COMPILE_LLVM (cfg)) {
1640 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1641 } else {
1642 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1643 ins->sreg1 = val->dreg + 1;
1644 ins->sreg2 = val->dreg + 2;
1645 MONO_ADD_INS (cfg->cbb, ins);
1647 return;
1649 #ifdef MONO_ARCH_SOFT_FLOAT
1650 if (ret->type == MONO_TYPE_R8) {
1651 MonoInst *ins;
1653 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1654 ins->dreg = cfg->ret->dreg;
1655 ins->sreg1 = val->dreg;
1656 MONO_ADD_INS (cfg->cbb, ins);
1657 return;
1659 if (ret->type == MONO_TYPE_R4) {
1660 /* Already converted to an int in method_to_ir () */
1661 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1662 return;
1664 #elif defined(ARM_FPU_VFP)
1665 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1666 MonoInst *ins;
1668 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1669 ins->dreg = cfg->ret->dreg;
1670 ins->sreg1 = val->dreg;
1671 MONO_ADD_INS (cfg->cbb, ins);
1672 return;
1674 #else
1675 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1676 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1677 return;
1679 #endif
1682 /* FIXME: */
1683 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1686 #endif /* #ifndef DISABLE_JIT */
1688 gboolean
1689 mono_arch_is_inst_imm (gint64 imm)
1691 return TRUE;
1694 #define DYN_CALL_STACK_ARGS 6
1696 typedef struct {
1697 MonoMethodSignature *sig;
1698 CallInfo *cinfo;
1699 } ArchDynCallInfo;
1701 typedef struct {
1702 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
1703 mgreg_t res, res2;
1704 guint8 *ret;
1705 } DynCallArgs;
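/*
 * Dynamic call support: start_dyn_call () flattens the managed arguments
 * into this image (regs [0..3] map to r0-r3, the remaining slots to the
 * first DYN_CALL_STACK_ARGS stack words), the dyn call trampoline is
 * expected to load that image before jumping to the callee, and
 * finish_dyn_call () copies the r0/r1 result (in res/res2) back through
 * 'ret'.
 */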
1707 static gboolean
1708 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1710 int i;
1712 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1713 return FALSE;
1715 switch (cinfo->ret.storage) {
1716 case RegTypeNone:
1717 case RegTypeGeneral:
1718 case RegTypeIRegPair:
1719 case RegTypeStructByAddr:
1720 break;
1721 case RegTypeFP:
1722 #ifdef ARM_FPU_FPA
1723 return FALSE;
1724 #elif defined(ARM_FPU_VFP)
1725 break;
1726 #else
1727 return FALSE;
1728 #endif
1729 default:
1730 return FALSE;
1733 for (i = 0; i < cinfo->nargs; ++i) {
1734 switch (cinfo->args [i].storage) {
1735 case RegTypeGeneral:
1736 break;
1737 case RegTypeIRegPair:
1738 break;
1739 case RegTypeBase:
1740 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1741 return FALSE;
1742 break;
1743 case RegTypeStructByVal:
1744 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1745 return FALSE;
1746 break;
1747 default:
1748 return FALSE;
1752 /* FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1753 for (i = 0; i < sig->param_count; ++i) {
1754 MonoType *t = sig->params [i];
1756 if (t->byref)
1757 continue;
1759 switch (t->type) {
1760 case MONO_TYPE_R4:
1761 case MONO_TYPE_R8:
1762 #ifdef MONO_ARCH_SOFT_FLOAT
1763 return FALSE;
1764 #else
1765 break;
1766 #endif
1768 case MONO_TYPE_I8:
1769 case MONO_TYPE_U8:
1770 return FALSE;
1772 default:
1773 break;
1777 return TRUE;
1780 MonoDynCallInfo*
1781 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1783 ArchDynCallInfo *info;
1784 CallInfo *cinfo;
1786 cinfo = get_call_info (NULL, sig, FALSE);
1788 if (!dyn_call_supported (cinfo, sig)) {
1789 g_free (cinfo);
1790 return NULL;
1793 info = g_new0 (ArchDynCallInfo, 1);
1794 // FIXME: Preprocess the info to speed up start_dyn_call ()
1795 info->sig = sig;
1796 info->cinfo = cinfo;
1798 return (MonoDynCallInfo*)info;
1801 void
1802 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1804 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1806 g_free (ainfo->cinfo);
1807 g_free (ainfo);
1810 void
1811 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1813 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1814 DynCallArgs *p = (DynCallArgs*)buf;
1815 int arg_index, greg, i, j;
1816 MonoMethodSignature *sig = dinfo->sig;
1818 g_assert (buf_len >= sizeof (DynCallArgs));
1820 p->res = 0;
1821 p->ret = ret;
1823 arg_index = 0;
1824 greg = 0;
1826 if (dinfo->cinfo->vtype_retaddr)
1827 p->regs [greg ++] = (mgreg_t)ret;
1829 if (sig->hasthis)
1830 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1832 for (i = 0; i < sig->param_count; i++) {
1833 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1834 gpointer *arg = args [arg_index ++];
1835 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
1836 int slot = -1;
1838 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1839 slot = ainfo->reg;
1840 else if (ainfo->storage == RegTypeBase)
1841 slot = PARAM_REGS + (ainfo->offset / 4);
1842 else
1843 g_assert_not_reached ();
1845 if (t->byref) {
1846 p->regs [slot] = (mgreg_t)*arg;
1847 continue;
1850 switch (t->type) {
1851 case MONO_TYPE_STRING:
1852 case MONO_TYPE_CLASS:
1853 case MONO_TYPE_ARRAY:
1854 case MONO_TYPE_SZARRAY:
1855 case MONO_TYPE_OBJECT:
1856 case MONO_TYPE_PTR:
1857 case MONO_TYPE_I:
1858 case MONO_TYPE_U:
1859 p->regs [slot] = (mgreg_t)*arg;
1860 break;
1861 case MONO_TYPE_BOOLEAN:
1862 case MONO_TYPE_U1:
1863 p->regs [slot] = *(guint8*)arg;
1864 break;
1865 case MONO_TYPE_I1:
1866 p->regs [slot] = *(gint8*)arg;
1867 break;
1868 case MONO_TYPE_I2:
1869 p->regs [slot] = *(gint16*)arg;
1870 break;
1871 case MONO_TYPE_U2:
1872 case MONO_TYPE_CHAR:
1873 p->regs [slot] = *(guint16*)arg;
1874 break;
1875 case MONO_TYPE_I4:
1876 p->regs [slot] = *(gint32*)arg;
1877 break;
1878 case MONO_TYPE_U4:
1879 p->regs [slot] = *(guint32*)arg;
1880 break;
1881 case MONO_TYPE_I8:
1882 case MONO_TYPE_U8:
1883 p->regs [slot ++] = (mgreg_t)arg [0];
1884 p->regs [slot] = (mgreg_t)arg [1];
1885 break;
1886 case MONO_TYPE_R4:
1887 p->regs [slot] = *(mgreg_t*)arg;
1888 break;
1889 case MONO_TYPE_R8:
1890 p->regs [slot ++] = (mgreg_t)arg [0];
1891 p->regs [slot] = (mgreg_t)arg [1];
1892 break;
1893 case MONO_TYPE_GENERICINST:
1894 if (MONO_TYPE_IS_REFERENCE (t)) {
1895 p->regs [slot] = (mgreg_t)*arg;
1896 break;
1897 } else {
1898 /* Fall through */
1900 case MONO_TYPE_VALUETYPE:
1901 g_assert (ainfo->storage == RegTypeStructByVal);
1903 if (ainfo->size == 0)
1904 slot = PARAM_REGS + (ainfo->offset / 4);
1905 else
1906 slot = ainfo->reg;
1908 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1909 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1910 break;
1911 default:
1912 g_assert_not_reached ();
1917 void
1918 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1920 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1921 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1922 guint8 *ret = ((DynCallArgs*)buf)->ret;
1923 mgreg_t res = ((DynCallArgs*)buf)->res;
1924 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1926 switch (mono_type_get_underlying_type (sig->ret)->type) {
1927 case MONO_TYPE_VOID:
1928 *(gpointer*)ret = NULL;
1929 break;
1930 case MONO_TYPE_STRING:
1931 case MONO_TYPE_CLASS:
1932 case MONO_TYPE_ARRAY:
1933 case MONO_TYPE_SZARRAY:
1934 case MONO_TYPE_OBJECT:
1935 case MONO_TYPE_I:
1936 case MONO_TYPE_U:
1937 case MONO_TYPE_PTR:
1938 *(gpointer*)ret = (gpointer)res;
1939 break;
1940 case MONO_TYPE_I1:
1941 *(gint8*)ret = res;
1942 break;
1943 case MONO_TYPE_U1:
1944 case MONO_TYPE_BOOLEAN:
1945 *(guint8*)ret = res;
1946 break;
1947 case MONO_TYPE_I2:
1948 *(gint16*)ret = res;
1949 break;
1950 case MONO_TYPE_U2:
1951 case MONO_TYPE_CHAR:
1952 *(guint16*)ret = res;
1953 break;
1954 case MONO_TYPE_I4:
1955 *(gint32*)ret = res;
1956 break;
1957 case MONO_TYPE_U4:
1958 *(guint32*)ret = res;
1959 break;
1960 case MONO_TYPE_I8:
1961 case MONO_TYPE_U8:
1962 /* This handles endianness as well */
1963 ((gint32*)ret) [0] = res;
1964 ((gint32*)ret) [1] = res2;
1965 break;
1966 case MONO_TYPE_GENERICINST:
1967 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1968 *(gpointer*)ret = (gpointer)res;
1969 break;
1970 } else {
1971 /* Fall through */
1973 case MONO_TYPE_VALUETYPE:
1974 g_assert (ainfo->cinfo->vtype_retaddr);
1975 /* Nothing to do */
1976 break;
1977 #if defined(ARM_FPU_VFP)
1978 case MONO_TYPE_R4:
1979 *(float*)ret = *(float*)&res;
1980 break;
1981 case MONO_TYPE_R8: {
1982 mgreg_t regs [2];
1984 regs [0] = res;
1985 regs [1] = res2;
1987 *(double*)ret = *(double*)&regs;
1988 break;
1990 #endif
1991 default:
1992 g_assert_not_reached ();
1996 #ifndef DISABLE_JIT
1999 * Allow tracing to work with this interface (with an optional argument)
2002 void*
2003 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2005 guchar *code = p;
2007 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2008 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2009 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2010 code = emit_call_reg (code, ARMREG_R2);
2011 return code;
2014 enum {
2015 SAVE_NONE,
2016 SAVE_STRUCT,
2017 SAVE_ONE,
2018 SAVE_TWO,
2019 SAVE_FP
2022 void*
2023 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2025 guchar *code = p;
2026 int save_mode = SAVE_NONE;
2027 int offset;
2028 MonoMethod *method = cfg->method;
2029 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2030 int save_offset = cfg->param_area;
2031 save_offset += 7;
2032 save_offset &= ~7;
2034 offset = code - cfg->native_code;
2035 /* we need about 16 instructions */
2036 if (offset > (cfg->code_size - 16 * 4)) {
2037 cfg->code_size *= 2;
2038 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2039 code = cfg->native_code + offset;
2041 switch (rtype) {
2042 case MONO_TYPE_VOID:
2043 /* special case string .ctor icall */
2044 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2045 save_mode = SAVE_ONE;
2046 else
2047 save_mode = SAVE_NONE;
2048 break;
2049 case MONO_TYPE_I8:
2050 case MONO_TYPE_U8:
2051 save_mode = SAVE_TWO;
2052 break;
2053 case MONO_TYPE_R4:
2054 case MONO_TYPE_R8:
2055 save_mode = SAVE_FP;
2056 break;
2057 case MONO_TYPE_VALUETYPE:
2058 save_mode = SAVE_STRUCT;
2059 break;
2060 default:
2061 save_mode = SAVE_ONE;
2062 break;
2065 switch (save_mode) {
2066 case SAVE_TWO:
2067 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2068 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2069 if (enable_arguments) {
2070 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2071 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2073 break;
2074 case SAVE_ONE:
2075 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2076 if (enable_arguments) {
2077 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2079 break;
2080 case SAVE_FP:
2081 /* FIXME: what reg? */
2082 if (enable_arguments) {
2083 /* FIXME: what reg? */
2085 break;
2086 case SAVE_STRUCT:
2087 if (enable_arguments) {
2088 /* FIXME: get the actual address */
2089 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2091 break;
2092 case SAVE_NONE:
2093 default:
2094 break;
2097 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2098 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2099 code = emit_call_reg (code, ARMREG_IP);
2101 switch (save_mode) {
2102 case SAVE_TWO:
2103 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2104 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2105 break;
2106 case SAVE_ONE:
2107 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2108 break;
2109 case SAVE_FP:
2110 /* FIXME */
2111 break;
2112 case SAVE_NONE:
2113 default:
2114 break;
2117 return code;
2121 * The immediate field for cond branches is big enough for all reasonable methods
2123 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2124 if (0 && ins->inst_true_bb->native_offset) { \
2125 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2126 } else { \
2127 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2128 ARM_B_COND (code, (condcode), 0); \
2131 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2133 /* emit an exception if the condition fails
2135 * We assign the extra code used to throw the implicit exceptions
2136 * to cfg->bb_exit as far as the big branch handling is concerned
2138 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2139 do { \
2140 mono_add_patch_info (cfg, code - cfg->native_code, \
2141 MONO_PATCH_INFO_EXC, exc_name); \
2142 ARM_BL_COND (code, (condcode), 0); \
2143 } while (0);
2145 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
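/* Typical use, as in mono_arch_output_basic_block () below:
 *
 *   EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
 *
 * which records a MONO_PATCH_INFO_EXC patch and emits a conditional BL that
 * is later pointed at the exception throwing code. */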
2147 void
2148 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
2152 void
2153 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2155 MonoInst *ins, *n, *last_ins = NULL;
2157 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2158 switch (ins->opcode) {
2159 case OP_MUL_IMM:
2160 case OP_IMUL_IMM:
2161 /* Already done by an arch-independent pass */
2162 break;
2163 case OP_LOAD_MEMBASE:
2164 case OP_LOADI4_MEMBASE:
2166 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2167 * OP_LOAD_MEMBASE offset(basereg), reg
2169 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2170 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2171 ins->inst_basereg == last_ins->inst_destbasereg &&
2172 ins->inst_offset == last_ins->inst_offset) {
2173 if (ins->dreg == last_ins->sreg1) {
2174 MONO_DELETE_INS (bb, ins);
2175 continue;
2176 } else {
2177 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2178 ins->opcode = OP_MOVE;
2179 ins->sreg1 = last_ins->sreg1;
2183 * Note: reg1 must be different from the basereg in the second load
2184 * OP_LOAD_MEMBASE offset(basereg), reg1
2185 * OP_LOAD_MEMBASE offset(basereg), reg2
2186 * -->
2187 * OP_LOAD_MEMBASE offset(basereg), reg1
2188 * OP_MOVE reg1, reg2
2190 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2191 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2192 ins->inst_basereg != last_ins->dreg &&
2193 ins->inst_basereg == last_ins->inst_basereg &&
2194 ins->inst_offset == last_ins->inst_offset) {
2196 if (ins->dreg == last_ins->dreg) {
2197 MONO_DELETE_INS (bb, ins);
2198 continue;
2199 } else {
2200 ins->opcode = OP_MOVE;
2201 ins->sreg1 = last_ins->dreg;
2204 //g_assert_not_reached ();
2206 #if 0
2208 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2209 * OP_LOAD_MEMBASE offset(basereg), reg
2210 * -->
2211 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2212 * OP_ICONST reg, imm
2214 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2215 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2216 ins->inst_basereg == last_ins->inst_destbasereg &&
2217 ins->inst_offset == last_ins->inst_offset) {
2218 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2219 ins->opcode = OP_ICONST;
2220 ins->inst_c0 = last_ins->inst_imm;
2221 g_assert_not_reached (); // check this rule
2222 #endif
2224 break;
2225 case OP_LOADU1_MEMBASE:
2226 case OP_LOADI1_MEMBASE:
2227 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2228 ins->inst_basereg == last_ins->inst_destbasereg &&
2229 ins->inst_offset == last_ins->inst_offset) {
2230 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2231 ins->sreg1 = last_ins->sreg1;
2233 break;
2234 case OP_LOADU2_MEMBASE:
2235 case OP_LOADI2_MEMBASE:
2236 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2237 ins->inst_basereg == last_ins->inst_destbasereg &&
2238 ins->inst_offset == last_ins->inst_offset) {
2239 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2240 ins->sreg1 = last_ins->sreg1;
2242 break;
2243 case OP_MOVE:
2244 ins->opcode = OP_MOVE;
2246 * OP_MOVE reg, reg
2248 if (ins->dreg == ins->sreg1) {
2249 MONO_DELETE_INS (bb, ins);
2250 continue;
2253 * OP_MOVE sreg, dreg
2254 * OP_MOVE dreg, sreg
2256 if (last_ins && last_ins->opcode == OP_MOVE &&
2257 ins->sreg1 == last_ins->dreg &&
2258 ins->dreg == last_ins->sreg1) {
2259 MONO_DELETE_INS (bb, ins);
2260 continue;
2262 break;
2264 last_ins = ins;
2265 ins = ins->next;
2267 bb->last_ins = last_ins;
2271 * the branch_cc_table should maintain the order of these
2272 * opcodes.
2273 case CEE_BEQ:
2274 case CEE_BGE:
2275 case CEE_BGT:
2276 case CEE_BLE:
2277 case CEE_BLT:
2278 case CEE_BNE_UN:
2279 case CEE_BGE_UN:
2280 case CEE_BGT_UN:
2281 case CEE_BLE_UN:
2282 case CEE_BLT_UN:
2284 static const guchar
2285 branch_cc_table [] = {
2286 ARMCOND_EQ,
2287 ARMCOND_GE,
2288 ARMCOND_GT,
2289 ARMCOND_LE,
2290 ARMCOND_LT,
2292 ARMCOND_NE,
2293 ARMCOND_HS,
2294 ARMCOND_HI,
2295 ARMCOND_LS,
2296 ARMCOND_LO
2299 #define NEW_INS(cfg,dest,op) do { \
2300 MONO_INST_NEW ((cfg), (dest), (op)); \
2301 mono_bblock_insert_before_ins (bb, ins, (dest)); \
2302 } while (0)
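/* Typical use in the lowering pass below: materialize an operand into a new
 * vreg right before the instruction being rewritten, e.g.
 *
 *   NEW_INS (cfg, temp, OP_ICONST);
 *   temp->inst_c0 = ins->inst_imm;
 *   temp->dreg = mono_alloc_ireg (cfg);
 *   ins->sreg2 = temp->dreg;
 */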
2304 static int
2305 map_to_reg_reg_op (int op)
2307 switch (op) {
2308 case OP_ADD_IMM:
2309 return OP_IADD;
2310 case OP_SUB_IMM:
2311 return OP_ISUB;
2312 case OP_AND_IMM:
2313 return OP_IAND;
2314 case OP_COMPARE_IMM:
2315 return OP_COMPARE;
2316 case OP_ICOMPARE_IMM:
2317 return OP_ICOMPARE;
2318 case OP_ADDCC_IMM:
2319 return OP_ADDCC;
2320 case OP_ADC_IMM:
2321 return OP_ADC;
2322 case OP_SUBCC_IMM:
2323 return OP_SUBCC;
2324 case OP_SBB_IMM:
2325 return OP_SBB;
2326 case OP_OR_IMM:
2327 return OP_IOR;
2328 case OP_XOR_IMM:
2329 return OP_IXOR;
2330 case OP_LOAD_MEMBASE:
2331 return OP_LOAD_MEMINDEX;
2332 case OP_LOADI4_MEMBASE:
2333 return OP_LOADI4_MEMINDEX;
2334 case OP_LOADU4_MEMBASE:
2335 return OP_LOADU4_MEMINDEX;
2336 case OP_LOADU1_MEMBASE:
2337 return OP_LOADU1_MEMINDEX;
2338 case OP_LOADI2_MEMBASE:
2339 return OP_LOADI2_MEMINDEX;
2340 case OP_LOADU2_MEMBASE:
2341 return OP_LOADU2_MEMINDEX;
2342 case OP_LOADI1_MEMBASE:
2343 return OP_LOADI1_MEMINDEX;
2344 case OP_STOREI1_MEMBASE_REG:
2345 return OP_STOREI1_MEMINDEX;
2346 case OP_STOREI2_MEMBASE_REG:
2347 return OP_STOREI2_MEMINDEX;
2348 case OP_STOREI4_MEMBASE_REG:
2349 return OP_STOREI4_MEMINDEX;
2350 case OP_STORE_MEMBASE_REG:
2351 return OP_STORE_MEMINDEX;
2352 case OP_STORER4_MEMBASE_REG:
2353 return OP_STORER4_MEMINDEX;
2354 case OP_STORER8_MEMBASE_REG:
2355 return OP_STORER8_MEMINDEX;
2356 case OP_STORE_MEMBASE_IMM:
2357 return OP_STORE_MEMBASE_REG;
2358 case OP_STOREI1_MEMBASE_IMM:
2359 return OP_STOREI1_MEMBASE_REG;
2360 case OP_STOREI2_MEMBASE_IMM:
2361 return OP_STOREI2_MEMBASE_REG;
2362 case OP_STOREI4_MEMBASE_IMM:
2363 return OP_STOREI4_MEMBASE_REG;
2365 g_assert_not_reached ();
2369 * Remove from the instruction list the instructions that can't be
2370 * represented with very simple instructions with no register
2371 * requirements.
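 *
 * Illustrative example, matching the OP_ADD_IMM handling below: an immediate
 * that can't be encoded as an ARM rotated 8-bit value, such as 0x12345,
 * turns
 *   OP_ADD_IMM  dreg <- sreg1, 0x12345
 * into
 *   OP_ICONST   tmp  <- 0x12345
 *   OP_IADD     dreg <- sreg1, tmp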
2373 void
2374 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2376 MonoInst *ins, *temp, *last_ins = NULL;
2377 int rot_amount, imm8, low_imm;
2379 MONO_BB_FOR_EACH_INS (bb, ins) {
2380 loop_start:
2381 switch (ins->opcode) {
2382 case OP_ADD_IMM:
2383 case OP_SUB_IMM:
2384 case OP_AND_IMM:
2385 case OP_COMPARE_IMM:
2386 case OP_ICOMPARE_IMM:
2387 case OP_ADDCC_IMM:
2388 case OP_ADC_IMM:
2389 case OP_SUBCC_IMM:
2390 case OP_SBB_IMM:
2391 case OP_OR_IMM:
2392 case OP_XOR_IMM:
2393 case OP_IADD_IMM:
2394 case OP_ISUB_IMM:
2395 case OP_IAND_IMM:
2396 case OP_IADC_IMM:
2397 case OP_ISBB_IMM:
2398 case OP_IOR_IMM:
2399 case OP_IXOR_IMM:
2400 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2401 NEW_INS (cfg, temp, OP_ICONST);
2402 temp->inst_c0 = ins->inst_imm;
2403 temp->dreg = mono_alloc_ireg (cfg);
2404 ins->sreg2 = temp->dreg;
2405 ins->opcode = mono_op_imm_to_op (ins->opcode);
2407 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
2408 goto loop_start;
2409 else
2410 break;
2411 case OP_MUL_IMM:
2412 case OP_IMUL_IMM:
2413 if (ins->inst_imm == 1) {
2414 ins->opcode = OP_MOVE;
2415 break;
2417 if (ins->inst_imm == 0) {
2418 ins->opcode = OP_ICONST;
2419 ins->inst_c0 = 0;
2420 break;
2422 imm8 = mono_is_power_of_two (ins->inst_imm);
2423 if (imm8 > 0) {
2424 ins->opcode = OP_SHL_IMM;
2425 ins->inst_imm = imm8;
2426 break;
2428 NEW_INS (cfg, temp, OP_ICONST);
2429 temp->inst_c0 = ins->inst_imm;
2430 temp->dreg = mono_alloc_ireg (cfg);
2431 ins->sreg2 = temp->dreg;
2432 ins->opcode = OP_IMUL;
2433 break;
2434 case OP_SBB:
2435 case OP_ISBB:
2436 case OP_SUBCC:
2437 case OP_ISUBCC:
2438 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2439 /* ARM sets the C flag to 1 if there was _no_ overflow */
2440 ins->next->opcode = OP_COND_EXC_NC;
2441 break;
2442 case OP_LOCALLOC_IMM:
2443 NEW_INS (cfg, temp, OP_ICONST);
2444 temp->inst_c0 = ins->inst_imm;
2445 temp->dreg = mono_alloc_ireg (cfg);
2446 ins->sreg1 = temp->dreg;
2447 ins->opcode = OP_LOCALLOC;
2448 break;
2449 case OP_LOAD_MEMBASE:
2450 case OP_LOADI4_MEMBASE:
2451 case OP_LOADU4_MEMBASE:
2452 case OP_LOADU1_MEMBASE:
2453 /* we can do two things: load the immed in a register
2454 * and use an indexed load, or see if the immed can be
2455 * represented as an add_imm + a load with a smaller offset
2456 * that fits. We just do the first for now, optimize later.
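 *
 * Illustrative example: a load whose offset doesn't fit in the 12 bit
 * immediate field, e.g.
 *   OP_LOADI4_MEMBASE  dreg <- [basereg + 0x12345]
 * becomes
 *   OP_ICONST          tmp  <- 0x12345
 *   OP_LOADI4_MEMINDEX dreg <- [basereg + tmp]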
2458 if (arm_is_imm12 (ins->inst_offset))
2459 break;
2460 NEW_INS (cfg, temp, OP_ICONST);
2461 temp->inst_c0 = ins->inst_offset;
2462 temp->dreg = mono_alloc_ireg (cfg);
2463 ins->sreg2 = temp->dreg;
2464 ins->opcode = map_to_reg_reg_op (ins->opcode);
2465 break;
2466 case OP_LOADI2_MEMBASE:
2467 case OP_LOADU2_MEMBASE:
2468 case OP_LOADI1_MEMBASE:
2469 if (arm_is_imm8 (ins->inst_offset))
2470 break;
2471 NEW_INS (cfg, temp, OP_ICONST);
2472 temp->inst_c0 = ins->inst_offset;
2473 temp->dreg = mono_alloc_ireg (cfg);
2474 ins->sreg2 = temp->dreg;
2475 ins->opcode = map_to_reg_reg_op (ins->opcode);
2476 break;
2477 case OP_LOADR4_MEMBASE:
2478 case OP_LOADR8_MEMBASE:
2479 if (arm_is_fpimm8 (ins->inst_offset))
2480 break;
2481 low_imm = ins->inst_offset & 0x1ff;
2482 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2483 NEW_INS (cfg, temp, OP_ADD_IMM);
2484 temp->inst_imm = ins->inst_offset & ~0x1ff;
2485 temp->sreg1 = ins->inst_basereg;
2486 temp->dreg = mono_alloc_ireg (cfg);
2487 ins->inst_basereg = temp->dreg;
2488 ins->inst_offset = low_imm;
2489 break;
2491 /* VFP/FPA doesn't have indexed load instructions */
2492 g_assert_not_reached ();
2493 break;
2494 case OP_STORE_MEMBASE_REG:
2495 case OP_STOREI4_MEMBASE_REG:
2496 case OP_STOREI1_MEMBASE_REG:
2497 if (arm_is_imm12 (ins->inst_offset))
2498 break;
2499 NEW_INS (cfg, temp, OP_ICONST);
2500 temp->inst_c0 = ins->inst_offset;
2501 temp->dreg = mono_alloc_ireg (cfg);
2502 ins->sreg2 = temp->dreg;
2503 ins->opcode = map_to_reg_reg_op (ins->opcode);
2504 break;
2505 case OP_STOREI2_MEMBASE_REG:
2506 if (arm_is_imm8 (ins->inst_offset))
2507 break;
2508 NEW_INS (cfg, temp, OP_ICONST);
2509 temp->inst_c0 = ins->inst_offset;
2510 temp->dreg = mono_alloc_ireg (cfg);
2511 ins->sreg2 = temp->dreg;
2512 ins->opcode = map_to_reg_reg_op (ins->opcode);
2513 break;
2514 case OP_STORER4_MEMBASE_REG:
2515 case OP_STORER8_MEMBASE_REG:
2516 if (arm_is_fpimm8 (ins->inst_offset))
2517 break;
2518 low_imm = ins->inst_offset & 0x1ff;
2519 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2520 NEW_INS (cfg, temp, OP_ADD_IMM);
2521 temp->inst_imm = ins->inst_offset & ~0x1ff;
2522 temp->sreg1 = ins->inst_destbasereg;
2523 temp->dreg = mono_alloc_ireg (cfg);
2524 ins->inst_destbasereg = temp->dreg;
2525 ins->inst_offset = low_imm;
2526 break;
2528 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2529 /* VFP/FPA doesn't have indexed store instructions */
2530 g_assert_not_reached ();
2531 break;
2532 case OP_STORE_MEMBASE_IMM:
2533 case OP_STOREI1_MEMBASE_IMM:
2534 case OP_STOREI2_MEMBASE_IMM:
2535 case OP_STOREI4_MEMBASE_IMM:
2536 NEW_INS (cfg, temp, OP_ICONST);
2537 temp->inst_c0 = ins->inst_imm;
2538 temp->dreg = mono_alloc_ireg (cfg);
2539 ins->sreg1 = temp->dreg;
2540 ins->opcode = map_to_reg_reg_op (ins->opcode);
2541 last_ins = temp;
2542 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2543 case OP_FCOMPARE: {
2544 gboolean swap = FALSE;
2545 int reg;
2547 if (!ins->next) {
2548 /* Optimized away */
2549 NULLIFY_INS (ins);
2550 break;
2553 /* Some fp compares require swapped operands */
2554 switch (ins->next->opcode) {
2555 case OP_FBGT:
2556 ins->next->opcode = OP_FBLT;
2557 swap = TRUE;
2558 break;
2559 case OP_FBGT_UN:
2560 ins->next->opcode = OP_FBLT_UN;
2561 swap = TRUE;
2562 break;
2563 case OP_FBLE:
2564 ins->next->opcode = OP_FBGE;
2565 swap = TRUE;
2566 break;
2567 case OP_FBLE_UN:
2568 ins->next->opcode = OP_FBGE_UN;
2569 swap = TRUE;
2570 break;
2571 default:
2572 break;
2574 if (swap) {
2575 reg = ins->sreg1;
2576 ins->sreg1 = ins->sreg2;
2577 ins->sreg2 = reg;
2579 break;
2583 last_ins = ins;
2585 bb->last_ins = last_ins;
2586 bb->max_vreg = cfg->next_vreg;
2589 void
2590 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2592 MonoInst *ins;
2594 if (long_ins->opcode == OP_LNEG) {
2595 ins = long_ins;
2596 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2597 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
2598 NULLIFY_INS (ins);
2602 static guchar*
2603 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2605 /* sreg is a float, dreg is an integer reg */
2606 #ifdef ARM_FPU_FPA
2607 ARM_FIXZ (code, dreg, sreg);
2608 #elif defined(ARM_FPU_VFP)
2609 if (is_signed)
2610 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2611 else
2612 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2613 ARM_FMRS (code, dreg, ARM_VFP_F0);
2614 #endif
2615 if (!is_signed) {
2616 if (size == 1)
2617 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2618 else if (size == 2) {
2619 ARM_SHL_IMM (code, dreg, dreg, 16);
2620 ARM_SHR_IMM (code, dreg, dreg, 16);
2622 } else {
2623 if (size == 1) {
2624 ARM_SHL_IMM (code, dreg, dreg, 24);
2625 ARM_SAR_IMM (code, dreg, dreg, 24);
2626 } else if (size == 2) {
2627 ARM_SHL_IMM (code, dreg, dreg, 16);
2628 ARM_SAR_IMM (code, dreg, dreg, 16);
2631 return code;
2634 #endif /* #ifndef DISABLE_JIT */
2636 typedef struct {
2637 guchar *code;
2638 const guchar *target;
2639 int absolute;
2640 int found;
2641 } PatchData;
2643 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
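/* i.e. the range of the signed 24-bit word offset in the ARM B/BL encoding
 * (+/- 32MB); calls to targets outside it go through the thunks below. */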
2645 static int
2646 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2647 PatchData *pdata = (PatchData*)user_data;
2648 guchar *code = data;
2649 guint32 *thunks = data;
2650 guint32 *endthunks = (guint32*)(code + bsize);
2651 int count = 0;
2652 int difflow, diffhigh;
2654 /* always ensure a call from pdata->code can reach the thunks without further thunks */
2655 difflow = (char*)pdata->code - (char*)thunks;
2656 diffhigh = (char*)pdata->code - (char*)endthunks;
2657 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2658 return 0;
2661 * The thunk is composed of 3 words:
2662 * load constant from thunks [2] into ARM_IP
2663 * bx to ARM_IP
2664 * address constant
2665 * Note that the LR register is already set up
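 *
 * Illustrative layout of one 12 byte thunk slot, as emitted below:
 *   ldr ip, [pc, #0]   ; loads thunks [2] (PC reads as the address + 8)
 *   bx  ip             ; or "mov pc, ip" when thumb isn't supported
 *   .word target       ; thunks [2], set to pdata->target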
2667 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2668 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2669 while (thunks < endthunks) {
2670 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2671 if (thunks [2] == (guint32)pdata->target) {
2672 arm_patch (pdata->code, (guchar*)thunks);
2673 mono_arch_flush_icache (pdata->code, 4);
2674 pdata->found = 1;
2675 return 1;
2676 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2677 /* found a free slot instead: emit thunk */
2678 /* ARMREG_IP is fine to use since this can't be an IMT call
2679 * which is indirect
2681 code = (guchar*)thunks;
2682 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2683 if (thumb_supported)
2684 ARM_BX (code, ARMREG_IP);
2685 else
2686 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2687 thunks [2] = (guint32)pdata->target;
2688 mono_arch_flush_icache ((guchar*)thunks, 12);
2690 arm_patch (pdata->code, (guchar*)thunks);
2691 mono_arch_flush_icache (pdata->code, 4);
2692 pdata->found = 1;
2693 return 1;
2695 /* skip 12 bytes, the size of the thunk */
2696 thunks += 3;
2697 count++;
2699 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
2701 return 0;
2704 static void
2705 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2707 PatchData pdata;
2709 if (!domain)
2710 domain = mono_domain_get ();
2712 pdata.code = code;
2713 pdata.target = target;
2714 pdata.absolute = absolute;
2715 pdata.found = 0;
2717 mono_domain_lock (domain);
2718 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2720 if (!pdata.found) {
2721 /* this uses the first available slot */
2722 pdata.found = 2;
2723 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2725 mono_domain_unlock (domain);
2727 if (pdata.found != 1)
2728 g_print ("thunk failed for %p from %p\n", target, code);
2729 g_assert (pdata.found == 1);
2732 static void
2733 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2735 guint32 *code32 = (void*)code;
2736 guint32 ins = *code32;
2737 guint32 prim = (ins >> 25) & 7;
2738 guint32 tval = GPOINTER_TO_UINT (target);
2740 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2741 if (prim == 5) { /* 101b */
2742 /* the diff starts 8 bytes from the branch opcode */
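/* (the PC reads as the address of the branch + 8, and the B/BL offset field
 * is a signed 24-bit word count, hence the +/- 32MB checks below) */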
2743 gint diff = target - code - 8;
2744 gint tbits;
2745 gint tmask = 0xffffffff;
2746 if (tval & 1) { /* entering thumb mode */
2747 diff = target - 1 - code - 8;
2748 g_assert (thumb_supported);
2749 tbits = 0xf << 28; /* bl->blx bit pattern */
2750 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2751 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2752 if (diff & 2) {
2753 tbits |= 1 << 24;
2755 tmask = ~(1 << 24); /* clear the link bit */
2756 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2757 } else {
2758 tbits = 0;
2760 if (diff >= 0) {
2761 if (diff <= 33554431) {
2762 diff >>= 2;
2763 ins = (ins & 0xff000000) | diff;
2764 ins &= tmask;
2765 *code32 = ins | tbits;
2766 return;
2768 } else {
2769 /* diff between 0 and -33554432 */
2770 if (diff >= -33554432) {
2771 diff >>= 2;
2772 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2773 ins &= tmask;
2774 *code32 = ins | tbits;
2775 return;
2779 handle_thunk (domain, TRUE, code, target);
2780 return;
2784 * The alternative call sequence looks like this:
2786 * ldr ip, [pc] // loads the address constant
2787 * b 1f // jumps around the constant
2788 * address constant embedded in the code
2789 * 1f:
2790 * mov lr, pc
2791 * mov pc, ip
2793 * There are two cases for patching:
2794 * a) at the end of method emission: in this case code points to the start
2795 * of the call sequence
2796 * b) during runtime patching of the call site: in this case code points
2797 * to the mov pc, ip instruction
2799 * We also have to handle the thunk jump code sequence:
2801 * ldr ip, [pc]
2802 * mov pc, ip
2803 * address constant // execution never reaches here
2805 if ((ins & 0x0ffffff0) == 0x12fff10) {
2806 /* Branch and exchange: the address is constructed in a reg
2807 * We can patch BX when the code sequence is the following:
2808 * ldr ip, [pc, #0] ; 0x8
2809 * b 0xc
2810 * .word code_ptr
2811 * mov lr, pc
2812 * bx ip
2813 * */
2814 guint32 ccode [4];
2815 guint8 *emit = (guint8*)ccode;
2816 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2817 ARM_B (emit, 0);
2818 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2819 ARM_BX (emit, ARMREG_IP);
2821 /*patching from magic trampoline*/
2822 if (ins == ccode [3]) {
2823 g_assert (code32 [-4] == ccode [0]);
2824 g_assert (code32 [-3] == ccode [1]);
2825 g_assert (code32 [-1] == ccode [2]);
2826 code32 [-2] = (guint32)target;
2827 return;
2829 /*patching from JIT*/
2830 if (ins == ccode [0]) {
2831 g_assert (code32 [1] == ccode [1]);
2832 g_assert (code32 [3] == ccode [2]);
2833 g_assert (code32 [4] == ccode [3]);
2834 code32 [2] = (guint32)target;
2835 return;
2837 g_assert_not_reached ();
2838 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2840 * ldr ip, [pc, #0]
2841 * b 0xc
2842 * .word code_ptr
2843 * blx ip
2845 guint32 ccode [4];
2846 guint8 *emit = (guint8*)ccode;
2847 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2848 ARM_B (emit, 0);
2849 ARM_BLX_REG (emit, ARMREG_IP);
2851 g_assert (code32 [-3] == ccode [0]);
2852 g_assert (code32 [-2] == ccode [1]);
2853 g_assert (code32 [0] == ccode [2]);
2855 code32 [-1] = (guint32)target;
2856 } else {
2857 guint32 ccode [4];
2858 guint32 *tmp = ccode;
2859 guint8 *emit = (guint8*)tmp;
2860 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2861 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2862 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2863 ARM_BX (emit, ARMREG_IP);
2864 if (ins == ccode [2]) {
2865 g_assert_not_reached (); // should be -2 ...
2866 code32 [-1] = (guint32)target;
2867 return;
2869 if (ins == ccode [0]) {
2870 /* handles both thunk jump code and the far call sequence */
2871 code32 [2] = (guint32)target;
2872 return;
2874 g_assert_not_reached ();
2876 // g_print ("patched with 0x%08x\n", ins);
2879 void
2880 arm_patch (guchar *code, const guchar *target)
2882 arm_patch_general (NULL, code, target);
2886 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2887 * (with the rotation amount in *rot_amount; rot_amount is already adjusted
2888 * to be used with the emit macros).
2889 * Return -1 otherwise.
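 *
 * Worked example: 0x00ff0000 is 0xff rotated right by 16, so 0xff is
 * returned with *rot_amount set to 16, while 0x00000101 spans more than 8
 * (circularly) contiguous bits and yields -1.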
2892 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2894 guint32 res, i;
2895 for (i = 0; i < 31; i+= 2) {
2896 res = (val << (32 - i)) | (val >> i);
2897 if (res & ~0xff)
2898 continue;
2899 *rot_amount = i? 32 - i: 0;
2900 return res;
2902 return -1;
2906 * Emits in code a sequence of instructions that load the value 'val'
2907 * into the dreg register. Uses at most 4 instructions.
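 *
 * For example, on a pre-ARMv7 core 0x12345678 is built as
 *   mov dreg, #0x78
 *   add dreg, dreg, #0x5600
 *   add dreg, dreg, #0x340000
 *   add dreg, dreg, #0x12000000
 * while with v7_supported a movw/movt pair is emitted instead.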
2909 guint8*
2910 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2912 int imm8, rot_amount;
2913 #if 0
2914 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2915 /* skip the constant pool */
2916 ARM_B (code, 0);
2917 *(int*)code = val;
2918 code += 4;
2919 return code;
2920 #endif
2921 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2922 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2923 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2924 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
2925 } else {
2926 if (v7_supported) {
2927 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2928 if (val >> 16)
2929 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
2930 return code;
2932 if (val & 0xFF) {
2933 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2934 if (val & 0xFF00) {
2935 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2937 if (val & 0xFF0000) {
2938 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2940 if (val & 0xFF000000) {
2941 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2943 } else if (val & 0xFF00) {
2944 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2945 if (val & 0xFF0000) {
2946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2948 if (val & 0xFF000000) {
2949 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2951 } else if (val & 0xFF0000) {
2952 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2953 if (val & 0xFF000000) {
2954 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2957 //g_assert_not_reached ();
2959 return code;
2962 gboolean
2963 mono_arm_thumb_supported (void)
2965 return thumb_supported;
2968 #ifndef DISABLE_JIT
2971 * emit_load_volatile_arguments:
2973 * Load volatile arguments from the stack to the original input registers.
2974 * Required before a tail call.
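 *
 * For example, an integer argument that arrived in r0 but was stored to its
 * stack slot in the prolog is reloaded into r0 here, so the OP_JMP (tail
 * call) code below branches to the target with the argument registers set
 * up as the target expects.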
2976 static guint8*
2977 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2979 MonoMethod *method = cfg->method;
2980 MonoMethodSignature *sig;
2981 MonoInst *inst;
2982 CallInfo *cinfo;
2983 guint32 i, pos;
2985 /* FIXME: Generate intermediate code instead */
2987 sig = mono_method_signature (method);
2989 /* This is the opposite of the code in emit_prolog */
2991 pos = 0;
2993 cinfo = get_call_info (NULL, sig, sig->pinvoke);
2995 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2996 ArgInfo *ainfo = &cinfo->ret;
2997 inst = cfg->vret_addr;
2998 g_assert (arm_is_imm12 (inst->inst_offset));
2999 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3001 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3002 ArgInfo *ainfo = cinfo->args + i;
3003 inst = cfg->args [pos];
3005 if (cfg->verbose_level > 2)
3006 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
3007 if (inst->opcode == OP_REGVAR) {
3008 if (ainfo->storage == RegTypeGeneral)
3009 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3010 else if (ainfo->storage == RegTypeFP) {
3011 g_assert_not_reached ();
3012 } else if (ainfo->storage == RegTypeBase) {
3013 // FIXME:
3014 NOT_IMPLEMENTED;
3016 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3017 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3018 } else {
3019 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3020 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3023 } else
3024 g_assert_not_reached ();
3025 } else {
3026 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3027 switch (ainfo->size) {
3028 case 1:
3029 case 2:
3030 // FIXME:
3031 NOT_IMPLEMENTED;
3032 break;
3033 case 8:
3034 g_assert (arm_is_imm12 (inst->inst_offset));
3035 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3036 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3037 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3038 break;
3039 default:
3040 if (arm_is_imm12 (inst->inst_offset)) {
3041 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3042 } else {
3043 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3044 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3046 break;
3048 } else if (ainfo->storage == RegTypeBaseGen) {
3049 // FIXME:
3050 NOT_IMPLEMENTED;
3051 } else if (ainfo->storage == RegTypeBase) {
3052 /* Nothing to do */
3053 } else if (ainfo->storage == RegTypeFP) {
3054 g_assert_not_reached ();
3055 } else if (ainfo->storage == RegTypeStructByVal) {
3056 int doffset = inst->inst_offset;
3057 int soffset = 0;
3058 int cur_reg;
3059 int size = 0;
3060 if (mono_class_from_mono_type (inst->inst_vtype))
3061 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3062 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3063 if (arm_is_imm12 (doffset)) {
3064 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3065 } else {
3066 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3067 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3069 soffset += sizeof (gpointer);
3070 doffset += sizeof (gpointer);
3072 if (ainfo->vtsize)
3073 // FIXME:
3074 NOT_IMPLEMENTED;
3075 } else if (ainfo->storage == RegTypeStructByAddr) {
3076 } else {
3077 // FIXME:
3078 NOT_IMPLEMENTED;
3081 pos ++;
3084 g_free (cinfo);
3086 return code;
3089 void
3090 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3092 MonoInst *ins;
3093 MonoCallInst *call;
3094 guint offset;
3095 guint8 *code = cfg->native_code + cfg->code_len;
3096 MonoInst *last_ins = NULL;
3097 guint last_offset = 0;
3098 int max_len, cpos;
3099 int imm8, rot_amount;
3101 /* we don't align basic blocks of loops on arm */
3103 if (cfg->verbose_level > 2)
3104 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3106 cpos = bb->max_offset;
3108 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3109 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3110 //g_assert (!mono_compile_aot);
3111 //cpos += 6;
3112 //if (bb->cil_code)
3113 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3114 /* this is not thread safe, but good enough */
3115 /* FIXME: how to handle overflows? */
3116 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3119 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3120 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3121 (gpointer)"mono_break");
3122 code = emit_call_seq (cfg, code);
3125 MONO_BB_FOR_EACH_INS (bb, ins) {
3126 offset = code - cfg->native_code;
3128 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3130 if (offset > (cfg->code_size - max_len - 16)) {
3131 cfg->code_size *= 2;
3132 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3133 code = cfg->native_code + offset;
3135 // if (ins->cil_code)
3136 // g_print ("cil code\n");
3137 mono_debug_record_line_number (cfg, ins, offset);
3139 switch (ins->opcode) {
3140 case OP_MEMORY_BARRIER:
3141 break;
3142 case OP_TLS_GET:
3143 #ifdef HAVE_AEABI_READ_TP
3144 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3145 (gpointer)"__aeabi_read_tp");
3146 code = emit_call_seq (cfg, code);
3148 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3149 #else
3150 g_assert_not_reached ();
3151 #endif
3152 break;
3153 /*case OP_BIGMUL:
3154 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3155 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3156 break;
3157 case OP_BIGMUL_UN:
3158 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3159 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3160 break;*/
3161 case OP_STOREI1_MEMBASE_IMM:
3162 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3163 g_assert (arm_is_imm12 (ins->inst_offset));
3164 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3165 break;
3166 case OP_STOREI2_MEMBASE_IMM:
3167 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3168 g_assert (arm_is_imm8 (ins->inst_offset));
3169 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3170 break;
3171 case OP_STORE_MEMBASE_IMM:
3172 case OP_STOREI4_MEMBASE_IMM:
3173 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3174 g_assert (arm_is_imm12 (ins->inst_offset));
3175 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3176 break;
3177 case OP_STOREI1_MEMBASE_REG:
3178 g_assert (arm_is_imm12 (ins->inst_offset));
3179 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3180 break;
3181 case OP_STOREI2_MEMBASE_REG:
3182 g_assert (arm_is_imm8 (ins->inst_offset));
3183 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3184 break;
3185 case OP_STORE_MEMBASE_REG:
3186 case OP_STOREI4_MEMBASE_REG:
3187 /* this case is special, since it happens for spill code after lowering has been called */
3188 if (arm_is_imm12 (ins->inst_offset)) {
3189 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3190 } else {
3191 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3192 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3194 break;
3195 case OP_STOREI1_MEMINDEX:
3196 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3197 break;
3198 case OP_STOREI2_MEMINDEX:
3199 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3200 break;
3201 case OP_STORE_MEMINDEX:
3202 case OP_STOREI4_MEMINDEX:
3203 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3204 break;
3205 case OP_LOADU4_MEM:
3206 g_assert_not_reached ();
3207 break;
3208 case OP_LOAD_MEMINDEX:
3209 case OP_LOADI4_MEMINDEX:
3210 case OP_LOADU4_MEMINDEX:
3211 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3212 break;
3213 case OP_LOADI1_MEMINDEX:
3214 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3215 break;
3216 case OP_LOADU1_MEMINDEX:
3217 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3218 break;
3219 case OP_LOADI2_MEMINDEX:
3220 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3221 break;
3222 case OP_LOADU2_MEMINDEX:
3223 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3224 break;
3225 case OP_LOAD_MEMBASE:
3226 case OP_LOADI4_MEMBASE:
3227 case OP_LOADU4_MEMBASE:
3228 /* this case is special, since it happens for spill code after lowering has been called */
3229 if (arm_is_imm12 (ins->inst_offset)) {
3230 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3231 } else {
3232 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3233 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3235 break;
3236 case OP_LOADI1_MEMBASE:
3237 g_assert (arm_is_imm8 (ins->inst_offset));
3238 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3239 break;
3240 case OP_LOADU1_MEMBASE:
3241 g_assert (arm_is_imm12 (ins->inst_offset));
3242 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3243 break;
3244 case OP_LOADU2_MEMBASE:
3245 g_assert (arm_is_imm8 (ins->inst_offset));
3246 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3247 break;
3248 case OP_LOADI2_MEMBASE:
3249 g_assert (arm_is_imm8 (ins->inst_offset));
3250 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3251 break;
3252 case OP_ICONV_TO_I1:
3253 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3254 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3255 break;
3256 case OP_ICONV_TO_I2:
3257 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3258 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3259 break;
3260 case OP_ICONV_TO_U1:
3261 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3262 break;
3263 case OP_ICONV_TO_U2:
3264 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3265 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3266 break;
3267 case OP_COMPARE:
3268 case OP_ICOMPARE:
3269 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3270 break;
3271 case OP_COMPARE_IMM:
3272 case OP_ICOMPARE_IMM:
3273 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3274 g_assert (imm8 >= 0);
3275 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3276 break;
3277 case OP_BREAK:
3279 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3280 * So instead of emitting a trap, we emit a call to a C function and place a
3281 * breakpoint there.
3283 //*(int*)code = 0xef9f0001;
3284 //code += 4;
3285 //ARM_DBRK (code);
3286 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3287 (gpointer)"mono_break");
3288 code = emit_call_seq (cfg, code);
3289 break;
3290 case OP_RELAXED_NOP:
3291 ARM_NOP (code);
3292 break;
3293 case OP_NOP:
3294 case OP_DUMMY_USE:
3295 case OP_DUMMY_STORE:
3296 case OP_NOT_REACHED:
3297 case OP_NOT_NULL:
3298 break;
3299 case OP_SEQ_POINT: {
3300 int i;
3301 MonoInst *info_var = cfg->arch.seq_point_info_var;
3302 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3303 MonoInst *var;
3304 int dreg = ARMREG_LR;
3307 * For AOT, we use one GOT slot per method, which will point to a
3308 * SeqPointInfo structure, containing all the information required
3309 * by the code below.
3311 if (cfg->compile_aot) {
3312 g_assert (info_var);
3313 g_assert (info_var->opcode == OP_REGOFFSET);
3314 g_assert (arm_is_imm12 (info_var->inst_offset));
3318 * Read from the single stepping trigger page. This will cause a
3319 * SIGSEGV when single stepping is enabled.
3320 * We do this _before_ the breakpoint, so single stepping after
3321 * a breakpoint is hit will step to the next IL offset.
3323 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3325 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3326 if (cfg->compile_aot) {
3327 /* Load the trigger page addr from the variable initialized in the prolog */
3328 var = ss_trigger_page_var;
3329 g_assert (var);
3330 g_assert (var->opcode == OP_REGOFFSET);
3331 g_assert (arm_is_imm12 (var->inst_offset));
3332 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3333 } else {
3334 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3335 ARM_B (code, 0);
3336 *(int*)code = (int)ss_trigger_page;
3337 code += 4;
3339 ARM_LDR_IMM (code, dreg, dreg, 0);
3342 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3344 if (cfg->compile_aot) {
3345 guint32 offset = code - cfg->native_code;
3346 guint32 val;
3348 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3349 /* Add the offset */
3350 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3351 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3353 * We have to emit nops to keep the difference between the offset
3354 * stored in seq_points and the breakpoint instruction constant;
3355 * mono_arch_get_ip_for_breakpoint () depends on this.
3357 if (val & 0xFF00)
3358 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3359 else
3360 ARM_NOP (code);
3361 if (val & 0xFF0000)
3362 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3363 else
3364 ARM_NOP (code);
3365 g_assert (!(val & 0xFF000000));
3366 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3367 ARM_LDR_IMM (code, dreg, dreg, 0);
3369 /* What is faster, a branch or a load ? */
3370 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3371 /* The breakpoint instruction */
3372 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3373 } else {
3375 * A placeholder for a possible breakpoint inserted by
3376 * mono_arch_set_breakpoint ().
3378 for (i = 0; i < 4; ++i)
3379 ARM_NOP (code);
3381 break;
3383 case OP_ADDCC:
3384 case OP_IADDCC:
3385 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3386 break;
3387 case OP_IADD:
3388 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3389 break;
3390 case OP_ADC:
3391 case OP_IADC:
3392 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3393 break;
3394 case OP_ADDCC_IMM:
3395 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3396 g_assert (imm8 >= 0);
3397 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3398 break;
3399 case OP_ADD_IMM:
3400 case OP_IADD_IMM:
3401 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3402 g_assert (imm8 >= 0);
3403 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3404 break;
3405 case OP_ADC_IMM:
3406 case OP_IADC_IMM:
3407 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3408 g_assert (imm8 >= 0);
3409 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3410 break;
3411 case OP_IADD_OVF:
3412 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3413 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3414 break;
3415 case OP_IADD_OVF_UN:
3416 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3417 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3418 break;
3419 case OP_ISUB_OVF:
3420 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3421 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3422 break;
3423 case OP_ISUB_OVF_UN:
3424 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3425 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3426 break;
3427 case OP_ADD_OVF_CARRY:
3428 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3429 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3430 break;
3431 case OP_ADD_OVF_UN_CARRY:
3432 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3433 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3434 break;
3435 case OP_SUB_OVF_CARRY:
3436 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3437 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3438 break;
3439 case OP_SUB_OVF_UN_CARRY:
3440 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3441 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3442 break;
3443 case OP_SUBCC:
3444 case OP_ISUBCC:
3445 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3446 break;
3447 case OP_SUBCC_IMM:
3448 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3449 g_assert (imm8 >= 0);
3450 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3451 break;
3452 case OP_ISUB:
3453 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3454 break;
3455 case OP_SBB:
3456 case OP_ISBB:
3457 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3458 break;
3459 case OP_SUB_IMM:
3460 case OP_ISUB_IMM:
3461 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3462 g_assert (imm8 >= 0);
3463 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 break;
3465 case OP_SBB_IMM:
3466 case OP_ISBB_IMM:
3467 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3468 g_assert (imm8 >= 0);
3469 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3470 break;
3471 case OP_ARM_RSBS_IMM:
3472 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3473 g_assert (imm8 >= 0);
3474 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3475 break;
3476 case OP_ARM_RSC_IMM:
3477 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3478 g_assert (imm8 >= 0);
3479 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3480 break;
3481 case OP_IAND:
3482 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3483 break;
3484 case OP_AND_IMM:
3485 case OP_IAND_IMM:
3486 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3487 g_assert (imm8 >= 0);
3488 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3489 break;
3490 case OP_IDIV:
3491 case OP_IDIV_UN:
3492 case OP_DIV_IMM:
3493 case OP_IREM:
3494 case OP_IREM_UN:
3495 case OP_REM_IMM:
3496 /* crappy ARM arch doesn't have a DIV instruction */
3497 g_assert_not_reached ();
3498 case OP_IOR:
3499 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3500 break;
3501 case OP_OR_IMM:
3502 case OP_IOR_IMM:
3503 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3504 g_assert (imm8 >= 0);
3505 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3506 break;
3507 case OP_IXOR:
3508 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3509 break;
3510 case OP_XOR_IMM:
3511 case OP_IXOR_IMM:
3512 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3513 g_assert (imm8 >= 0);
3514 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3515 break;
3516 case OP_ISHL:
3517 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3518 break;
3519 case OP_SHL_IMM:
3520 case OP_ISHL_IMM:
3521 if (ins->inst_imm)
3522 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3523 else if (ins->dreg != ins->sreg1)
3524 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3525 break;
3526 case OP_ISHR:
3527 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3528 break;
3529 case OP_SHR_IMM:
3530 case OP_ISHR_IMM:
3531 if (ins->inst_imm)
3532 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3533 else if (ins->dreg != ins->sreg1)
3534 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3535 break;
3536 case OP_SHR_UN_IMM:
3537 case OP_ISHR_UN_IMM:
3538 if (ins->inst_imm)
3539 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3540 else if (ins->dreg != ins->sreg1)
3541 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3542 break;
3543 case OP_ISHR_UN:
3544 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3545 break;
3546 case OP_INOT:
3547 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3548 break;
3549 case OP_INEG:
3550 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3551 break;
3552 case OP_IMUL:
3553 if (ins->dreg == ins->sreg2)
3554 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3555 else
3556 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3557 break;
3558 case OP_MUL_IMM:
3559 g_assert_not_reached ();
3560 break;
3561 case OP_IMUL_OVF:
3562 /* FIXME: handle ovf/ sreg2 != dreg */
3563 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3564 /* FIXME: MUL doesn't set the C/O flags on ARM */
3565 break;
3566 case OP_IMUL_OVF_UN:
3567 /* FIXME: handle ovf/ sreg2 != dreg */
3568 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3569 /* FIXME: MUL doesn't set the C/O flags on ARM */
3570 break;
3571 case OP_ICONST:
3572 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3573 break;
3574 case OP_AOTCONST:
3575 /* Load the GOT offset */
3576 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3577 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3578 ARM_B (code, 0);
3579 *(gpointer*)code = NULL;
3580 code += 4;
3581 /* Load the value from the GOT */
3582 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3583 break;
3584 case OP_ICONV_TO_I4:
3585 case OP_ICONV_TO_U4:
3586 case OP_MOVE:
3587 if (ins->dreg != ins->sreg1)
3588 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3589 break;
3590 case OP_SETLRET: {
3591 int saved = ins->sreg2;
3592 if (ins->sreg2 == ARM_LSW_REG) {
3593 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3594 saved = ARMREG_LR;
3596 if (ins->sreg1 != ARM_LSW_REG)
3597 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3598 if (saved != ARM_MSW_REG)
3599 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3600 break;
3602 case OP_FMOVE:
3603 #ifdef ARM_FPU_FPA
3604 ARM_MVFD (code, ins->dreg, ins->sreg1);
3605 #elif defined(ARM_FPU_VFP)
3606 ARM_CPYD (code, ins->dreg, ins->sreg1);
3607 #endif
3608 break;
3609 case OP_FCONV_TO_R4:
3610 #ifdef ARM_FPU_FPA
3611 ARM_MVFS (code, ins->dreg, ins->sreg1);
3612 #elif defined(ARM_FPU_VFP)
3613 ARM_CVTD (code, ins->dreg, ins->sreg1);
3614 ARM_CVTS (code, ins->dreg, ins->dreg);
3615 #endif
3616 break;
3617 case OP_JMP:
3619 * Keep in sync with mono_arch_emit_epilog
3621 g_assert (!cfg->method->save_lmf);
3623 code = emit_load_volatile_arguments (cfg, code);
3625 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3626 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3627 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3628 if (cfg->compile_aot) {
3629 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3630 ARM_B (code, 0);
3631 *(gpointer*)code = NULL;
3632 code += 4;
3633 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3634 } else {
3635 ARM_B (code, 0);
3637 break;
3638 case OP_CHECK_THIS:
3639 /* ensure ins->sreg1 is not NULL */
3640 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3641 break;
3642 case OP_ARGLIST: {
3643 g_assert (cfg->sig_cookie < 128);
3644 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3645 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3646 break;
3648 case OP_FCALL:
3649 case OP_LCALL:
3650 case OP_VCALL:
3651 case OP_VCALL2:
3652 case OP_VOIDCALL:
3653 case OP_CALL:
3654 call = (MonoCallInst*)ins;
3655 if (ins->flags & MONO_INST_HAS_METHOD)
3656 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3657 else
3658 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3659 code = emit_call_seq (cfg, code);
3660 code = emit_move_return_value (cfg, ins, code);
3661 break;
3662 case OP_FCALL_REG:
3663 case OP_LCALL_REG:
3664 case OP_VCALL_REG:
3665 case OP_VCALL2_REG:
3666 case OP_VOIDCALL_REG:
3667 case OP_CALL_REG:
3668 code = emit_call_reg (code, ins->sreg1);
3669 code = emit_move_return_value (cfg, ins, code);
3670 break;
3671 case OP_FCALL_MEMBASE:
3672 case OP_LCALL_MEMBASE:
3673 case OP_VCALL_MEMBASE:
3674 case OP_VCALL2_MEMBASE:
3675 case OP_VOIDCALL_MEMBASE:
3676 case OP_CALL_MEMBASE:
3677 g_assert (arm_is_imm12 (ins->inst_offset));
3678 g_assert (ins->sreg1 != ARMREG_LR);
3679 call = (MonoCallInst*)ins;
3680 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3681 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3682 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3684 * We can't embed the method in the code stream in PIC code, or
3685 * in gshared code.
3686 * Instead, we put it in V5 in code emitted by
3687 * mono_arch_emit_imt_argument (), and embed NULL here to
3688 * signal the IMT thunk that the value is in V5.
3690 if (call->dynamic_imt_arg)
3691 *((gpointer*)code) = NULL;
3692 else
3693 *((gpointer*)code) = (gpointer)call->method;
3694 code += 4;
3695 } else {
3696 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3697 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3699 code = emit_move_return_value (cfg, ins, code);
3700 break;
3701 case OP_LOCALLOC: {
3702 /* keep alignment */
3703 int alloca_waste = cfg->param_area;
3704 alloca_waste += 7;
3705 alloca_waste &= ~7;
3706 /* round the size to 8 bytes */
3707 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3708 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3709 if (alloca_waste)
3710 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3711 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3712 /* memzero the area: dreg holds the size, sp is the pointer */
3713 if (ins->flags & MONO_INST_INIT) {
3714 guint8 *start_loop, *branch_to_cond;
3715 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3716 branch_to_cond = code;
3717 ARM_B (code, 0);
3718 start_loop = code;
3719 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3720 arm_patch (branch_to_cond, code);
3721 /* decrement by 4 and set flags */
3722 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3723 ARM_B_COND (code, ARMCOND_GE, 0);
3724 arm_patch (code - 4, start_loop);
3726 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3727 break;
3729 case OP_DYN_CALL: {
3730 int i;
3731 MonoInst *var = cfg->dyn_call_var;
3733 g_assert (var->opcode == OP_REGOFFSET);
3734 g_assert (arm_is_imm12 (var->inst_offset));
3736 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3737 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3738 /* ip = ftn */
3739 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3741 /* Save args buffer */
3742 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3744 /* Set stack slots using R0 as scratch reg */
3745 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3746 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3747 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3748 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3751 /* Set argument registers */
3752 for (i = 0; i < PARAM_REGS; ++i)
3753 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3755 /* Make the call */
3756 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3757 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3759 /* Save result */
3760 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3761 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3762 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3763 break;
3765 case OP_THROW: {
3766 if (ins->sreg1 != ARMREG_R0)
3767 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3768 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3769 (gpointer)"mono_arch_throw_exception");
3770 code = emit_call_seq (cfg, code);
3771 break;
3773 case OP_RETHROW: {
3774 if (ins->sreg1 != ARMREG_R0)
3775 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3776 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3777 (gpointer)"mono_arch_rethrow_exception");
3778 code = emit_call_seq (cfg, code);
3779 break;
3781 case OP_START_HANDLER: {
3782 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3784 if (arm_is_imm12 (spvar->inst_offset)) {
3785 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3786 } else {
3787 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3788 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3790 break;
3792 case OP_ENDFILTER: {
3793 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3795 if (ins->sreg1 != ARMREG_R0)
3796 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3797 if (arm_is_imm12 (spvar->inst_offset)) {
3798 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3799 } else {
3800 g_assert (ARMREG_IP != spvar->inst_basereg);
3801 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3802 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3804 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3805 break;
3807 case OP_ENDFINALLY: {
3808 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3810 if (arm_is_imm12 (spvar->inst_offset)) {
3811 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3812 } else {
3813 g_assert (ARMREG_IP != spvar->inst_basereg);
3814 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3815 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3817 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3818 break;
3820 case OP_CALL_HANDLER:
3821 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3822 ARM_BL (code, 0);
3823 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3824 break;
3825 case OP_LABEL:
3826 ins->inst_c0 = code - cfg->native_code;
3827 break;
3828 case OP_BR:
3829 /*if (ins->inst_target_bb->native_offset) {
3830 ARM_B (code, 0);
3831 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3832 } else*/ {
3833 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3834 ARM_B (code, 0);
3836 break;
3837 case OP_BR_REG:
3838 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3839 break;
3840 case OP_SWITCH:
3842 * In the normal case we have:
3843 * ldr pc, [pc, ins->sreg1 << 2]
3844 * nop
3845 * If aot, we have:
3846 * ldr lr, [pc, ins->sreg1 << 2]
3847 * add pc, pc, lr
3848 * The table data follows.
3849 * FIXME: add aot support.
3851 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3852 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3853 if (offset > (cfg->code_size - max_len - 16)) {
3854 cfg->code_size += max_len;
3855 cfg->code_size *= 2;
3856 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3857 code = cfg->native_code + offset;
3859 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3860 ARM_NOP (code);
3861 code += 4 * GPOINTER_TO_INT (ins->klass);
3862 break;
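/*
 * Illustrative layout of the non-AOT switch emitted above:
 *
 *   X + 0:          ldr pc, [pc, sreg1, lsl #2]   @ pc reads as X + 8, so this
 *   X + 4:          nop                           @ indexes the table below
 *   X + 8 + 4 * i:  .word target_i                @ filled in by mono_arch_patch_code ()
 *
 * The case count is taken from ins->klass (see the max_len adjustment above).
 */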
3863 case OP_CEQ:
3864 case OP_ICEQ:
3865 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3866 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3867 break;
3868 case OP_CLT:
3869 case OP_ICLT:
3870 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3871 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3872 break;
3873 case OP_CLT_UN:
3874 case OP_ICLT_UN:
3875 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3876 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3877 break;
3878 case OP_CGT:
3879 case OP_ICGT:
3880 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3881 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3882 break;
3883 case OP_CGT_UN:
3884 case OP_ICGT_UN:
3885 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3886 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3887 break;
3888 case OP_COND_EXC_EQ:
3889 case OP_COND_EXC_NE_UN:
3890 case OP_COND_EXC_LT:
3891 case OP_COND_EXC_LT_UN:
3892 case OP_COND_EXC_GT:
3893 case OP_COND_EXC_GT_UN:
3894 case OP_COND_EXC_GE:
3895 case OP_COND_EXC_GE_UN:
3896 case OP_COND_EXC_LE:
3897 case OP_COND_EXC_LE_UN:
3898 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3899 break;
3900 case OP_COND_EXC_IEQ:
3901 case OP_COND_EXC_INE_UN:
3902 case OP_COND_EXC_ILT:
3903 case OP_COND_EXC_ILT_UN:
3904 case OP_COND_EXC_IGT:
3905 case OP_COND_EXC_IGT_UN:
3906 case OP_COND_EXC_IGE:
3907 case OP_COND_EXC_IGE_UN:
3908 case OP_COND_EXC_ILE:
3909 case OP_COND_EXC_ILE_UN:
3910 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3911 break;
3912 case OP_COND_EXC_C:
3913 case OP_COND_EXC_IC:
3914 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3915 break;
3916 case OP_COND_EXC_OV:
3917 case OP_COND_EXC_IOV:
3918 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3919 break;
3920 case OP_COND_EXC_NC:
3921 case OP_COND_EXC_INC:
3922 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3923 break;
3924 case OP_COND_EXC_NO:
3925 case OP_COND_EXC_INO:
3926 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3927 break;
3928 case OP_IBEQ:
3929 case OP_IBNE_UN:
3930 case OP_IBLT:
3931 case OP_IBLT_UN:
3932 case OP_IBGT:
3933 case OP_IBGT_UN:
3934 case OP_IBGE:
3935 case OP_IBGE_UN:
3936 case OP_IBLE:
3937 case OP_IBLE_UN:
3938 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3939 break;
3941 /* floating point opcodes */
3942 #ifdef ARM_FPU_FPA
3943 case OP_R8CONST:
3944 if (cfg->compile_aot) {
3945 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3946 ARM_B (code, 1);
3947 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3948 code += 4;
3949 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3950 code += 4;
3951 } else {
3952 /* FIXME: we can optimize the imm load by dealing with part of
3953 * the displacement in LDFD (aligning to 512).
3955 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3956 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3958 break;
3959 case OP_R4CONST:
3960 if (cfg->compile_aot) {
3961 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3962 ARM_B (code, 0);
3963 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3964 code += 4;
3965 } else {
3966 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3967 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3969 break;
3970 case OP_STORER8_MEMBASE_REG:
3971 /* This is generated by the local regalloc pass which runs after the lowering pass */
3972 if (!arm_is_fpimm8 (ins->inst_offset)) {
3973 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3974 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3975 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3976 } else {
3977 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3979 break;
3980 case OP_LOADR8_MEMBASE:
3981 /* This is generated by the local regalloc pass which runs after the lowering pass */
3982 if (!arm_is_fpimm8 (ins->inst_offset)) {
3983 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3984 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3985 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3986 } else {
3987 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3989 break;
3990 case OP_STORER4_MEMBASE_REG:
3991 g_assert (arm_is_fpimm8 (ins->inst_offset));
3992 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3993 break;
3994 case OP_LOADR4_MEMBASE:
3995 g_assert (arm_is_fpimm8 (ins->inst_offset));
3996 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3997 break;
3998 case OP_ICONV_TO_R_UN: {
3999 int tmpreg;
4000 tmpreg = ins->dreg == 0 ? 1 : 0;
4001 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4002 ARM_FLTD (code, ins->dreg, ins->sreg1);
4003 ARM_B_COND (code, ARMCOND_GE, 8);
4004 /* save the temp register */
4005 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4006 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
4007 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
4008 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
4009 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
4010 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4011 /* skip the constant pool */
4012 ARM_B (code, 8);
4013 code += 4;
4014 *(int*)code = 0x41f00000;
4015 code += 4;
4016 *(int*)code = 0;
4017 code += 4;
4018 /* FIXME: adjust:
4019 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4020 * adfltd fdest, fdest, ftemp
4022 break;
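/*
 * The two inline words above (0x41f00000 then 0x00000000, high word first as
 * written here) encode the IEEE-754 double 4294967296.0, i.e. 2^32.  FLTD
 * converts sreg1 as a signed int, so when bit 31 is set the result is low by
 * exactly 2^32 and the conditional path above adds the constant back.
 */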
4024 case OP_ICONV_TO_R4:
4025 ARM_FLTS (code, ins->dreg, ins->sreg1);
4026 break;
4027 case OP_ICONV_TO_R8:
4028 ARM_FLTD (code, ins->dreg, ins->sreg1);
4029 break;
4031 #elif defined(ARM_FPU_VFP)
4033 case OP_R8CONST:
4034 if (cfg->compile_aot) {
4035 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4036 ARM_B (code, 1);
4037 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4038 code += 4;
4039 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4040 code += 4;
4041 } else {
4042 /* FIXME: we can optimize the imm load by dealing with part of
4043 * the displacement in LDFD (aligning to 512).
4045 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4046 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4048 break;
4049 case OP_R4CONST:
4050 if (cfg->compile_aot) {
4051 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4052 ARM_B (code, 0);
4053 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4054 code += 4;
4055 ARM_CVTS (code, ins->dreg, ins->dreg);
4056 } else {
4057 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4058 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4059 ARM_CVTS (code, ins->dreg, ins->dreg);
4061 break;
4062 case OP_STORER8_MEMBASE_REG:
4063 /* This is generated by the local regalloc pass which runs after the lowering pass */
4064 if (!arm_is_fpimm8 (ins->inst_offset)) {
4065 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4066 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4067 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4068 } else {
4069 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4071 break;
4072 case OP_LOADR8_MEMBASE:
4073 /* This is generated by the local regalloc pass which runs after the lowering pass */
4074 if (!arm_is_fpimm8 (ins->inst_offset)) {
4075 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4076 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4077 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4078 } else {
4079 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4081 break;
4082 case OP_STORER4_MEMBASE_REG:
4083 g_assert (arm_is_fpimm8 (ins->inst_offset));
4084 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4085 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4086 break;
4087 case OP_LOADR4_MEMBASE:
4088 g_assert (arm_is_fpimm8 (ins->inst_offset));
4089 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4090 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4091 break;
4092 case OP_ICONV_TO_R_UN: {
4093 g_assert_not_reached ();
4094 break;
4096 case OP_ICONV_TO_R4:
4097 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4098 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4099 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4100 break;
4101 case OP_ICONV_TO_R8:
4102 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4103 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4104 break;
4106 case OP_SETFRET:
4107 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4108 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4109 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4110 } else {
4111 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4113 break;
4115 #endif
4117 case OP_FCONV_TO_I1:
4118 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4119 break;
4120 case OP_FCONV_TO_U1:
4121 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4122 break;
4123 case OP_FCONV_TO_I2:
4124 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4125 break;
4126 case OP_FCONV_TO_U2:
4127 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4128 break;
4129 case OP_FCONV_TO_I4:
4130 case OP_FCONV_TO_I:
4131 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4132 break;
4133 case OP_FCONV_TO_U4:
4134 case OP_FCONV_TO_U:
4135 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4136 break;
4137 case OP_FCONV_TO_I8:
4138 case OP_FCONV_TO_U8:
4139 g_assert_not_reached ();
4140 /* Implemented as helper calls */
4141 break;
4142 case OP_LCONV_TO_R_UN:
4143 g_assert_not_reached ();
4144 /* Implemented as helper calls */
4145 break;
4146 case OP_LCONV_TO_OVF_I4_2: {
4147 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4149 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
4152 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4153 high_bit_not_set = code;
4154 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4156 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
4157 valid_negative = code;
4158 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4159 invalid_negative = code;
4160 ARM_B_COND (code, ARMCOND_AL, 0);
4162 arm_patch (high_bit_not_set, code);
4164 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4165 valid_positive = code;
4166 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4168 arm_patch (invalid_negative, code);
4169 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4171 arm_patch (valid_negative, code);
4172 arm_patch (valid_positive, code);
4174 if (ins->dreg != ins->sreg1)
4175 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4176 break;
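/*
 * A minimal C sketch of the check emitted above, with lo = sreg1 and
 * hi = sreg2 of the 64-bit value:
 *
 *     if ((gint32) lo < 0) {
 *         if (hi != 0xffffffff)   // a negative low word needs a sign-extended high word
 *             throw OverflowException;
 *     } else {
 *         if (hi != 0)            // a non-negative low word needs a zero high word
 *             throw OverflowException;
 *     }
 *     dreg = lo;
 */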
4178 #ifdef ARM_FPU_FPA
4179 case OP_FADD:
4180 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4181 break;
4182 case OP_FSUB:
4183 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4184 break;
4185 case OP_FMUL:
4186 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4187 break;
4188 case OP_FDIV:
4189 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4190 break;
4191 case OP_FNEG:
4192 ARM_MNFD (code, ins->dreg, ins->sreg1);
4193 break;
4194 #elif defined(ARM_FPU_VFP)
4195 case OP_FADD:
4196 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4197 break;
4198 case OP_FSUB:
4199 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4200 break;
4201 case OP_FMUL:
4202 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4203 break;
4204 case OP_FDIV:
4205 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4206 break;
4207 case OP_FNEG:
4208 ARM_NEGD (code, ins->dreg, ins->sreg1);
4209 break;
4210 #endif
4211 case OP_FREM:
4212 /* emulated */
4213 g_assert_not_reached ();
4214 break;
4215 case OP_FCOMPARE:
4216 #ifdef ARM_FPU_FPA
4217 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4218 #elif defined(ARM_FPU_VFP)
4219 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4220 ARM_FMSTAT (code);
4221 #endif
4222 break;
4223 case OP_FCEQ:
4224 #ifdef ARM_FPU_FPA
4225 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4226 #elif defined(ARM_FPU_VFP)
4227 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4228 ARM_FMSTAT (code);
4229 #endif
4230 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4231 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4232 break;
4233 case OP_FCLT:
4234 #ifdef ARM_FPU_FPA
4235 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4236 #elif defined(ARM_FPU_VFP)
4237 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4238 ARM_FMSTAT (code);
4239 #endif
4240 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4241 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4242 break;
4243 case OP_FCLT_UN:
4244 #ifdef ARM_FPU_FPA
4245 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4246 #elif defined(ARM_FPU_VFP)
4247 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4248 ARM_FMSTAT (code);
4249 #endif
4250 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4251 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4252 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4253 break;
4254 case OP_FCGT:
4255 /* swapped */
4256 #ifdef ARM_FPU_FPA
4257 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4258 #elif defined(ARM_FPU_VFP)
4259 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4260 ARM_FMSTAT (code);
4261 #endif
4262 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4263 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4264 break;
4265 case OP_FCGT_UN:
4266 /* swapped */
4267 #ifdef ARM_FPU_FPA
4268 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4269 #elif defined(ARM_FPU_VFP)
4270 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4271 ARM_FMSTAT (code);
4272 #endif
4273 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4274 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4275 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4276 break;
4277 /* ARM FPA flags table:
4278 * N Less than ARMCOND_MI
4279 * Z Equal ARMCOND_EQ
4280 * C Greater Than or Equal ARMCOND_CS
4281 * V Unordered ARMCOND_VS
4283 case OP_FBEQ:
4284 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4285 break;
4286 case OP_FBNE_UN:
4287 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4288 break;
4289 case OP_FBLT:
4290 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4291 break;
4292 case OP_FBLT_UN:
4293 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4294 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4295 break;
4296 case OP_FBGT:
4297 case OP_FBGT_UN:
4298 case OP_FBLE:
4299 case OP_FBLE_UN:
4300 g_assert_not_reached ();
4301 break;
4302 case OP_FBGE:
4303 #ifdef ARM_FPU_VFP
4304 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4305 #else
4306 /* FPA requires EQ even though the docs suggest that just CS is enough */
4307 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4308 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4309 #endif
4310 break;
4311 case OP_FBGE_UN:
4312 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4313 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4314 break;
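/*
 * Worked example for the flags table above: OP_FBLT_UN emits one branch on
 * ARMCOND_VS (unordered) and one on ARMCOND_MI (less than), so the branch is
 * taken when the operands are NaN or compare lower, which matches the
 * "branch if less than or unordered" semantics of the .un opcodes.
 */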
4316 case OP_CKFINITE: {
4317 #ifdef ARM_FPU_FPA
4318 if (ins->dreg != ins->sreg1)
4319 ARM_MVFD (code, ins->dreg, ins->sreg1);
4320 #elif defined(ARM_FPU_VFP)
4321 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4322 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4323 ARM_B (code, 1);
4324 *(guint32*)code = 0xffffffff;
4325 code += 4;
4326 *(guint32*)code = 0x7fefffff;
4327 code += 4;
4328 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4329 ARM_FMSTAT (code);
4330 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4331 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4332 ARM_FMSTAT (code);
4333 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4335 ARM_CPYD (code, ins->dreg, ins->sreg1);
4336 #endif
4337 break;
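/*
 * The inline constant above (0xffffffff then 0x7fefffff, low word first on a
 * little-endian VFP setup) is the largest finite double, 0x7fefffffffffffff.
 * |sreg1| > DBL_MAX (GT) catches +/- infinity, while the self-compare that
 * follows raises the unordered flag (VS) for NaNs; both throw ArithmeticException.
 */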
4339 default:
4340 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4341 g_assert_not_reached ();
4344 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4345 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4346 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4347 g_assert_not_reached ();
4350 cpos += max_len;
4352 last_ins = ins;
4353 last_offset = offset;
4356 cfg->code_len = code - cfg->native_code;
4359 #endif /* DISABLE_JIT */
4361 #ifdef HAVE_AEABI_READ_TP
4362 void __aeabi_read_tp (void);
4363 #endif
4365 void
4366 mono_arch_register_lowlevel_calls (void)
4368 /* The signature doesn't matter */
4369 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4370 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4372 #ifndef MONO_CROSS_COMPILE
4373 #ifdef HAVE_AEABI_READ_TP
4374 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
4375 #endif
4376 #endif
4379 #define patch_lis_ori(ip,val) do {\
4380 guint16 *__lis_ori = (guint16*)(ip); \
4381 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4382 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
4383 } while (0)
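/*
 * Sketch of what the macro writes (it appears to be inherited from the ppc
 * backend's lis/ori constant loads and is only reached from unreachable
 * branches below): patch_lis_ori (ip, 0x12345678) stores 0x1234 into the
 * second 16-bit slot at ip and 0x5678 into the fourth.
 */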
4385 void
4386 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4388 MonoJumpInfo *patch_info;
4389 gboolean compile_aot = !run_cctors;
4391 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4392 unsigned char *ip = patch_info->ip.i + code;
4393 const unsigned char *target;
4395 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4396 gpointer *jt = (gpointer*)(ip + 8);
4397 int i;
4398 /* jt is the inlined jump table, 2 instructions after ip
4399 * In the normal case we store the absolute addresses,
4400 * otherwise the displacements.
4402 for (i = 0; i < patch_info->data.table->table_size; i++)
4403 jt [i] = code + (int)patch_info->data.table->table [i];
4404 continue;
4406 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4408 if (compile_aot) {
4409 switch (patch_info->type) {
4410 case MONO_PATCH_INFO_BB:
4411 case MONO_PATCH_INFO_LABEL:
4412 break;
4413 default:
4414 /* No need to patch these */
4415 continue;
4419 switch (patch_info->type) {
4420 case MONO_PATCH_INFO_IP:
4421 g_assert_not_reached ();
4422 patch_lis_ori (ip, ip);
4423 continue;
4424 case MONO_PATCH_INFO_METHOD_REL:
4425 g_assert_not_reached ();
4426 *((gpointer *)(ip)) = code + patch_info->data.offset;
4427 continue;
4428 case MONO_PATCH_INFO_METHODCONST:
4429 case MONO_PATCH_INFO_CLASS:
4430 case MONO_PATCH_INFO_IMAGE:
4431 case MONO_PATCH_INFO_FIELD:
4432 case MONO_PATCH_INFO_VTABLE:
4433 case MONO_PATCH_INFO_IID:
4434 case MONO_PATCH_INFO_SFLDA:
4435 case MONO_PATCH_INFO_LDSTR:
4436 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4437 case MONO_PATCH_INFO_LDTOKEN:
4438 g_assert_not_reached ();
4439 /* from OP_AOTCONST : lis + ori */
4440 patch_lis_ori (ip, target);
4441 continue;
4442 case MONO_PATCH_INFO_R4:
4443 case MONO_PATCH_INFO_R8:
4444 g_assert_not_reached ();
4445 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4446 continue;
4447 case MONO_PATCH_INFO_EXC_NAME:
4448 g_assert_not_reached ();
4449 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4450 continue;
4451 case MONO_PATCH_INFO_NONE:
4452 case MONO_PATCH_INFO_BB_OVF:
4453 case MONO_PATCH_INFO_EXC_OVF:
4454 /* everything is dealt with at epilog output time */
4455 continue;
4456 default:
4457 break;
4459 arm_patch_general (domain, ip, target);
4463 #ifndef DISABLE_JIT
4466 * Stack frame layout:
4468 * ------------------- fp
4469 * MonoLMF structure or saved registers
4470 * -------------------
4471 * locals
4472 * -------------------
4473 * spilled regs
4474 * -------------------
4475 * optional 8 bytes for tracing
4476 * -------------------
4477 * param area size is cfg->param_area
4478 * ------------------- sp
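/*
 * Rough sketch (descriptive only, assuming an 8 byte MONO_ARCH_FRAME_ALIGNMENT)
 * of the size bookkeeping done by the prolog below:
 *
 *     prev_sp_offset = bytes pushed for the saved registers
 *                      (ip/lr + used_int_regs, or the 10 register LMF push);
 *     alloc_size     = cfg->stack_offset (+ the MonoLMF adjustment);
 *     alloc_size     = (alloc_size + 7) & ~7;
 *     if (prev_sp_offset & 4)      // keep sp 8 byte aligned counting the pushes
 *         alloc_size += 4;
 *     cfg->stack_usage = alloc_size;
 */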
4480 guint8 *
4481 mono_arch_emit_prolog (MonoCompile *cfg)
4483 MonoMethod *method = cfg->method;
4484 MonoBasicBlock *bb;
4485 MonoMethodSignature *sig;
4486 MonoInst *inst;
4487 int alloc_size, pos, max_offset, i, rot_amount;
4488 guint8 *code;
4489 CallInfo *cinfo;
4490 int tracing = 0;
4491 int lmf_offset = 0;
4492 int prev_sp_offset, reg_offset;
4494 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4495 tracing = 1;
4497 sig = mono_method_signature (method);
4498 cfg->code_size = 256 + sig->param_count * 20;
4499 code = cfg->native_code = g_malloc (cfg->code_size);
4501 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4503 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4505 alloc_size = cfg->stack_offset;
4506 pos = 0;
4508 if (!method->save_lmf) {
4509 /* We save SP by storing it into IP and saving IP */
4510 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4511 prev_sp_offset = 8; /* ip and lr */
4512 for (i = 0; i < 16; ++i) {
4513 if (cfg->used_int_regs & (1 << i))
4514 prev_sp_offset += 4;
4516 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4517 reg_offset = 0;
4518 for (i = 0; i < 16; ++i) {
4519 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4520 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4521 reg_offset += 4;
4524 } else {
4525 ARM_PUSH (code, 0x5ff0);
4526 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4527 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4528 reg_offset = 0;
4529 for (i = 0; i < 16; ++i) {
4530 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4531 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4532 reg_offset += 4;
4535 pos += sizeof (MonoLMF) - prev_sp_offset;
4536 lmf_offset = pos;
4538 alloc_size += pos;
4539 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4540 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4541 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4542 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4545 /* the stack used in the pushed regs */
4546 if (prev_sp_offset & 4)
4547 alloc_size += 4;
4548 cfg->stack_usage = alloc_size;
4549 if (alloc_size) {
4550 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4551 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4552 } else {
4553 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4554 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4556 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4558 if (cfg->frame_reg != ARMREG_SP) {
4559 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4560 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4562 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4563 prev_sp_offset += alloc_size;
4565 /* compute max_offset in order to use short forward jumps;
4566 * we could skip doing it on ARM because the immediate displacement
4567 * for jumps is large enough. It may be useful later for constant pools
4569 max_offset = 0;
4570 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4571 MonoInst *ins = bb->code;
4572 bb->max_offset = max_offset;
4574 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4575 max_offset += 6;
4577 MONO_BB_FOR_EACH_INS (bb, ins)
4578 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4581 /* store runtime generic context */
4582 if (cfg->rgctx_var) {
4583 MonoInst *ins = cfg->rgctx_var;
4585 g_assert (ins->opcode == OP_REGOFFSET);
4587 if (arm_is_imm12 (ins->inst_offset)) {
4588 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4589 } else {
4590 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4591 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4595 /* load arguments allocated to register from the stack */
4596 pos = 0;
4598 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4600 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4601 ArgInfo *ainfo = &cinfo->ret;
4602 inst = cfg->vret_addr;
4603 g_assert (arm_is_imm12 (inst->inst_offset));
4604 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4607 if (sig->call_convention == MONO_CALL_VARARG) {
4608 ArgInfo *cookie = &cinfo->sig_cookie;
4610 /* Save the sig cookie address */
4611 g_assert (cookie->storage == RegTypeBase);
4613 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4614 g_assert (arm_is_imm12 (cfg->sig_cookie));
4615 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4616 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4619 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4620 ArgInfo *ainfo = cinfo->args + i;
4621 inst = cfg->args [pos];
4623 if (cfg->verbose_level > 2)
4624 g_print ("Saving argument %d (storage: %d)\n", i, ainfo->storage);
4625 if (inst->opcode == OP_REGVAR) {
4626 if (ainfo->storage == RegTypeGeneral)
4627 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4628 else if (ainfo->storage == RegTypeFP) {
4629 g_assert_not_reached ();
4630 } else if (ainfo->storage == RegTypeBase) {
4631 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4632 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4633 } else {
4634 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4635 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4637 } else
4638 g_assert_not_reached ();
4640 if (cfg->verbose_level > 2)
4641 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4642 } else {
4643 /* the argument should be put on the stack: FIXME handle size != word */
4644 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4645 switch (ainfo->size) {
4646 case 1:
4647 if (arm_is_imm12 (inst->inst_offset))
4648 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4649 else {
4650 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4651 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4653 break;
4654 case 2:
4655 if (arm_is_imm8 (inst->inst_offset)) {
4656 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4657 } else {
4658 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4659 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4661 break;
4662 case 8:
4663 g_assert (arm_is_imm12 (inst->inst_offset));
4664 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4665 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4666 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4667 break;
4668 default:
4669 if (arm_is_imm12 (inst->inst_offset)) {
4670 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4671 } else {
4672 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4673 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4675 break;
4677 } else if (ainfo->storage == RegTypeBaseGen) {
4678 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4679 g_assert (arm_is_imm12 (inst->inst_offset));
4680 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4681 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4682 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4683 } else if (ainfo->storage == RegTypeBase) {
4684 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4685 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4686 } else {
4687 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4688 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4691 switch (ainfo->size) {
4692 case 1:
4693 if (arm_is_imm8 (inst->inst_offset)) {
4694 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4695 } else {
4696 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4697 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4699 break;
4700 case 2:
4701 if (arm_is_imm8 (inst->inst_offset)) {
4702 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4703 } else {
4704 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4705 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4707 break;
4708 case 8:
4709 if (arm_is_imm12 (inst->inst_offset)) {
4710 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4711 } else {
4712 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4713 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4715 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4716 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4717 } else {
4718 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4719 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4721 if (arm_is_imm12 (inst->inst_offset + 4)) {
4722 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4723 } else {
4724 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4725 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4727 break;
4728 default:
4729 if (arm_is_imm12 (inst->inst_offset)) {
4730 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4731 } else {
4732 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4733 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4735 break;
4737 } else if (ainfo->storage == RegTypeFP) {
4738 g_assert_not_reached ();
4739 } else if (ainfo->storage == RegTypeStructByVal) {
4740 int doffset = inst->inst_offset;
4741 int soffset = 0;
4742 int cur_reg;
4743 int size = 0;
4744 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4745 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4746 if (arm_is_imm12 (doffset)) {
4747 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4748 } else {
4749 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4750 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4752 soffset += sizeof (gpointer);
4753 doffset += sizeof (gpointer);
4755 if (ainfo->vtsize) {
4756 /* FIXME: handle overrun with struct sizes that are not a multiple of 4 */
4757 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4758 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4760 } else if (ainfo->storage == RegTypeStructByAddr) {
4761 g_assert_not_reached ();
4762 /* FIXME: handle overrun with struct sizes that are not a multiple of 4 */
4763 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4764 } else
4765 g_assert_not_reached ();
4767 pos++;
4770 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4771 if (cfg->compile_aot)
4772 /* AOT code is only used in the root domain */
4773 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4774 else
4775 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4776 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4777 (gpointer)"mono_jit_thread_attach");
4778 code = emit_call_seq (cfg, code);
4781 if (method->save_lmf) {
4782 gboolean get_lmf_fast = FALSE;
4784 #ifdef HAVE_AEABI_READ_TP
4785 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4787 if (lmf_addr_tls_offset != -1) {
4788 get_lmf_fast = TRUE;
4790 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4791 (gpointer)"__aeabi_read_tp");
4792 code = emit_call_seq (cfg, code);
4794 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4795 get_lmf_fast = TRUE;
4797 #endif
4798 if (!get_lmf_fast) {
4799 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4800 (gpointer)"mono_get_lmf_addr");
4801 code = emit_call_seq (cfg, code);
4803 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4804 /* lmf_offset is the offset from the previous stack pointer,
4805 * alloc_size is the total stack space allocated, so the offset
4806 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4807 * The pointer to the struct is put in r1 (new_lmf).
4808 * r2 is used as scratch
4809 * The callee-saved registers are already in the MonoLMF structure
4811 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4812 /* r0 is the result from mono_get_lmf_addr () */
4813 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4814 /* new_lmf->previous_lmf = *lmf_addr */
4815 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4816 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4817 /* *(lmf_addr) = r1 */
4818 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4819 /* Skip method (only needed for trampoline LMF frames) */
4820 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4821 /* save the current IP */
4822 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4823 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
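/*
 * The stores above amount to the following C sketch (r1 = new_lmf,
 * r0 = lmf_addr as returned by mono_get_lmf_addr () or the TLS fast path):
 *
 *     MonoLMF *new_lmf = (MonoLMF *) (sp + alloc_size - lmf_offset);
 *     new_lmf->lmf_addr     = lmf_addr;
 *     new_lmf->previous_lmf = *lmf_addr;   // link into the per-thread LMF list
 *     *lmf_addr             = new_lmf;
 *     new_lmf->ebp          = sp;
 *     new_lmf->eip          = pc;          // current IP, used when unwinding
 */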
4826 if (tracing)
4827 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4829 if (cfg->arch.seq_point_info_var) {
4830 MonoInst *ins = cfg->arch.seq_point_info_var;
4832 /* Initialize the variable from a GOT slot */
4833 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4834 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4835 ARM_B (code, 0);
4836 *(gpointer*)code = NULL;
4837 code += 4;
4838 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4840 g_assert (ins->opcode == OP_REGOFFSET);
4842 if (arm_is_imm12 (ins->inst_offset)) {
4843 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4844 } else {
4845 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4846 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4850 /* Initialize ss_trigger_page_var */
4852 MonoInst *info_var = cfg->arch.seq_point_info_var;
4853 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4854 int dreg = ARMREG_LR;
4856 if (info_var) {
4857 g_assert (info_var->opcode == OP_REGOFFSET);
4858 g_assert (arm_is_imm12 (info_var->inst_offset));
4860 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4861 /* Load the trigger page addr */
4862 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4863 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4867 cfg->code_len = code - cfg->native_code;
4868 g_assert (cfg->code_len < cfg->code_size);
4869 g_free (cinfo);
4871 return code;
4874 void
4875 mono_arch_emit_epilog (MonoCompile *cfg)
4877 MonoMethod *method = cfg->method;
4878 int pos, i, rot_amount;
4879 int max_epilog_size = 16 + 20*4;
4880 guint8 *code;
4881 CallInfo *cinfo;
4883 if (cfg->method->save_lmf)
4884 max_epilog_size += 128;
4886 if (mono_jit_trace_calls != NULL)
4887 max_epilog_size += 50;
4889 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4890 max_epilog_size += 50;
4892 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4893 cfg->code_size *= 2;
4894 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4895 mono_jit_stats.code_reallocs++;
4899 * Keep in sync with OP_JMP
4901 code = cfg->native_code + cfg->code_len;
4903 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4904 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4906 pos = 0;
4908 /* Load returned vtypes into registers if needed */
4909 cinfo = cfg->arch.cinfo;
4910 if (cinfo->ret.storage == RegTypeStructByVal) {
4911 MonoInst *ins = cfg->ret;
4913 if (arm_is_imm12 (ins->inst_offset)) {
4914 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4915 } else {
4916 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4917 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4921 if (method->save_lmf) {
4922 int lmf_offset;
4923 /* all but r0-r3, sp and pc */
4924 pos += sizeof (MonoLMF) - (4 * 10);
4925 lmf_offset = pos;
4926 /* r2 contains the pointer to the current LMF */
4927 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4928 /* ip = previous_lmf */
4929 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4930 /* lr = lmf_addr */
4931 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4932 /* *(lmf_addr) = previous_lmf */
4933 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4934 /* FIXME: speedup: there is no actual need to restore the registers if
4935 * we didn't actually change them (idea from Zoltan).
4937 /* restore iregs */
4938 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4939 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4940 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
4941 } else {
4942 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4943 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4944 } else {
4945 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4946 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4948 /* FIXME: add v4 thumb interworking support */
4949 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4952 cfg->code_len = code - cfg->native_code;
4954 g_assert (cfg->code_len < cfg->code_size);
4958 /* remove once throw_exception_by_name is eliminated */
4959 static int
4960 exception_id_by_name (const char *name)
4962 if (strcmp (name, "IndexOutOfRangeException") == 0)
4963 return MONO_EXC_INDEX_OUT_OF_RANGE;
4964 if (strcmp (name, "OverflowException") == 0)
4965 return MONO_EXC_OVERFLOW;
4966 if (strcmp (name, "ArithmeticException") == 0)
4967 return MONO_EXC_ARITHMETIC;
4968 if (strcmp (name, "DivideByZeroException") == 0)
4969 return MONO_EXC_DIVIDE_BY_ZERO;
4970 if (strcmp (name, "InvalidCastException") == 0)
4971 return MONO_EXC_INVALID_CAST;
4972 if (strcmp (name, "NullReferenceException") == 0)
4973 return MONO_EXC_NULL_REF;
4974 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4975 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4976 g_error ("Unknown intrinsic exception %s\n", name);
4977 return -1;
4980 void
4981 mono_arch_emit_exceptions (MonoCompile *cfg)
4983 MonoJumpInfo *patch_info;
4984 int i;
4985 guint8 *code;
4986 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
4987 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
4988 int max_epilog_size = 50;
4990 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
4991 exc_throw_pos [i] = NULL;
4992 exc_throw_found [i] = 0;
4995 /* count the number of exception infos */
4998 * make sure we have enough space for exceptions
5000 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5001 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5002 i = exception_id_by_name (patch_info->data.target);
5003 if (!exc_throw_found [i]) {
5004 max_epilog_size += 32;
5005 exc_throw_found [i] = TRUE;
5010 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5011 cfg->code_size *= 2;
5012 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5013 mono_jit_stats.code_reallocs++;
5016 code = cfg->native_code + cfg->code_len;
5018 /* add code to raise exceptions */
5019 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5020 switch (patch_info->type) {
5021 case MONO_PATCH_INFO_EXC: {
5022 MonoClass *exc_class;
5023 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5025 i = exception_id_by_name (patch_info->data.target);
5026 if (exc_throw_pos [i]) {
5027 arm_patch (ip, exc_throw_pos [i]);
5028 patch_info->type = MONO_PATCH_INFO_NONE;
5029 break;
5030 } else {
5031 exc_throw_pos [i] = code;
5033 arm_patch (ip, code);
5035 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5036 g_assert (exc_class);
5038 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5039 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5040 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5041 patch_info->data.name = "mono_arch_throw_corlib_exception";
5042 patch_info->ip.i = code - cfg->native_code;
5043 ARM_BL (code, 0);
5044 *(guint32*)(gpointer)code = exc_class->type_token;
5045 code += 4;
5046 break;
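/*
 * The sequence emitted above for each distinct exception type is:
 *
 *     mov r1, lr              @ address right after the faulting check
 *     ldr r0, [pc, #0]        @ pc reads as . + 8, so this loads the word below
 *     bl  mono_arch_throw_corlib_exception
 *     .word exc_class->type_token
 */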
5048 default:
5049 /* do nothing */
5050 break;
5054 cfg->code_len = code - cfg->native_code;
5056 g_assert (cfg->code_len < cfg->code_size);
5060 #endif /* #ifndef DISABLE_JIT */
5062 static gboolean tls_offset_inited = FALSE;
5064 void
5065 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5067 if (!tls_offset_inited) {
5068 tls_offset_inited = TRUE;
5070 lmf_tls_offset = mono_get_lmf_tls_offset ();
5071 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
5075 void
5076 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5080 MonoInst*
5081 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5083 /* FIXME: */
5084 return NULL;
5087 gboolean
5088 mono_arch_print_tree (MonoInst *tree, int arity)
5090 return 0;
5093 MonoInst*
5094 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5096 return mono_get_domain_intrinsic (cfg);
5099 guint32
5100 mono_arch_get_patch_offset (guint8 *code)
5102 /* OP_AOTCONST */
5103 return 8;
5106 void
5107 mono_arch_flush_register_windows (void)
5111 #ifdef MONO_ARCH_HAVE_IMT
5113 #ifndef DISABLE_JIT
5115 void
5116 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5118 if (cfg->compile_aot) {
5119 int method_reg = mono_alloc_ireg (cfg);
5120 MonoInst *ins;
5122 call->dynamic_imt_arg = TRUE;
5124 if (imt_arg) {
5125 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5126 } else {
5127 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5128 ins->dreg = method_reg;
5129 ins->inst_p0 = call->method;
5130 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5131 MONO_ADD_INS (cfg->cbb, ins);
5133 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5135 } else if (cfg->generic_context || imt_arg) {
5137 /* Always pass in a register for simplicity */
5138 call->dynamic_imt_arg = TRUE;
5140 cfg->uses_rgctx_reg = TRUE;
5142 if (imt_arg) {
5143 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5144 } else {
5145 MonoInst *ins;
5146 int method_reg = mono_alloc_preg (cfg);
5148 MONO_INST_NEW (cfg, ins, OP_PCONST);
5149 ins->inst_p0 = call->method;
5150 ins->dreg = method_reg;
5151 MONO_ADD_INS (cfg->cbb, ins);
5153 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5158 #endif /* DISABLE_JIT */
5160 MonoMethod*
5161 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5163 guint32 *code_ptr = (guint32*)code;
5164 code_ptr -= 2;
5165 /* The IMT value is stored in the code stream right after the LDR instruction. */
5166 if (!IS_LDR_PC (code_ptr [0])) {
5167 g_warning ("invalid code stream, instruction before IMT value is not an LDR in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5168 g_assert (IS_LDR_PC (code_ptr [0]));
5170 if (code_ptr [1] == 0)
5171 /* This is AOTed code, the IMT method is in V5 */
5172 return (MonoMethod*)regs [ARMREG_V5];
5173 else
5174 return (MonoMethod*) code_ptr [1];
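/*
 * Expected layout at the call site, i.e. the two words before the return
 * address in CODE:
 *
 *     code_ptr [0]: ldr pc, [...]        (checked with IS_LDR_PC)
 *     code_ptr [1]: .word imt_method     (0 in AOT code: the method is in V5)
 *     code:         <return address>
 */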
5177 MonoVTable*
5178 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5180 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
5183 #define ENABLE_WRONG_METHOD_CHECK 0
5184 #define BASE_SIZE (6 * 4)
5185 #define BSEARCH_ENTRY_SIZE (4 * 4)
5186 #define CMP_SIZE (3 * 4)
5187 #define BRANCH_SIZE (1 * 4)
5188 #define CALL_SIZE (2 * 4)
5189 #define WMC_SIZE (5 * 4)
5190 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5192 static arminstr_t *
5193 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5195 guint32 delta = DISTANCE (target, code);
5196 delta -= 8;
5197 g_assert (delta >= 0 && delta <= 0xFFF);
5198 *target = *target | delta;
5199 *code = value;
5200 return code + 1;
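/*
 * Typical use (sketch): emit an "ldr rX, [pc, #0]" whose displacement will be
 * fixed up later, remember its address, then call this helper when the spot
 * for the constant is reached:
 *
 *     arminstr_t *ldr_ins = code;
 *     ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
 *     ...
 *     code = arm_emit_value_and_patch_ldr (code, ldr_ins, value);
 *
 * The helper stores VALUE at *code and ORs the pc-relative distance (minus
 * the 8 byte read-ahead) into the 12-bit offset field of the ldr.
 */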
5203 gpointer
5204 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5205 gpointer fail_tramp)
5207 int size, i, extra_space = 0;
5208 arminstr_t *code, *start, *vtable_target = NULL;
5209 gboolean large_offsets = FALSE;
5210 guint32 **constant_pool_starts;
5212 size = BASE_SIZE;
5213 constant_pool_starts = g_new0 (guint32*, count);
5215 for (i = 0; i < count; ++i) {
5216 MonoIMTCheckItem *item = imt_entries [i];
5217 if (item->is_equals) {
5218 gboolean fail_case = !item->check_target_idx && fail_tramp;
5220 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5221 item->chunk_size += 32;
5222 large_offsets = TRUE;
5225 if (item->check_target_idx || fail_case) {
5226 if (!item->compare_done || fail_case)
5227 item->chunk_size += CMP_SIZE;
5228 item->chunk_size += BRANCH_SIZE;
5229 } else {
5230 #if ENABLE_WRONG_METHOD_CHECK
5231 item->chunk_size += WMC_SIZE;
5232 #endif
5234 if (fail_case) {
5235 item->chunk_size += 16;
5236 large_offsets = TRUE;
5238 item->chunk_size += CALL_SIZE;
5239 } else {
5240 item->chunk_size += BSEARCH_ENTRY_SIZE;
5241 imt_entries [item->check_target_idx]->compare_done = TRUE;
5243 size += item->chunk_size;
5246 if (large_offsets)
5247 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5249 if (fail_tramp)
5250 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5251 else
5252 code = mono_domain_code_reserve (domain, size);
5253 start = code;
5255 #if DEBUG_IMT
5256 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5257 for (i = 0; i < count; ++i) {
5258 MonoIMTCheckItem *item = imt_entries [i];
5259 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
5261 #endif
5263 if (large_offsets)
5264 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5265 else
5266 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5267 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5268 vtable_target = code;
5269 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5271 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5272 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5273 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
5275 for (i = 0; i < count; ++i) {
5276 MonoIMTCheckItem *item = imt_entries [i];
5277 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
5278 gint32 vtable_offset;
5280 item->code_target = (guint8*)code;
5282 if (item->is_equals) {
5283 gboolean fail_case = !item->check_target_idx && fail_tramp;
5285 if (item->check_target_idx || fail_case) {
5286 if (!item->compare_done || fail_case) {
5287 imt_method = code;
5288 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5289 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5291 item->jmp_code = (guint8*)code;
5292 ARM_B_COND (code, ARMCOND_NE, 0);
5293 } else {
5294 /*Enable the commented code to assert on wrong method*/
5295 #if ENABLE_WRONG_METHOD_CHECK
5296 imt_method = code;
5297 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5298 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5299 ARM_B_COND (code, ARMCOND_NE, 1);
5301 ARM_DBRK (code);
5302 #endif
5305 if (item->has_target_code) {
5306 target_code_ins = code;
5307 /* Load target address */
5308 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5309 /* Save it to the fourth slot */
5310 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5311 /* Restore registers and branch */
5312 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5314 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
5315 } else {
5316 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5317 if (!arm_is_imm12 (vtable_offset)) {
5319 * We need to branch to a computed address but we don't have
5320 * a free register to store it, since IP must contain the
5321 * vtable address. So we push the two values to the stack, and
5322 * load them both using LDM.
5324 /* Compute target address */
5325 vtable_offset_ins = code;
5326 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5327 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5328 /* Save it to the fourth slot */
5329 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5330 /* Restore registers and branch */
5331 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5333 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
5334 } else {
5335 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5336 if (large_offsets)
5337 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5338 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
5342 if (fail_case) {
5343 arm_patch (item->jmp_code, (guchar*)code);
5345 target_code_ins = code;
5346 /* Load target address */
5347 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5348 /* Save it to the fourth slot */
5349 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5350 /* Restore registers and branch */
5351 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5353 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
5354 item->jmp_code = NULL;
5357 if (imt_method)
5358 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5360 /*must emit after unconditional branch*/
5361 if (vtable_target) {
5362 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5363 item->chunk_size += 4;
5364 vtable_target = NULL;
5367 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5368 constant_pool_starts [i] = code;
5369 if (extra_space) {
5370 code += extra_space;
5371 extra_space = 0;
5373 } else {
5374 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5375 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5377 item->jmp_code = (guint8*)code;
5378 ARM_B_COND (code, ARMCOND_GE, 0);
5379 ++extra_space;
5383 for (i = 0; i < count; ++i) {
5384 MonoIMTCheckItem *item = imt_entries [i];
5385 if (item->jmp_code) {
5386 if (item->check_target_idx)
5387 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5389 if (i > 0 && item->is_equals) {
5390 int j;
5391 arminstr_t *space_start = constant_pool_starts [i];
5392 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5393 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5398 #if DEBUG_IMT
5400 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5401 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5402 g_free (buff);
5404 #endif
5406 g_free (constant_pool_starts);
5408 mono_arch_flush_icache ((guint8*)start, size);
5409 mono_stats.imt_thunks_size += code - start;
5411 g_assert (DISTANCE (start, code) <= size);
5412 return start;
5415 #endif
5417 gpointer
5418 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5420 if (reg == ARMREG_SP)
5421 return (gpointer)ctx->esp;
5422 else
5423 return (gpointer)ctx->regs [reg];
5427 * mono_arch_set_breakpoint:
5429 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5430 * The location should contain code emitted by OP_SEQ_POINT.
5432 void
5433 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5435 guint8 *code = ip;
5436 guint32 native_offset = ip - (guint8*)ji->code_start;
5438 if (ji->from_aot) {
5439 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5441 g_assert (native_offset % 4 == 0);
5442 g_assert (info->bp_addrs [native_offset / 4] == 0);
5443 info->bp_addrs [native_offset / 4] = bp_trigger_page;
5444 } else {
5445 int dreg = ARMREG_LR;
5447 /* Read from another trigger page */
5448 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5449 ARM_B (code, 0);
5450 *(int*)code = (int)bp_trigger_page;
5451 code += 4;
5452 ARM_LDR_IMM (code, dreg, dreg, 0);
5454 mono_arch_flush_icache (code - 16, 16);
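/*
 * The 16 bytes emitted above replace the OP_SEQ_POINT nops with:
 *
 *     ldr lr, [pc, #0]      @ pc reads as . + 8: loads the inline word below
 *     b   <next ldr>        @ skips over the inline constant
 *     .word bp_trigger_page
 *     ldr lr, [lr]          @ faults while the trigger page is protected;
 *                           @ mono_arch_is_breakpoint_event () matches the address
 */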
5456 #if 0
5457 /* This is currently implemented by emitting an SWI instruction, which
5458 * qemu/linux seems to convert to a SIGILL.
5460 *(int*)code = (0xef << 24) | 8;
5461 code += 4;
5462 mono_arch_flush_icache (code - 4, 4);
5463 #endif
5468 * mono_arch_clear_breakpoint:
5470 * Clear the breakpoint at IP.
5472 void
5473 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5475 guint8 *code = ip;
5476 int i;
5478 if (ji->from_aot) {
5479 guint32 native_offset = ip - (guint8*)ji->code_start;
5480 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5482 g_assert (native_offset % 4 == 0);
5483 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5484 info->bp_addrs [native_offset / 4] = 0;
5485 } else {
5486 for (i = 0; i < 4; ++i)
5487 ARM_NOP (code);
5489 mono_arch_flush_icache (ip, code - ip);
5494 * mono_arch_start_single_stepping:
5496 * Start single stepping.
5498 void
5499 mono_arch_start_single_stepping (void)
5501 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5505 * mono_arch_stop_single_stepping:
5507 * Stop single stepping.
5509 void
5510 mono_arch_stop_single_stepping (void)
5512 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5515 #if __APPLE__
5516 #define DBG_SIGNAL SIGBUS
5517 #else
5518 #define DBG_SIGNAL SIGSEGV
5519 #endif
5522 * mono_arch_is_single_step_event:
5524 * Return whether the machine state in SIGCTX corresponds to a single
5525 * step event.
5527 gboolean
5528 mono_arch_is_single_step_event (void *info, void *sigctx)
5530 siginfo_t *sinfo = info;
5532 /* Sometimes the address is off by 4 */
5533 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5534 return TRUE;
5535 else
5536 return FALSE;
5540 * mono_arch_is_breakpoint_event:
5542 * Return whether the machine state in SIGCTX corresponds to a breakpoint event.
5544 gboolean
5545 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5547 siginfo_t *sinfo = info;
5549 if (sinfo->si_signo == DBG_SIGNAL) {
5550 /* Sometimes the address is off by 4 */
5551 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5552 return TRUE;
5553 else
5554 return FALSE;
5555 } else {
5556 return FALSE;
5560 guint8*
5561 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5563 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5565 if (ji->from_aot)
5566 ip -= 6 * 4;
5567 else
5568 ip -= 12;
5570 return ip;
5573 guint8*
5574 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5576 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5578 ip += 4;
5580 return ip;
5584 * mono_arch_skip_breakpoint:
5586 * See mini-amd64.c for docs.
5588 void
5589 mono_arch_skip_breakpoint (MonoContext *ctx)
5591 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5595 * mono_arch_skip_single_step:
5597 * See mini-amd64.c for docs.
5599 void
5600 mono_arch_skip_single_step (MonoContext *ctx)
5602 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5606 * mono_arch_get_seq_point_info:
5608 * See mini-amd64.c for docs.
5610 gpointer
5611 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5613 SeqPointInfo *info;
5614 MonoJitInfo *ji;
5616 // FIXME: Add a free function
5618 mono_domain_lock (domain);
5619 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5620 code);
5621 mono_domain_unlock (domain);
5623 if (!info) {
5624 ji = mono_jit_info_table_find (domain, (char*)code);
5625 g_assert (ji);
5627 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5629 info->ss_trigger_page = ss_trigger_page;
5630 info->bp_trigger_page = bp_trigger_page;
5632 mono_domain_lock (domain);
5633 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5634 code, info);
5635 mono_domain_unlock (domain);
5638 return info;