2009-12-30 Rodrigo Kumpera <rkumpera@novell.com>
[mono.git] / mono / mini / mini-arm.c
blob8f2a4afc4e4c6347cf4a2a301e31c102ef9121cc
1 /*
2 * mini-arm.c: ARM backend for the Mono code generator
4 * Authors:
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 */
10 #include "mini.h"
11 #include <string.h>
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
17 #include "mini-arm.h"
18 #include "cpu-arm.h"
19 #include "trace.h"
20 #include "ir-emit.h"
21 #ifdef ARM_FPU_FPA
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
25 #endif
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
29 #endif
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
53 typedef struct {
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
57 } SeqPointInfo;
60 * TODO:
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which binary incompat with the other.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
85 #define DEBUG_IMT 0
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
/*
 * When OFFSET does not fit the 12-bit immediate field, it is first
 * materialized into SCRATCH_REG (which therefore must differ from
 * BASEREG) and a register-offset load is emitted instead.
 */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
91 } else { \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
95 } \
96 } while (0)
/* A variant of ARM_STR_IMM which can handle large offsets.
 * When OFFSET does not fit the 12-bit immediate field, it is first
 * materialized into SCRATCH_REG (which must differ from BASEREG) and a
 * register-offset store is emitted instead.
 * FIX: the else branch was missing its closing brace (compare with
 * ARM_LDR_IMM_GENERAL above), leaving the macro syntactically broken. */
#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for the ARM integer register REG
 * (0..15), or "unknown" for any out-of-range value.
 */
const char*
mono_arch_regname (int reg)
{
	static const char *names [] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};

	if (reg < 0 || reg >= 16)
		return "unknown";
	return names [reg];
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for the ARM floating point register
 * REG (0..31), or "unknown" for any out-of-range value.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char *names [] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};

	if (reg < 0 || reg >= 32)
		return "unknown";
	return names [reg];
}
139 static guint8*
140 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
142 int imm8, rot_amount;
143 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
144 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
145 return code;
147 g_assert (dreg != sreg);
148 code = mono_arm_emit_load_imm (code, dreg, imm);
149 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
150 return code;
/*
 * emit_memcpy:
 *
 *   Emit code copying SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Clobbers r0-r3 and lr, so it is only safe where those are free
 * (incoming stack arguments in the prolog, per the comment below).
 * SIZE is expected to be a multiple of the word size (asserted at the end).
 */
153 static guint8*
154 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
156 /* we can use r0-r3, since this is called only for incoming args on the stack */
157 if (size > sizeof (gpointer) * 4) {
158 guint8 *start_loop;
/* Large copy: word-by-word loop with r0 = src, r1 = dst, r2 = remaining bytes */
159 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
160 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
161 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
162 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
163 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
164 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
165 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
166 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to the loop head while the counter is non-zero */
167 ARM_B_COND (code, ARMCOND_NE, 0);
168 arm_patch (code - 4, start_loop);
169 return code;
/* Small copy: unrolled loads/stores when all offsets fit in imm12 */
171 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
172 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
173 while (size >= 4) {
174 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
175 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
176 doffset += 4;
177 soffset += 4;
178 size -= 4;
180 } else if (size) {
/* Offsets too large for imm12: compute the base addresses first */
181 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
182 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
183 doffset = soffset = 0;
184 while (size >= 4) {
185 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
186 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
187 doffset += 4;
188 soffset += 4;
189 size -= 4;
/* Only whole words are copied; a trailing remainder is a caller bug */
192 g_assert (size == 0);
193 return code;
196 static guint8*
197 emit_call_reg (guint8 *code, int reg)
199 if (v5_supported) {
200 ARM_BLX_REG (code, reg);
201 } else {
202 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
203 if (thumb_supported)
204 ARM_BX (code, reg);
205 else
206 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
208 return code;
/*
 * emit_call_seq:
 *
 *   Emit a call whose target is patched in later.  For dynamic methods
 * the target is loaded from an inline literal word (the NULL slot
 * below) and called through ip; otherwise a plain BL with zero
 * displacement is emitted for the patcher to fix up.
 */
211 static guint8*
212 emit_call_seq (MonoCompile *cfg, guint8 *code)
214 if (cfg->method->dynamic) {
/* ldr ip, [pc, #0]; b <skip literal>; .word <target>; call ip */
215 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
216 ARM_B (code, 0);
/* Placeholder literal holding the call target, filled at patch time */
217 *(gpointer*)code = NULL;
218 code += 4;
219 code = emit_call_reg (code, ARMREG_IP);
220 } else {
221 ARM_BL (code, 0);
223 return code;
/*
 * emit_move_return_value:
 *
 *   Move the native return value of the call INS into INS->dreg.
 * Only the float-call opcodes need work here: with FPA the value
 * arrives in f0; with VFP (soft-float return convention) it arrives
 * in r0/r1 and must be transferred into the VFP register, widening
 * from single precision for R4 returns.
 */
226 static guint8*
227 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
229 switch (ins->opcode) {
230 case OP_FCALL:
231 case OP_FCALL_REG:
232 case OP_FCALL_MEMBASE:
233 #ifdef ARM_FPU_FPA
234 if (ins->dreg != ARM_FPA_F0)
235 ARM_MVFD (code, ins->dreg, ARM_FPA_F0)
236 #elif defined(ARM_FPU_VFP)
237 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* Single precision: move r0 into the VFP reg, then convert to double */
238 ARM_FMSR (code, ins->dreg, ARMREG_R0);
239 ARM_CVTS (code, ins->dreg, ins->dreg);
240 } else {
/* Double precision: combine r0/r1 into the VFP double register */
241 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
243 #endif
244 break;
247 return code;
251 * mono_arch_get_argument_info:
252 * @csig: a method signature
253 * @param_count: the number of parameters to consider
254 * @arg_info: an array to store the result infos
256 * Gathers information on parameters such as size, alignment and
257 * padding. arg_info should be large enought to hold param_count + 1 entries.
259 * Returns the size of the activation frame.
262 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
264 int k, frame_size = 0;
265 guint32 size, align, pad;
/* Stack arguments start at offset 8; presumably past a saved fp/lr pair — TODO confirm */
266 int offset = 8;
268 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
/* Account for the hidden valuetype-return pointer */
269 frame_size += sizeof (gpointer);
270 offset += 4;
273 arg_info [0].offset = offset;
275 if (csig->hasthis) {
276 frame_size += sizeof (gpointer);
277 offset += 4;
280 arg_info [0].size = frame_size;
282 for (k = 0; k < param_count; k++) {
283 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
285 /* ignore alignment for now */
286 align = 1;
/* Pad frame up to the (currently forced to 1) alignment before this arg */
288 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
289 arg_info [k].pad = pad;
290 frame_size += size;
291 arg_info [k + 1].pad = 0;
292 arg_info [k + 1].size = size;
293 offset += pad;
294 arg_info [k + 1].offset = offset;
295 offset += size;
/* Round the whole frame up to MONO_ARCH_FRAME_ALIGNMENT */
298 align = MONO_ARCH_FRAME_ALIGNMENT;
299 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
300 arg_info [k].pad = pad;
302 return frame_size;
305 static gpointer
306 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
308 char *o = NULL;
309 int reg, offset = 0;
310 reg = (ldr >> 16 ) & 0xf;
311 offset = ldr & 0xfff;
312 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
313 offset = -offset;
314 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
315 o = (gpointer)regs [reg];
317 *displacement = offset;
318 return o;
/*
 * mono_arch_get_vcall_slot:
 *
 *   Starting from the return address CODE_PTR, pattern-match the
 * instructions of the call site to recover the register + displacement
 * of the vtable/IMT slot that was called through, or return NULL for
 * direct calls which need no decoding.
 */
321 gpointer
322 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
324 guint32* code = (guint32*)code_ptr;
326 /* Locate the address of the method-specific trampoline. The call using
327 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
328 looks something like this:
330 ldr rA, rX, #offset
331 mov lr, pc
332 mov pc, rA
333 or better:
334 mov lr, pc
335 ldr pc, rX, #offset
337 The call sequence could be also:
338 ldr ip, pc, 0
339 b skip
340 function pointer literal
341 skip:
342 mov lr, pc
343 mov pc, ip
344 Note that on ARM5+ we can use one instruction instead of the last two.
345 Therefore, we need to locate the 'ldr rA' instruction to know which
346 register was used to hold the method addrs.
349 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
350 --code;
352 /* Three possible code sequences can happen here:
353 * interface call:
355 * add lr, [pc + #4]
356 * ldr pc, [rX - #offset]
357 * .word IMT value
359 * virtual call:
361 * mov lr, pc
362 * ldr pc, [rX - #offset]
364 * direct branch with bl:
366 * bl #offset
368 * direct branch with mov:
370 * mv pc, rX
372 * We only need to identify interface and virtual calls, the others can be ignored.
/* Interface call: the word behind us is the IMT value, the ldr is one back */
375 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
376 return decode_vcall_slot_from_ldr (code [-1], regs, displacement)
/* Virtual call: the ldr is the instruction right before the return address */
378 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
379 return decode_vcall_slot_from_ldr (code [0], regs, displacement)
/* Direct call: nothing to decode */
381 return NULL;
384 #define MAX_ARCH_DELEGATE_PARAMS 3
386 static gpointer
387 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
389 guint8 *code, *start;
391 if (has_target) {
392 start = code = mono_global_codeman_reserve (12);
394 /* Replace the this argument with the target */
395 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
396 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
397 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
399 g_assert ((code - start) <= 12);
401 mono_arch_flush_icache (start, 12);
402 } else {
403 int size, i;
405 size = 8 + param_count * 4;
406 start = code = mono_global_codeman_reserve (size);
408 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
409 /* slide down the arguments */
410 for (i = 0; i < param_count; ++i) {
411 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
413 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
415 g_assert ((code - start) <= size);
417 mono_arch_flush_icache (start, size);
420 if (code_size)
421 *code_size = code - start;
423 return start;
427 * mono_arch_get_delegate_invoke_impls:
429 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
430 * trampolines.
432 GSList*
433 mono_arch_get_delegate_invoke_impls (void)
435 GSList *res = NULL;
436 guint8 *code;
437 guint32 code_len;
438 int i;
440 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
441 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
/* One static-delegate variant per arity, 0..MAX_ARCH_DELEGATE_PARAMS inclusive */
443 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
444 code = get_delegate_invoke_impl (FALSE, i, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
448 return res;
/*
 * mono_arch_get_delegate_invoke_impl:
 *
 *   Return a trampoline implementing Delegate.Invoke for SIG: the
 * has-target variant, or the static variant for up to
 * MAX_ARCH_DELEGATE_PARAMS register-sized arguments.  Results are
 * cached under the arch mutex; AOT-only mode looks the code up by
 * name instead of generating it.  Returns NULL for unsupported
 * signatures (valuetype returns, too many or non-regsize params).
 */
451 gpointer
452 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
454 guint8 *code, *start;
456 /* FIXME: Support more cases */
457 if (MONO_TYPE_ISSTRUCT (sig->ret))
458 return NULL;
460 if (has_target) {
461 static guint8* cached = NULL;
/* Fast path: a single cached trampoline serves all has-target delegates */
462 mono_mini_arch_lock ();
463 if (cached) {
464 mono_mini_arch_unlock ();
465 return cached;
468 if (mono_aot_only)
469 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
470 else
471 start = get_delegate_invoke_impl (TRUE, 0, NULL);
472 cached = start;
473 mono_mini_arch_unlock ();
474 return cached;
475 } else {
476 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
477 int i;
479 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
480 return NULL;
/* Only register-sized parameters can be slid down one register */
481 for (i = 0; i < sig->param_count; ++i)
482 if (!mono_is_regsize_var (sig->params [i]))
483 return NULL;
/* Per-arity cache, guarded by the same arch mutex */
485 mono_mini_arch_lock ();
486 code = cache [sig->param_count];
487 if (code) {
488 mono_mini_arch_unlock ();
489 return code;
492 if (mono_aot_only) {
493 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
494 start = mono_aot_get_named_code (name);
495 g_free (name);
496 } else {
497 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
499 cache [sig->param_count] = start;
500 mono_mini_arch_unlock ();
501 return start;
504 return NULL;
507 gpointer
508 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
510 /* FIXME: handle returning a struct */
511 if (MONO_TYPE_ISSTRUCT (sig->ret))
512 return (gpointer)regs [ARMREG_R1];
513 return (gpointer)regs [ARMREG_R0];
517 * Initialize the cpu to execute managed code.
519 void
520 mono_arch_cpu_init (void)
/* NOTE(review): body is empty in this revision — no per-CPU setup is performed here. */
525 * Initialize architecture specific code.
527 void
528 mono_arch_init (void)
530 InitializeCriticalSection (&mini_arch_mutex);
/* Allocate the single-step and breakpoint trigger pages (see the
 * comments on ss_trigger_page/bp_trigger_page near the top of the file) */
532 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
533 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* The breakpoint page starts with no access so enabled breakpoints fault on read */
534 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
538 * Cleanup architecture specific code.
540 void
541 mono_arch_cleanup (void)
/* NOTE(review): no teardown is performed here (mutex and trigger pages
 * allocated in mono_arch_init are not released) — body empty in this revision. */
546 * This function returns the optimizations supported on this cpu.
/*
 * Also detects CPU features as a side effect: sets v5_supported,
 * v7_supported and thumb_supported from /proc/cpuinfo on Linux, or
 * assumes v5+Thumb on Apple targets.  (The misspelled name is the
 * established cross-architecture entry point and cannot be changed here.)
 */
548 guint32
549 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
551 guint32 opts = 0;
552 #if __APPLE__
553 thumb_supported = TRUE;
554 v5_supported = TRUE;
555 #else
556 char buf [512];
557 char *line;
558 FILE *file = fopen ("/proc/cpuinfo", "r");
559 if (file) {
560 while ((line = fgets (buf, 512, file))) {
561 if (strncmp (line, "Processor", 9) == 0) {
/* Architecture revision appears as e.g. "(v7l)" in the Processor line */
562 char *ver = strstr (line, "(v");
563 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
564 v5_supported = TRUE;
565 if (ver && (ver [2] == '7'))
566 v7_supported = TRUE;
567 continue;
569 if (strncmp (line, "Features", 8) == 0) {
570 char *th = strstr (line, "thumb");
571 if (th) {
572 thumb_supported = TRUE;
/* Both facts known: no need to scan further */
573 if (v5_supported)
574 break;
576 continue;
579 fclose (file);
580 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
582 #endif
584 /* no arm-specific optimizations yet */
585 *exclude_mask = 0;
586 return opts;
589 static gboolean
590 is_regsize_var (MonoType *t) {
591 if (t->byref)
592 return TRUE;
593 t = mini_type_get_underlying_type (NULL, t);
594 switch (t->type) {
595 case MONO_TYPE_I4:
596 case MONO_TYPE_U4:
597 case MONO_TYPE_I:
598 case MONO_TYPE_U:
599 case MONO_TYPE_PTR:
600 case MONO_TYPE_FNPTR:
601 return TRUE;
602 case MONO_TYPE_OBJECT:
603 case MONO_TYPE_STRING:
604 case MONO_TYPE_CLASS:
605 case MONO_TYPE_SZARRAY:
606 case MONO_TYPE_ARRAY:
607 return TRUE;
608 case MONO_TYPE_GENERICINST:
609 if (!mono_type_generic_inst_is_valuetype (t))
610 return TRUE;
611 return FALSE;
612 case MONO_TYPE_VALUETYPE:
613 return FALSE;
615 return FALSE;
/*
 * mono_arch_get_allocatable_int_vars:
 *
 *   Return the list of variables in CFG which may be allocated to an
 * integer register, inserted in sorted order for the allocator.
 */
618 GList *
619 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
621 GList *vars = NULL;
622 int i;
624 for (i = 0; i < cfg->num_varinfo; i++) {
625 MonoInst *ins = cfg->varinfo [i];
626 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
628 /* unused vars */
629 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
630 continue;
/* Volatile/address-taken vars and non-local/arg opcodes must stay in memory */
632 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
633 continue;
635 /* we can only allocate 32 bit values */
636 if (is_regsize_var (ins->inst_vtype)) {
637 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
638 g_assert (i == vmv->idx);
639 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
643 return vars;
646 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 *
 *   Return the callee-saved registers available for global register
 * allocation: v1-v4, plus v5 unless it is reserved for the
 * vtable/rgctx/IMT argument.
 */
648 GList *
649 mono_arch_get_global_int_regs (MonoCompile *cfg)
651 GList *regs = NULL;
654 * FIXME: Interface calls might go through a static rgctx trampoline which
655 * sets V5, but it doesn't save it, so we need to save it ourselves, and
656 * avoid using it.
658 if (cfg->flags & MONO_CFG_HAS_CALLS)
659 cfg->uses_rgctx_reg = TRUE;
661 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
662 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
663 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
664 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
665 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
666 /* V5 is reserved for passing the vtable/rgctx/IMT method */
667 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
668 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
669 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
671 return regs;
675 * mono_arch_regalloc_cost:
677 * Return the cost, in number of memory references, of the action of
678 * allocating the variable VMV into a register during global register
679 * allocation.
681 guint32
682 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
684 /* FIXME: */
685 return 2;
688 #ifndef __GNUC_PREREQ
689 #define __GNUC_PREREQ(maj, min) (0)
690 #endif
692 void
693 mono_arch_flush_icache (guint8 *code, gint size)
695 #if __APPLE__
696 sys_icache_invalidate (code, size);
697 #elif __GNUC_PREREQ(4, 1)
698 __clear_cache (code, code + size);
699 #elif defined(PLATFORM_ANDROID)
700 const int syscall = 0xf0002;
701 __asm __volatile (
702 "mov r0, %0\n"
703 "mov r1, %1\n"
704 "mov r7, %2\n"
705 "mov r2, #0x0\n"
706 "svc 0x00000000\n"
708 : "r" (code), "r" (code + size), "r" (syscall)
709 : "r0", "r1", "r7", "r2"
711 #else
712 __asm __volatile ("mov r0, %0\n"
713 "mov r1, %1\n"
714 "mov r2, %2\n"
715 "swi 0x9f0002 @ sys_cacheflush"
716 : /* no outputs */
717 : "r" (code), "r" (code + size), "r" (0)
718 : "r0", "r1", "r3" );
719 #endif
/* How a single argument or return value is passed by the calling convention */
722 typedef enum {
723 RegTypeNone,
/* in one core register */
724 RegTypeGeneral,
/* 64-bit value in a core register pair */
725 RegTypeIRegPair,
/* on the caller's stack */
726 RegTypeBase,
/* first word in r3, second word on the stack */
727 RegTypeBaseGen,
728 RegTypeFP,
/* struct passed by value in registers and/or on the stack */
729 RegTypeStructByVal,
/* struct passed through a hidden address */
730 RegTypeStructByAddr
731 } ArgStorage;
/* Placement of one argument or return value */
733 typedef struct {
734 gint32 offset;
735 guint16 vtsize; /* in param area */
736 guint8 reg;
737 ArgStorage storage;
738 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
739 } ArgInfo;
/* Complete argument layout for one signature; allocated with
 * space for nargs trailing ArgInfo entries (args is variable-length) */
741 typedef struct {
742 int nargs;
743 guint32 stack_usage;
744 guint32 struct_ret;
745 gboolean vtype_retaddr;
746 ArgInfo ret;
747 ArgInfo sig_cookie;
748 ArgInfo args [1];
749 } CallInfo;
751 #define DEBUG(a)
753 #ifndef __GNUC__
754 /*#define __alignof__(a) sizeof(a)*/
755 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
756 #endif
758 #define PARAM_REGS 4
/*
 * add_general:
 *
 *   Assign the next location for an argument: a core register while any
 * remain (r0-r3), otherwise the caller's stack.  SIMPLE selects
 * word-sized handling; otherwise the value is a 64-bit quantity which
 * needs a register pair (EABI requires an even/odd pair) or may be
 * split between r3 and the stack.
 */
760 static void inline
761 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
763 if (simple) {
764 if (*gr > ARMREG_R3) {
/* Out of argument registers: passed on the caller's stack */
765 ainfo->offset = *stack_size;
766 ainfo->reg = ARMREG_SP; /* in the caller */
767 ainfo->storage = RegTypeBase;
768 *stack_size += 4;
769 } else {
770 ainfo->storage = RegTypeGeneral;
771 ainfo->reg = *gr;
773 } else {
774 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
775 int i8_align = 4;
776 #else
777 int i8_align = __alignof__ (gint64);
778 #endif
780 #if __ARM_EABI__
/* A 64-bit value may be split r3/stack only when 64-bit alignment is 4 */
781 gboolean split = i8_align == 4;
782 #else
783 gboolean split = TRUE;
784 #endif
786 if (*gr == ARMREG_R3 && split) {
787 /* first word in r3 and the second on the stack */
788 ainfo->offset = *stack_size;
789 ainfo->reg = ARMREG_SP; /* in the caller */
790 ainfo->storage = RegTypeBaseGen;
791 *stack_size += 4;
792 } else if (*gr >= ARMREG_R3) {
793 #ifdef __ARM_EABI__
794 /* darwin aligns longs to 4 byte only */
795 if (i8_align == 8) {
796 *stack_size += 7;
797 *stack_size &= ~7;
799 #endif
800 ainfo->offset = *stack_size;
801 ainfo->reg = ARMREG_SP; /* in the caller */
802 ainfo->storage = RegTypeBase;
803 *stack_size += 8;
804 } else {
805 #ifdef __ARM_EABI__
/* EABI: 64-bit values go in an even/odd register pair */
806 if (i8_align == 8 && ((*gr) & 1))
807 (*gr) ++;
808 #endif
809 ainfo->storage = RegTypeIRegPair;
810 ainfo->reg = *gr;
/* Second register of the pair / advance past the consumed register */
812 (*gr) ++;
814 (*gr) ++;
/*
 * get_call_info:
 *
 *   Compute the calling-convention layout (register/stack placement of
 * every argument, the return value and the vararg sig cookie) for SIG.
 * The CallInfo is allocated from MP when given, otherwise malloc'd.
 */
817 static CallInfo*
818 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
820 guint i, gr;
821 int n = sig->hasthis + sig->param_count;
822 MonoType *simpletype;
823 guint32 stack_size = 0;
824 CallInfo *cinfo;
826 if (mp)
827 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
828 else
829 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
831 cinfo->nargs = n;
/* gr tracks the next free argument register, starting at r0 */
832 gr = ARMREG_R0;
834 /* FIXME: handle returning a struct */
835 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
836 guint32 align;
/* Small pinvoke structs are returned by value; otherwise a hidden
 * return-address argument consumes the first register */
838 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
839 cinfo->ret.storage = RegTypeStructByVal;
840 } else {
841 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
842 cinfo->struct_ret = ARMREG_R0;
843 cinfo->vtype_retaddr = TRUE;
847 n = 0;
848 if (sig->hasthis) {
849 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
850 n++;
852 DEBUG(printf("params: %d\n", sig->param_count));
853 for (i = 0; i < sig->param_count; ++i) {
854 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
855 /* Prevent implicit arguments and sig_cookie from
856 being passed in registers */
857 gr = ARMREG_R3 + 1;
858 /* Emit the signature cookie just before the implicit arguments */
859 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
861 DEBUG(printf("param %d: ", i));
862 if (sig->params [i]->byref) {
863 DEBUG(printf("byref\n"));
864 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
865 n++;
866 continue;
868 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
869 switch (simpletype->type) {
870 case MONO_TYPE_BOOLEAN:
871 case MONO_TYPE_I1:
872 case MONO_TYPE_U1:
873 cinfo->args [n].size = 1;
874 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
875 n++;
876 break;
877 case MONO_TYPE_CHAR:
878 case MONO_TYPE_I2:
879 case MONO_TYPE_U2:
880 cinfo->args [n].size = 2;
881 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
882 n++;
883 break;
884 case MONO_TYPE_I4:
885 case MONO_TYPE_U4:
886 cinfo->args [n].size = 4;
887 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
888 n++;
889 break;
890 case MONO_TYPE_I:
891 case MONO_TYPE_U:
892 case MONO_TYPE_PTR:
893 case MONO_TYPE_FNPTR:
894 case MONO_TYPE_CLASS:
895 case MONO_TYPE_OBJECT:
896 case MONO_TYPE_STRING:
897 case MONO_TYPE_SZARRAY:
898 case MONO_TYPE_ARRAY:
899 case MONO_TYPE_R4:
/* Note: R4 is passed like a pointer-sized integer here (soft-float) */
900 cinfo->args [n].size = sizeof (gpointer);
901 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
902 n++;
903 break;
904 case MONO_TYPE_GENERICINST:
905 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
906 cinfo->args [n].size = sizeof (gpointer);
907 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
908 n++;
909 break;
911 /* Fall through */
912 case MONO_TYPE_TYPEDBYREF:
913 case MONO_TYPE_VALUETYPE: {
914 gint size;
915 int align_size;
916 int nwords;
917 guint32 align;
919 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
920 size = sizeof (MonoTypedRef);
921 align = sizeof (gpointer);
922 } else {
923 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
924 if (is_pinvoke)
925 size = mono_class_native_size (klass, &align);
926 else
927 size = mono_class_value_size (klass, &align);
929 DEBUG(printf ("load %d bytes struct\n",
930 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Round the struct size up to whole machine words */
931 align_size = size;
932 nwords = 0;
933 align_size += (sizeof (gpointer) - 1);
934 align_size &= ~(sizeof (gpointer) - 1);
935 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
936 cinfo->args [n].storage = RegTypeStructByVal;
937 /* FIXME: align stack_size if needed */
938 #ifdef __ARM_EABI__
939 if (align >= 8 && (gr & 1))
940 gr ++;
941 #endif
/* Split the struct between remaining registers and the stack */
942 if (gr > ARMREG_R3) {
943 cinfo->args [n].size = 0;
944 cinfo->args [n].vtsize = nwords;
945 } else {
946 int rest = ARMREG_R3 - gr + 1;
947 int n_in_regs = rest >= nwords? nwords: rest;
949 cinfo->args [n].size = n_in_regs;
950 cinfo->args [n].vtsize = nwords - n_in_regs;
951 cinfo->args [n].reg = gr;
952 gr += n_in_regs;
953 nwords -= n_in_regs;
955 cinfo->args [n].offset = stack_size;
956 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
957 stack_size += nwords * sizeof (gpointer);
958 n++;
959 break;
961 case MONO_TYPE_U8:
962 case MONO_TYPE_I8:
963 case MONO_TYPE_R8:
964 cinfo->args [n].size = 8;
965 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
966 n++;
967 break;
968 default:
969 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
973 /* Handle the case where there are no implicit arguments */
974 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
975 /* Prevent implicit arguments and sig_cookie from
976 being passed in registers */
977 gr = ARMREG_R3 + 1;
978 /* Emit the signature cookie just before the implicit arguments */
979 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Return value placement */
983 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
984 switch (simpletype->type) {
985 case MONO_TYPE_BOOLEAN:
986 case MONO_TYPE_I1:
987 case MONO_TYPE_U1:
988 case MONO_TYPE_I2:
989 case MONO_TYPE_U2:
990 case MONO_TYPE_CHAR:
991 case MONO_TYPE_I4:
992 case MONO_TYPE_U4:
993 case MONO_TYPE_I:
994 case MONO_TYPE_U:
995 case MONO_TYPE_PTR:
996 case MONO_TYPE_FNPTR:
997 case MONO_TYPE_CLASS:
998 case MONO_TYPE_OBJECT:
999 case MONO_TYPE_SZARRAY:
1000 case MONO_TYPE_ARRAY:
1001 case MONO_TYPE_STRING:
1002 cinfo->ret.storage = RegTypeGeneral;
1003 cinfo->ret.reg = ARMREG_R0;
1004 break;
1005 case MONO_TYPE_U8:
1006 case MONO_TYPE_I8:
1007 cinfo->ret.storage = RegTypeIRegPair;
1008 cinfo->ret.reg = ARMREG_R0;
1009 break;
1010 case MONO_TYPE_R4:
1011 case MONO_TYPE_R8:
1012 cinfo->ret.storage = RegTypeFP;
1013 cinfo->ret.reg = ARMREG_R0;
1014 /* FIXME: cinfo->ret.reg = ???;
1015 cinfo->ret.storage = RegTypeFP;*/
1016 break;
1017 case MONO_TYPE_GENERICINST:
1018 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1019 cinfo->ret.storage = RegTypeGeneral;
1020 cinfo->ret.reg = ARMREG_R0;
1021 break;
1023 /* Fall through */
1024 case MONO_TYPE_VALUETYPE:
1025 case MONO_TYPE_TYPEDBYREF:
/* Keep RegTypeStructByVal chosen above for small pinvoke structs */
1026 if (cinfo->ret.storage != RegTypeStructByVal)
1027 cinfo->ret.storage = RegTypeStructByAddr;
1028 break;
1029 case MONO_TYPE_VOID:
1030 break;
1031 default:
1032 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1036 /* align stack size to 8 */
1037 DEBUG (printf ("  stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1038 stack_size = (stack_size + 7) & ~7;
1040 cinfo->stack_usage = stack_size;
1041 return cinfo;
1046 * Set var information according to the calling convention. arm version.
1047 * The locals var stuff should most likely be split in another method.
1049 void
1050 mono_arch_allocate_vars (MonoCompile *cfg)
1052 MonoMethodSignature *sig;
1053 MonoMethodHeader *header;
1054 MonoInst *inst;
1055 int i, offset, size, align, curinst;
1056 int frame_reg = ARMREG_FP;
1057 CallInfo *cinfo;
1058 guint32 ualign;
1060 sig = mono_method_signature (cfg->method);
1062 if (!cfg->arch.cinfo)
1063 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1064 cinfo = cfg->arch.cinfo;
1066 /* FIXME: this will change when we use FP as gcc does */
1067 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1069 /* allow room for the vararg method args: void* and long/double */
1070 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1071 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1073 header = mono_method_get_header (cfg->method);
1076 * We use the frame register also for any method that has
1077 * exception clauses. This way, when the handlers are called,
1078 * the code will reference local variables using the frame reg instead of
1079 * the stack pointer: if we had to restore the stack pointer, we'd
1080 * corrupt the method frames that are already on the stack (since
1081 * filters get called before stack unwinding happens) when the filter
1082 * code would call any method (this also applies to finally etc.).
1084 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1085 frame_reg = ARMREG_FP;
1086 cfg->frame_reg = frame_reg;
1087 if (frame_reg != ARMREG_SP) {
1088 cfg->used_int_regs |= 1 << frame_reg;
1091 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1092 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1093 cfg->used_int_regs |= (1 << ARMREG_V5);
1095 offset = 0;
1096 curinst = 0;
1097 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1098 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1099 case MONO_TYPE_VOID:
1100 break;
1101 default:
1102 cfg->ret->opcode = OP_REGVAR;
1103 cfg->ret->inst_c0 = ARMREG_R0;
1104 break;
1107 /* local vars are at a positive offset from the stack pointer */
1109 * also note that if the function uses alloca, we use FP
1110 * to point at the local variables.
1112 offset = 0; /* linkage area */
1113 /* align the offset to 16 bytes: not sure this is needed here */
1114 //offset += 8 - 1;
1115 //offset &= ~(8 - 1);
1117 /* add parameter area size for called functions */
1118 offset += cfg->param_area;
1119 offset += 8 - 1;
1120 offset &= ~(8 - 1);
1121 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1122 offset += 8;
1124 /* allow room to save the return value */
1125 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1126 offset += 8;
1128 /* the MonoLMF structure is stored just below the stack pointer */
1129 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1130 if (cinfo->ret.storage == RegTypeStructByVal) {
1131 cfg->ret->opcode = OP_REGOFFSET;
1132 cfg->ret->inst_basereg = cfg->frame_reg;
1133 offset += sizeof (gpointer) - 1;
1134 offset &= ~(sizeof (gpointer) - 1);
1135 cfg->ret->inst_offset = - offset;
1136 } else {
1137 inst = cfg->vret_addr;
1138 offset += sizeof(gpointer) - 1;
1139 offset &= ~(sizeof(gpointer) - 1);
1140 inst->inst_offset = offset;
1141 inst->opcode = OP_REGOFFSET;
1142 inst->inst_basereg = frame_reg;
1143 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1144 printf ("vret_addr =");
1145 mono_print_ins (cfg->vret_addr);
1148 offset += sizeof(gpointer);
1151 curinst = cfg->locals_start;
1152 for (i = curinst; i < cfg->num_varinfo; ++i) {
1153 inst = cfg->varinfo [i];
1154 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
1155 continue;
1157 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1158 * pinvoke wrappers when they call functions returning structure */
1159 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1160 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
1161 align = ualign;
1163 else
1164 size = mono_type_size (inst->inst_vtype, &align);
1166 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1167 * since it loads/stores misaligned words, which don't do the right thing.
1169 if (align < 4 && size >= 4)
1170 align = 4;
1171 offset += align - 1;
1172 offset &= ~(align - 1);
1173 inst->inst_offset = offset;
1174 inst->opcode = OP_REGOFFSET;
1175 inst->inst_basereg = frame_reg;
1176 offset += size;
1177 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1180 curinst = 0;
1181 if (sig->hasthis) {
1182 inst = cfg->args [curinst];
1183 if (inst->opcode != OP_REGVAR) {
1184 inst->opcode = OP_REGOFFSET;
1185 inst->inst_basereg = frame_reg;
1186 offset += sizeof (gpointer) - 1;
1187 offset &= ~(sizeof (gpointer) - 1);
1188 inst->inst_offset = offset;
1189 offset += sizeof (gpointer);
1191 curinst++;
1194 if (sig->call_convention == MONO_CALL_VARARG) {
1195 size = 4;
1196 align = 4;
1198 /* Allocate a local slot to hold the sig cookie address */
1199 offset += align - 1;
1200 offset &= ~(align - 1);
1201 cfg->sig_cookie = offset;
1202 offset += size;
1205 for (i = 0; i < sig->param_count; ++i) {
1206 inst = cfg->args [curinst];
1208 if (inst->opcode != OP_REGVAR) {
1209 inst->opcode = OP_REGOFFSET;
1210 inst->inst_basereg = frame_reg;
1211 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1212 align = ualign;
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
1216 if (align < 4 && size >= 4)
1217 align = 4;
1218 /* The code in the prolog () stores words when storing vtypes received in a register */
1219 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1220 align = 4;
1221 offset += align - 1;
1222 offset &= ~(align - 1);
1223 inst->inst_offset = offset;
1224 offset += size;
1226 curinst++;
1229 /* align the offset to 8 bytes */
1230 offset += 8 - 1;
1231 offset &= ~(8 - 1);
1233 /* change sign? */
1234 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create the ARM-specific compile-time variables for CFG: caches the
 * CallInfo for the method signature, marks struct-by-value returns as
 * returned in registers, creates the vret address argument for other
 * struct returns, and allocates the helper locals used by AOT sequence
 * points.
 */
1237 void
1238 mono_arch_create_vars (MonoCompile *cfg)
1240 MonoMethodSignature *sig;
1241 CallInfo *cinfo;
1243 sig = mono_method_signature (cfg->method);
/* Compute the call info once per method and cache it in cfg->arch */
1245 if (!cfg->arch.cinfo)
1246 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1247 cinfo = cfg->arch.cinfo;
/* Struct returned by value in registers: the JIT treats the return var as a normal local */
1249 if (cinfo->ret.storage == RegTypeStructByVal)
1250 cfg->ret_var_is_local = TRUE;
/* Otherwise a hidden argument carries the address where the struct result is stored */
1252 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1253 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1254 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1255 printf ("vret_addr = ");
1256 mono_print_ins (cfg->vret_addr);
/* AOT sequence points need two volatile locals: one for the SeqPointInfo and
 * one caching the single-step trigger page pointer */
1260 if (cfg->gen_seq_points && cfg->compile_aot) {
1261 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1262 ins->flags |= MONO_INST_VOLATILE;
1263 cfg->arch.seq_point_info_var = ins;
1265 /* Allocate a separate variable for this to save 1 load per seq point */
1266 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1267 ins->flags |= MONO_INST_VOLATILE;
1268 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   Emit the IR which stores the vararg signature cookie for CALL on the
 * stack at the offset computed in CINFO. The cookie is a truncated copy of
 * the call signature containing only the arguments after the sentinel, as
 * expected by mono_ArgIterator_Setup. Tail calls are not supported and
 * this path disables AOT (signature tokens are not AOT-compatible yet).
 */
1272 static void
1273 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1275 MonoMethodSignature *tmp_sig;
1276 MonoInst *sig_arg;
1278 if (call->tail_call)
1279 NOT_IMPLEMENTED;
1281 /* FIXME: Add support for signature tokens to AOT */
1282 cfg->disable_aot = TRUE;
/* The cookie is always passed on the stack on ARM */
1284 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1287 * mono_ArgIterator_Setup assumes the signature cookie is
1288 * passed first and all the arguments which were before it are
1289 * passed on the stack after the signature. So compensate by
1290 * passing a different signature.
1292 tmp_sig = mono_metadata_signature_dup (call->signature);
1293 tmp_sig->param_count -= call->signature->sentinelpos;
1294 tmp_sig->sentinelpos = 0;
1295 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
/* Materialize the signature pointer as a constant and store it to the stack slot */
1297 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1298 sig_arg->dreg = mono_alloc_ireg (cfg);
1299 sig_arg->inst_p0 = tmp_sig;
1300 MONO_ADD_INS (cfg->cbb, sig_arg);
1302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
1305 #ifdef ENABLE_LLVM
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo for SIG into the LLVMCallInfo consumed by the
 * LLVM backend. Only plain integer register arguments/returns are supported;
 * any other storage kind disables LLVM compilation for this method by setting
 * cfg->disable_llvm with an explanatory message.
 */
1306 LLVMCallInfo*
1307 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1309 int i, n;
1310 CallInfo *cinfo;
1311 ArgInfo *ainfo;
1312 LLVMCallInfo *linfo;
1314 n = sig->param_count + sig->hasthis;
1316 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1318 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1321 * LLVM always uses the native ABI while we use our own ABI, the
1322 * only difference is the handling of vtypes:
1323 * - we only pass/receive them in registers in some cases, and only
1324 * in 1 or 2 integer registers.
/* Bail out (for LLVM) on return conventions the LLVM backend can't express */
1326 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP) {
1327 cfg->exception_message = g_strdup ("unknown ret conv");
1328 cfg->disable_llvm = TRUE;
1329 return linfo;
1332 for (i = 0; i < n; ++i) {
1333 ainfo = cinfo->args + i;
1335 linfo->args [i].storage = LLVMArgNone;
1337 switch (ainfo->storage) {
1338 case RegTypeGeneral:
1339 case RegTypeIRegPair:
1340 linfo->args [i].storage = LLVMArgInIReg;
1341 break;
1342 default:
/* Stack/vtype/FP argument kinds are not mapped yet: disable LLVM */
1343 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1344 cfg->disable_llvm = TRUE;
1345 break;
1349 return linfo;
1351 #endif
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR which moves the arguments of CALL into the locations dictated
 * by the ARM calling convention computed by get_call_info (): core registers
 * (RegTypeGeneral/IRegPair), the stack (RegTypeBase), split register+stack
 * (RegTypeBaseGen), or by-value structs (RegTypeStructByVal, lowered later by
 * mono_arch_emit_outarg_vt). Also emits the vararg signature cookie and the
 * hidden vret argument for struct returns.
 */
1353 void
1354 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1356 MonoInst *in, *ins;
1357 MonoMethodSignature *sig;
1358 int i, n;
1359 CallInfo *cinfo;
1361 sig = call->signature;
1362 n = sig->param_count + sig->hasthis;
1364 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1366 for (i = 0; i < n; ++i) {
1367 ArgInfo *ainfo = cinfo->args + i;
1368 MonoType *t;
/* The implicit 'this' argument (i < hasthis) is treated as a native int */
1370 if (i >= sig->hasthis)
1371 t = sig->params [i - sig->hasthis];
1372 else
1373 t = &mono_defaults.int_class->byval_arg;
1374 t = mini_type_get_underlying_type (NULL, t);
1376 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1377 /* Emit the signature cookie just before the implicit arguments */
1378 emit_sig_cookie (cfg, call, cinfo);
1381 in = call->args [i];
1383 switch (ainfo->storage) {
1384 case RegTypeGeneral:
1385 case RegTypeIRegPair:
/* 64-bit integers occupy a register pair; move each 32-bit half separately */
1386 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1387 MONO_INST_NEW (cfg, ins, OP_MOVE);
1388 ins->dreg = mono_alloc_ireg (cfg);
1389 ins->sreg1 = in->dreg + 1;
1390 MONO_ADD_INS (cfg->cbb, ins);
1391 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1393 MONO_INST_NEW (cfg, ins, OP_MOVE);
1394 ins->dreg = mono_alloc_ireg (cfg);
1395 ins->sreg1 = in->dreg + 2;
1396 MONO_ADD_INS (cfg->cbb, ins);
1397 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1398 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1399 #ifndef MONO_ARCH_SOFT_FLOAT
1400 int creg;
1401 #endif
1403 if (ainfo->size == 4) {
1404 #ifdef MONO_ARCH_SOFT_FLOAT
1405 /* mono_emit_call_args () have already done the r8->r4 conversion */
1406 /* The converted value is in an int vreg */
1407 MONO_INST_NEW (cfg, ins, OP_MOVE);
1408 ins->dreg = mono_alloc_ireg (cfg);
1409 ins->sreg1 = in->dreg;
1410 MONO_ADD_INS (cfg->cbb, ins);
1411 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1412 #else
/* Hard float: spill the FP value to the scratch area below the param area,
 * then reload the raw bits into a core register */
1413 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1414 creg = mono_alloc_ireg (cfg);
1415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1416 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1417 #endif
1418 } else {
1419 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float R8: extract the two 32-bit halves into consecutive core registers */
1420 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1421 ins->dreg = mono_alloc_ireg (cfg);
1422 ins->sreg1 = in->dreg;
1423 MONO_ADD_INS (cfg->cbb, ins);
1424 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1426 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1427 ins->dreg = mono_alloc_ireg (cfg);
1428 ins->sreg1 = in->dreg;
1429 MONO_ADD_INS (cfg->cbb, ins);
1430 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1431 #else
/* Hard float R8: spill the double, then reload both words into a register pair */
1432 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1433 creg = mono_alloc_ireg (cfg);
1434 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1435 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1436 creg = mono_alloc_ireg (cfg);
1437 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1438 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1439 #endif
1441 cfg->flags |= MONO_CFG_HAS_FPOUT;
1442 } else {
/* Plain 32-bit (or byref) value in a single core register */
1443 MONO_INST_NEW (cfg, ins, OP_MOVE);
1444 ins->dreg = mono_alloc_ireg (cfg);
1445 ins->sreg1 = in->dreg;
1446 MONO_ADD_INS (cfg->cbb, ins);
1448 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1450 break;
1451 case RegTypeStructByAddr:
1452 NOT_IMPLEMENTED;
1453 #if 0
1454 /* FIXME: where is the data allocated? */
1455 arg->backend.reg3 = ainfo->reg;
1456 call->used_iregs |= 1 << ainfo->reg;
1457 g_assert_not_reached ();
1458 #endif
1459 break;
1460 case RegTypeStructByVal:
/* Lowered later by mono_arch_emit_outarg_vt (); the ArgInfo is copied so it
 * survives past the g_free (cinfo) below */
1461 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1462 ins->opcode = OP_OUTARG_VT;
1463 ins->sreg1 = in->dreg;
1464 ins->klass = in->klass;
1465 ins->inst_p0 = call;
1466 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1467 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1468 MONO_ADD_INS (cfg->cbb, ins);
1469 break;
1470 case RegTypeBase:
/* Argument passed entirely on the stack at ainfo->offset from SP */
1471 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1472 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1473 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1474 if (t->type == MONO_TYPE_R8) {
1475 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1476 } else {
1477 #ifdef MONO_ARCH_SOFT_FLOAT
1478 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1479 #else
1480 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1481 #endif
1483 } else {
1484 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1486 break;
1487 case RegTypeBaseGen:
/* 64-bit value split between the last core register (r3) and the stack */
1488 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1489 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1490 MONO_INST_NEW (cfg, ins, OP_MOVE);
1491 ins->dreg = mono_alloc_ireg (cfg);
1492 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1493 MONO_ADD_INS (cfg->cbb, ins);
1494 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1495 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1496 int creg;
1498 #ifdef MONO_ARCH_SOFT_FLOAT
1499 g_assert_not_reached ();
1500 #endif
/* Spill the double and split its words: low word to r3, high word to the stack */
1502 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1503 creg = mono_alloc_ireg (cfg);
1504 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1506 creg = mono_alloc_ireg (cfg);
1507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1508 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1509 cfg->flags |= MONO_CFG_HAS_FPOUT;
1510 } else {
1511 g_assert_not_reached ();
1513 break;
1514 case RegTypeFP: {
1515 /* FIXME: */
1516 NOT_IMPLEMENTED;
1517 #if 0
1518 arg->backend.reg3 = ainfo->reg;
1519 /* FP args are passed in int regs */
1520 call->used_iregs |= 1 << ainfo->reg;
1521 if (ainfo->size == 8) {
1522 arg->opcode = OP_OUTARG_R8;
1523 call->used_iregs |= 1 << (ainfo->reg + 1);
1524 } else {
1525 arg->opcode = OP_OUTARG_R4;
1527 #endif
1528 cfg->flags |= MONO_CFG_HAS_FPOUT;
1529 break;
1531 default:
1532 g_assert_not_reached ();
1536 /* Handle the case where there are no implicit arguments */
1537 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1538 emit_sig_cookie (cfg, call, cinfo);
1540 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1541 MonoInst *vtarg;
1543 if (cinfo->ret.storage == RegTypeStructByVal) {
1544 /* The JIT will transform this into a normal call */
1545 call->vret_in_reg = TRUE;
1546 } else {
/* Pass the address of the return-value area as a hidden argument */
1547 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1548 vtarg->sreg1 = call->vret_var->dreg;
1549 vtarg->dreg = mono_alloc_preg (cfg);
1550 MONO_ADD_INS (cfg->cbb, vtarg);
1552 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1556 call->stack_usage = cinfo->stack_usage;
1558 g_free (cinfo);
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT created by mono_arch_emit_call (): load the first
 * ainfo->size words of the struct at SRC into consecutive core registers
 * starting at ainfo->reg, then memcpy the remaining ovf_size words to the
 * outgoing stack area at ainfo->offset.
 */
1561 void
1562 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1564 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1565 ArgInfo *ainfo = ins->inst_p1;
1566 int ovf_size = ainfo->vtsize;
1567 int doffset = ainfo->offset;
1568 int i, soffset, dreg;
1570 soffset = 0;
/* Register-resident part of the struct, one word per register */
1571 for (i = 0; i < ainfo->size; ++i) {
1572 dreg = mono_alloc_ireg (cfg);
1573 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1574 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1575 soffset += sizeof (gpointer);
1577 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Overflow part of the struct goes to the stack */
1578 if (ovf_size != 0)
1579 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR which moves VAL into the return location of the current
 * method: a register pair for I8/U8 (OP_SETLRET), the FP-ABI-specific
 * location for R4/R8 (varies between soft float, VFP and FPA builds), and
 * a plain move into cfg->ret for everything else.
 */
1582 void
1583 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1585 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1587 if (!ret->byref) {
/* 64-bit integer results are returned in a register pair */
1588 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1589 MonoInst *ins;
1591 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1592 ins->sreg1 = val->dreg + 1;
1593 ins->sreg2 = val->dreg + 2;
1594 MONO_ADD_INS (cfg->cbb, ins);
1595 return;
1597 #ifdef MONO_ARCH_SOFT_FLOAT
1598 if (ret->type == MONO_TYPE_R8) {
1599 MonoInst *ins;
1601 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1602 ins->dreg = cfg->ret->dreg;
1603 ins->sreg1 = val->dreg;
1604 MONO_ADD_INS (cfg->cbb, ins);
1605 return;
1607 if (ret->type == MONO_TYPE_R4) {
1608 /* Already converted to an int in method_to_ir () */
1609 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1610 return;
1612 #elif defined(ARM_FPU_VFP)
1613 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1614 MonoInst *ins;
1616 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1617 ins->dreg = cfg->ret->dreg;
1618 ins->sreg1 = val->dreg;
1619 MONO_ADD_INS (cfg->cbb, ins);
1620 return;
1622 #else
/* FPA: the result already lives in an FP register, a plain FP move suffices */
1623 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1624 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1625 return;
1627 #endif
1630 /* FIXME: */
1631 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/*
 * mono_arch_is_inst_imm:
 *
 *   Whether IMM can be encoded as an instruction immediate. Always TRUE
 * here: the lowering pass (mono_arch_lowering_pass) rewrites immediates
 * which don't fit the ARM 8-bit-rotated encoding, so no filtering is
 * needed at this level.
 */
1634 gboolean
1635 mono_arch_is_inst_imm (gint64 imm)
1637 return TRUE;
/* Max number of stack-passed words supported by the dyn call code */
1640 #define DYN_CALL_STACK_ARGS 6
/* Per-signature state returned by mono_arch_dyn_call_prepare () */
1642 typedef struct {
1643 MonoMethodSignature *sig;
1644 CallInfo *cinfo;
1645 } ArchDynCallInfo;
/* Marshalled argument/result buffer passed to the dyn call trampoline:
 * 'regs' holds the register args followed by the stack args, 'res'/'res2'
 * receive the raw return register values, 'ret' points to the caller's
 * return-value buffer. */
1647 typedef struct {
1648 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
1649 mgreg_t res, res2;
1650 guint8 *ret;
1651 } DynCallArgs;
/*
 * dyn_call_supported:
 *
 *   Return whether the dynamic-call machinery can handle SIG with the
 * computed CINFO: bounded number of args, a supported return storage,
 * only register/near-stack argument storage, and (from the signature)
 * no I8/U8 arguments and no floats on soft-float builds.
 */
1653 static gboolean
1654 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1656 int i;
/* All arguments must fit in the fixed-size DynCallArgs.regs array */
1658 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1659 return FALSE;
1661 switch (cinfo->ret.storage) {
1662 case RegTypeNone:
1663 case RegTypeGeneral:
1664 case RegTypeIRegPair:
1665 case RegTypeStructByAddr:
1666 break;
1667 case RegTypeFP:
/* FP returns are only retrievable from VFP registers here */
1668 #ifdef ARM_FPU_FPA
1669 return FALSE;
1670 #elif defined(ARM_FPU_VFP)
1671 break;
1672 #else
1673 return FALSE;
1674 #endif
1675 default:
1676 return FALSE;
1679 for (i = 0; i < cinfo->nargs; ++i) {
1680 switch (cinfo->args [i].storage) {
1681 case RegTypeGeneral:
1682 break;
1683 case RegTypeIRegPair:
1684 break;
1685 case RegTypeBase:
/* Stack args beyond the reserved DYN_CALL_STACK_ARGS words don't fit */
1686 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1687 return FALSE;
1688 break;
1689 case RegTypeStructByVal:
1690 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1691 return FALSE;
1692 break;
1693 default:
1694 return FALSE;
1698 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float
1699 for (i = 0; i < sig->param_count; ++i) {
1700 MonoType *t = sig->params [i];
1702 if (t->byref)
1703 continue;
1705 switch (t->type) {
1706 case MONO_TYPE_R4:
1707 case MONO_TYPE_R8:
1708 #ifdef MONO_ARCH_SOFT_FLOAT
1709 return FALSE;
1710 #else
1711 break;
1712 #endif
1714 case MONO_TYPE_I8:
1715 case MONO_TYPE_U8:
1716 return FALSE;
1718 default:
1719 break;
1723 return TRUE;
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build the arch-specific dynamic-call state for SIG, or return NULL if
 * the signature is not supported (see dyn_call_supported ()). The returned
 * pointer owns the embedded CallInfo and must be released with
 * mono_arch_dyn_call_free ().
 */
1726 MonoDynCallInfo*
1727 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1729 ArchDynCallInfo *info;
1730 CallInfo *cinfo;
1732 cinfo = get_call_info (NULL, sig, FALSE);
1734 if (!dyn_call_supported (cinfo, sig)) {
1735 g_free (cinfo);
1736 return NULL;
1739 info = g_new0 (ArchDynCallInfo, 1);
1740 // FIXME: Preprocess the info to speed up start_dyn_call ()
1741 info->sig = sig;
1742 info->cinfo = cinfo;
1744 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Free the state allocated by mono_arch_dyn_call_prepare ().
 * The MonoMethodSignature is not owned by INFO and is not freed.
 */
1747 void
1748 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1750 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1752 g_free (ainfo->cinfo);
1753 g_free (ainfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs buffer BUF according to INFO, ready
 * for the dyn call trampoline: the hidden vtype-return address and 'this'
 * go first, then each parameter is widened/copied into the register or
 * stack slot computed by get_call_info ().
 */
1756 void
1757 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1759 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1760 DynCallArgs *p = (DynCallArgs*)buf;
1761 int arg_index, greg, i, j;
1762 MonoMethodSignature *sig = dinfo->sig;
1764 g_assert (buf_len >= sizeof (DynCallArgs));
1766 p->res = 0;
1767 p->ret = ret;
1769 arg_index = 0;
1770 greg = 0;
/* Hidden first argument: address where the vtype return is stored */
1772 if (dinfo->cinfo->vtype_retaddr)
1773 p->regs [greg ++] = (mgreg_t)ret;
1775 if (sig->hasthis)
1776 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1778 for (i = 0; i < sig->param_count; i++) {
1779 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1780 gpointer *arg = args [arg_index ++];
1781 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
1782 int slot = -1;
/* Map the ABI location to an index into p->regs: registers first,
 * then the DYN_CALL_STACK_ARGS stack words */
1784 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1785 slot = ainfo->reg;
1786 else if (ainfo->storage == RegTypeBase)
1787 slot = PARAM_REGS + (ainfo->offset / 4);
1788 else
1789 g_assert_not_reached ();
1791 if (t->byref) {
1792 p->regs [slot] = (mgreg_t)*arg;
1793 continue;
1796 switch (t->type) {
1797 case MONO_TYPE_STRING:
1798 case MONO_TYPE_CLASS:
1799 case MONO_TYPE_ARRAY:
1800 case MONO_TYPE_SZARRAY:
1801 case MONO_TYPE_OBJECT:
1802 case MONO_TYPE_PTR:
1803 case MONO_TYPE_I:
1804 case MONO_TYPE_U:
1805 p->regs [slot] = (mgreg_t)*arg;
1806 break;
1807 case MONO_TYPE_BOOLEAN:
1808 case MONO_TYPE_U1:
1809 p->regs [slot] = *(guint8*)arg;
1810 break;
1811 case MONO_TYPE_I1:
1812 p->regs [slot] = *(gint8*)arg;
1813 break;
1814 case MONO_TYPE_I2:
1815 p->regs [slot] = *(gint16*)arg;
1816 break;
1817 case MONO_TYPE_U2:
1818 case MONO_TYPE_CHAR:
1819 p->regs [slot] = *(guint16*)arg;
1820 break;
1821 case MONO_TYPE_I4:
1822 p->regs [slot] = *(gint32*)arg;
1823 break;
1824 case MONO_TYPE_U4:
1825 p->regs [slot] = *(guint32*)arg;
1826 break;
1827 case MONO_TYPE_I8:
1828 case MONO_TYPE_U8:
/* NOTE(review): I8/U8 and R4/R8 args are rejected by dyn_call_supported (),
 * so these cases look unreachable here — verify before relying on them */
1829 p->regs [slot ++] = (mgreg_t)arg [0];
1830 p->regs [slot] = (mgreg_t)arg [1];
1831 break;
1832 case MONO_TYPE_R4:
1833 p->regs [slot] = *(mgreg_t*)arg;
1834 break;
1835 case MONO_TYPE_R8:
1836 p->regs [slot ++] = (mgreg_t)arg [0];
1837 p->regs [slot] = (mgreg_t)arg [1];
1838 break;
1839 case MONO_TYPE_GENERICINST:
1840 if (MONO_TYPE_IS_REFERENCE (t)) {
1841 p->regs [slot] = (mgreg_t)*arg;
1842 break;
1843 } else {
1844 /* Fall through */
1846 case MONO_TYPE_VALUETYPE:
1847 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0 means the struct is passed entirely on the stack */
1849 if (ainfo->size == 0)
1850 slot = PARAM_REGS + (ainfo->offset / 4);
1851 else
1852 slot = ainfo->reg;
1854 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1855 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1856 break;
1857 default:
1858 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the result of a dynamic call into the return buffer recorded in
 * BUF by mono_arch_start_dyn_call (): the raw register values res/res2 are
 * narrowed/combined according to the signature's return type. Vtype
 * returns were already written through the hidden return address.
 */
1863 void
1864 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1866 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1867 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1868 guint8 *ret = ((DynCallArgs*)buf)->ret;
1869 mgreg_t res = ((DynCallArgs*)buf)->res;
1870 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1872 switch (mono_type_get_underlying_type (sig->ret)->type) {
1873 case MONO_TYPE_VOID:
1874 *(gpointer*)ret = NULL;
1875 break;
1876 case MONO_TYPE_STRING:
1877 case MONO_TYPE_CLASS:
1878 case MONO_TYPE_ARRAY:
1879 case MONO_TYPE_SZARRAY:
1880 case MONO_TYPE_OBJECT:
1881 case MONO_TYPE_I:
1882 case MONO_TYPE_U:
1883 case MONO_TYPE_PTR:
1884 *(gpointer*)ret = (gpointer)res;
1885 break;
1886 case MONO_TYPE_I1:
1887 *(gint8*)ret = res;
1888 break;
1889 case MONO_TYPE_U1:
1890 case MONO_TYPE_BOOLEAN:
1891 *(guint8*)ret = res;
1892 break;
1893 case MONO_TYPE_I2:
1894 *(gint16*)ret = res;
1895 break;
1896 case MONO_TYPE_U2:
1897 case MONO_TYPE_CHAR:
1898 *(guint16*)ret = res;
1899 break;
1900 case MONO_TYPE_I4:
1901 *(gint32*)ret = res;
1902 break;
1903 case MONO_TYPE_U4:
1904 *(guint32*)ret = res;
1905 break;
1906 case MONO_TYPE_I8:
1907 case MONO_TYPE_U8:
1908 /* This handles endianness as well */
1909 ((gint32*)ret) [0] = res;
1910 ((gint32*)ret) [1] = res2;
1911 break;
1912 case MONO_TYPE_GENERICINST:
1913 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1914 *(gpointer*)ret = (gpointer)res;
1915 break;
1916 } else {
1917 /* Fall through */
1919 case MONO_TYPE_VALUETYPE:
/* The callee already stored the struct through the hidden return address */
1920 g_assert (ainfo->cinfo->vtype_retaddr);
1921 /* Nothing to do */
1922 break;
1923 #if defined(ARM_FPU_VFP)
1924 case MONO_TYPE_R4:
/* Reinterpret the raw register bits as a float */
1925 *(float*)ret = *(float*)&res;
1926 break;
1927 case MONO_TYPE_R8: {
1928 mgreg_t regs [2];
1930 regs [0] = res;
1931 regs [1] = res2;
/* Reassemble the two result registers into a double */
1933 *(double*)ret = *(double*)&regs;
1934 break;
1936 #endif
1937 default:
1938 g_assert_not_reached ();
1943 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit machine code at P which calls FUNC (the trace-enter hook) with
 * the MonoMethod in r0. r1 (the "ebp"/frame argument) is currently always
 * passed as 0. Returns the updated code pointer.
 */
1946 void*
1947 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1949 guchar *code = p;
1951 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1952 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1953 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1954 code = emit_call_reg (code, ARMREG_R2);
1955 return code;
/* How the return value must be preserved around the trace-leave call
 * in mono_arch_instrument_epilog_full () */
1958 enum {
1959 SAVE_NONE,
1960 SAVE_STRUCT,
1961 SAVE_ONE,
1962 SAVE_TWO,
1963 SAVE_FP
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit machine code at P which calls FUNC (the trace-leave hook) with the
 * MonoMethod in r0. The return value (in r0/r1 depending on the type) is
 * spilled to an 8-byte-aligned scratch slot above the param area before the
 * call and restored afterwards; FP returns are not saved yet (FIXMEs below).
 */
1966 void*
1967 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1969 guchar *code = p;
1970 int save_mode = SAVE_NONE;
1971 int offset;
1972 MonoMethod *method = cfg->method;
1973 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1974 int save_offset = cfg->param_area;
/* round the save slot up to 8-byte alignment */
1975 save_offset += 7;
1976 save_offset &= ~7;
1978 offset = code - cfg->native_code;
1979 /* we need about 16 instructions */
1980 if (offset > (cfg->code_size - 16 * 4)) {
1981 cfg->code_size *= 2;
1982 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1983 code = cfg->native_code + offset;
1985 switch (rtype) {
1986 case MONO_TYPE_VOID:
1987 /* special case string .ctor icall */
/* NOTE(review): strcmp () != 0 means the name is NOT ".ctor", so this saves
 * r0 for void string-class methods other than ".ctor" — possibly an
 * inverted condition; verify against other backends before changing */
1988 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1989 save_mode = SAVE_ONE;
1990 else
1991 save_mode = SAVE_NONE;
1992 break;
1993 case MONO_TYPE_I8:
1994 case MONO_TYPE_U8:
1995 save_mode = SAVE_TWO;
1996 break;
1997 case MONO_TYPE_R4:
1998 case MONO_TYPE_R8:
1999 save_mode = SAVE_FP;
2000 break;
2001 case MONO_TYPE_VALUETYPE:
2002 save_mode = SAVE_STRUCT;
2003 break;
2004 default:
2005 save_mode = SAVE_ONE;
2006 break;
/* Spill the return value before calling the hook; if enable_arguments,
 * also pass it (shifted into r1/r2) as arguments to the hook */
2009 switch (save_mode) {
2010 case SAVE_TWO:
2011 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2012 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2013 if (enable_arguments) {
2014 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2015 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2017 break;
2018 case SAVE_ONE:
2019 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2020 if (enable_arguments) {
2021 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2023 break;
2024 case SAVE_FP:
2025 /* FIXME: what reg? */
2026 if (enable_arguments) {
2027 /* FIXME: what reg? */
2029 break;
2030 case SAVE_STRUCT:
2031 if (enable_arguments) {
2032 /* FIXME: get the actual address */
2033 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2035 break;
2036 case SAVE_NONE:
2037 default:
2038 break;
2041 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2042 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2043 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value after the hook returns */
2045 switch (save_mode) {
2046 case SAVE_TWO:
2047 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2048 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2049 break;
2050 case SAVE_ONE:
2051 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2052 break;
2053 case SAVE_FP:
2054 /* FIXME */
2055 break;
2056 case SAVE_NONE:
2057 default:
2058 break;
2061 return code;
2065 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb. The direct-offset fast
 * path is disabled (the "0 &&"); a basic-block patch is always recorded
 * and resolved later by the patcher. */
2067 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2068 if (0 && ins->inst_true_bb->native_offset) { \
2069 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2070 } else { \
2071 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2072 ARM_B_COND (code, (condcode), 0); \
2075 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2077 /* emit an exception if condition is fail
2079 * We assign the extra code used to throw the implicit exceptions
2080 * to cfg->bb_exit as far as the big branch handling is concerned
2082 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2083 do { \
2084 mono_add_patch_info (cfg, code - cfg->native_code, \
2085 MONO_PATCH_INFO_EXC, exc_name); \
2086 ARM_BL_COND (code, (condcode), 0); \
2087 } while (0);
2089 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * mono_arch_peephole_pass_1:
 *
 *   First arch-specific peephole pass. No pass-1 optimizations are
 * implemented for ARM; all local peepholes live in pass 2.
 */
2091 void
2092 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second arch-specific peephole pass over BB: forwards values across
 * store/load pairs to the same [basereg + offset], collapses redundant
 * loads, turns store-then-load of sub-word values into sign/zero
 * extensions, and removes no-op or cancelling register moves.
 */
2096 void
2097 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2099 MonoInst *ins, *n, *last_ins = NULL;
2101 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2102 switch (ins->opcode) {
2103 case OP_MUL_IMM:
2104 case OP_IMUL_IMM:
2105 /* Already done by an arch-independent pass */
2106 break;
2107 case OP_LOAD_MEMBASE:
2108 case OP_LOADI4_MEMBASE:
2110 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2111 * OP_LOAD_MEMBASE offset(basereg), reg
/* Load after a store to the same slot: reuse the stored register instead */
2113 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2114 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2115 ins->inst_basereg == last_ins->inst_destbasereg &&
2116 ins->inst_offset == last_ins->inst_offset) {
2117 if (ins->dreg == last_ins->sreg1) {
2118 MONO_DELETE_INS (bb, ins);
2119 continue;
2120 } else {
2121 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2122 ins->opcode = OP_MOVE;
2123 ins->sreg1 = last_ins->sreg1;
2127 * Note: reg1 must be different from the basereg in the second load
2128 * OP_LOAD_MEMBASE offset(basereg), reg1
2129 * OP_LOAD_MEMBASE offset(basereg), reg2
2130 * -->
2131 * OP_LOAD_MEMBASE offset(basereg), reg1
2132 * OP_MOVE reg1, reg2
2134 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2135 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2136 ins->inst_basereg != last_ins->dreg &&
2137 ins->inst_basereg == last_ins->inst_basereg &&
2138 ins->inst_offset == last_ins->inst_offset) {
2140 if (ins->dreg == last_ins->dreg) {
2141 MONO_DELETE_INS (bb, ins);
2142 continue;
2143 } else {
2144 ins->opcode = OP_MOVE;
2145 ins->sreg1 = last_ins->dreg;
2148 //g_assert_not_reached ();
2150 #if 0
2152 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2153 * OP_LOAD_MEMBASE offset(basereg), reg
2154 * -->
2155 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2156 * OP_ICONST reg, imm
2158 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2159 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2160 ins->inst_basereg == last_ins->inst_destbasereg &&
2161 ins->inst_offset == last_ins->inst_offset) {
2162 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2163 ins->opcode = OP_ICONST;
2164 ins->inst_c0 = last_ins->inst_imm;
2165 g_assert_not_reached (); // check this rule
2166 #endif
2168 break;
2169 case OP_LOADU1_MEMBASE:
2170 case OP_LOADI1_MEMBASE:
/* Byte load after a byte store of the same slot: extend the stored register */
2171 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2172 ins->inst_basereg == last_ins->inst_destbasereg &&
2173 ins->inst_offset == last_ins->inst_offset) {
2174 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2175 ins->sreg1 = last_ins->sreg1;
2177 break;
2178 case OP_LOADU2_MEMBASE:
2179 case OP_LOADI2_MEMBASE:
/* Same peephole for 16-bit store/load pairs */
2180 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2181 ins->inst_basereg == last_ins->inst_destbasereg &&
2182 ins->inst_offset == last_ins->inst_offset) {
2183 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2184 ins->sreg1 = last_ins->sreg1;
2186 break;
2187 case OP_MOVE:
2188 ins->opcode = OP_MOVE;
2190 * OP_MOVE reg, reg
2192 if (ins->dreg == ins->sreg1) {
2193 MONO_DELETE_INS (bb, ins);
2194 continue;
2197 * OP_MOVE sreg, dreg
2198 * OP_MOVE dreg, sreg
2200 if (last_ins && last_ins->opcode == OP_MOVE &&
2201 ins->sreg1 == last_ins->dreg &&
2202 ins->dreg == last_ins->sreg1) {
2203 MONO_DELETE_INS (bb, ins);
2204 continue;
2206 break;
2208 last_ins = ins;
2209 ins = ins->next;
2211 bb->last_ins = last_ins;
2215 * the branch_cc_table should maintain the order of these
2216 * opcodes.
2217 case CEE_BEQ:
2218 case CEE_BGE:
2219 case CEE_BGT:
2220 case CEE_BLE:
2221 case CEE_BLT:
2222 case CEE_BNE_UN:
2223 case CEE_BGE_UN:
2224 case CEE_BGT_UN:
2225 case CEE_BLE_UN:
2226 case CEE_BLT_UN:
/* ARM condition codes indexed by the CEE_B* branch opcode order listed
 * in the comment above: five signed conditions, then the unsigned ones */
2228 static const guchar
2229 branch_cc_table [] = {
2230 ARMCOND_EQ,
2231 ARMCOND_GE,
2232 ARMCOND_GT,
2233 ARMCOND_LE,
2234 ARMCOND_LT,
2236 ARMCOND_NE,
2237 ARMCOND_HS,
2238 ARMCOND_HI,
2239 ARMCOND_LS,
2240 ARMCOND_LO
/* Allocate a new instruction DEST with opcode OP and insert it before the
 * instruction currently bound to the 'ins' variable in the enclosing loop
 * (used by mono_arch_lowering_pass ()) */
2243 #define NEW_INS(cfg,dest,op) do { \
2244 MONO_INST_NEW ((cfg), (dest), (op)); \
2245 mono_bblock_insert_before_ins (bb, ins, (dest)); \
2246 } while (0)
/*
 * map_to_reg_reg_op:
 *
 *   Map an immediate-form or membase-form opcode OP to its register-register
 * equivalent (ALU *_IMM -> register ALU, *_MEMBASE -> *_MEMINDEX,
 * *_MEMBASE_IMM -> *_MEMBASE_REG), used by the lowering pass once the
 * immediate has been materialized into a register. Asserts on unknown
 * opcodes.
 */
2248 static int
2249 map_to_reg_reg_op (int op)
2251 switch (op) {
2252 case OP_ADD_IMM:
2253 return OP_IADD;
2254 case OP_SUB_IMM:
2255 return OP_ISUB;
2256 case OP_AND_IMM:
2257 return OP_IAND;
2258 case OP_COMPARE_IMM:
2259 return OP_COMPARE;
2260 case OP_ICOMPARE_IMM:
2261 return OP_ICOMPARE;
2262 case OP_ADDCC_IMM:
2263 return OP_ADDCC;
2264 case OP_ADC_IMM:
2265 return OP_ADC;
2266 case OP_SUBCC_IMM:
2267 return OP_SUBCC;
2268 case OP_SBB_IMM:
2269 return OP_SBB;
2270 case OP_OR_IMM:
2271 return OP_IOR;
2272 case OP_XOR_IMM:
2273 return OP_IXOR;
2274 case OP_LOAD_MEMBASE:
2275 return OP_LOAD_MEMINDEX;
2276 case OP_LOADI4_MEMBASE:
2277 return OP_LOADI4_MEMINDEX;
2278 case OP_LOADU4_MEMBASE:
2279 return OP_LOADU4_MEMINDEX;
2280 case OP_LOADU1_MEMBASE:
2281 return OP_LOADU1_MEMINDEX;
2282 case OP_LOADI2_MEMBASE:
2283 return OP_LOADI2_MEMINDEX;
2284 case OP_LOADU2_MEMBASE:
2285 return OP_LOADU2_MEMINDEX;
2286 case OP_LOADI1_MEMBASE:
2287 return OP_LOADI1_MEMINDEX;
2288 case OP_STOREI1_MEMBASE_REG:
2289 return OP_STOREI1_MEMINDEX;
2290 case OP_STOREI2_MEMBASE_REG:
2291 return OP_STOREI2_MEMINDEX;
2292 case OP_STOREI4_MEMBASE_REG:
2293 return OP_STOREI4_MEMINDEX;
2294 case OP_STORE_MEMBASE_REG:
2295 return OP_STORE_MEMINDEX;
2296 case OP_STORER4_MEMBASE_REG:
2297 return OP_STORER4_MEMINDEX;
2298 case OP_STORER8_MEMBASE_REG:
2299 return OP_STORER8_MEMINDEX;
2300 case OP_STORE_MEMBASE_IMM:
2301 return OP_STORE_MEMBASE_REG;
2302 case OP_STOREI1_MEMBASE_IMM:
2303 return OP_STOREI1_MEMBASE_REG;
2304 case OP_STOREI2_MEMBASE_IMM:
2305 return OP_STOREI2_MEMBASE_REG;
2306 case OP_STOREI4_MEMBASE_IMM:
2307 return OP_STOREI4_MEMBASE_REG;
2309 g_assert_not_reached ();
/*
 * Remove from the instruction list the instructions that can't be
 * represented with very simple instructions with no register
 * requirements.
 *
 * ARM data-processing immediates must be an 8 bit value rotated right by
 * an even amount, and load/store displacements are limited to 12 (word)
 * or 8 (halfword/byte/fp) bits; anything else is materialized into a
 * fresh vreg via OP_ICONST and the opcode is switched to its reg-reg or
 * reg-index form.
 */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
	MonoInst *ins, *temp, *last_ins = NULL;
	int rot_amount, imm8, low_imm;

	MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
		switch (ins->opcode) {
		case OP_ADD_IMM:
		case OP_SUB_IMM:
		case OP_AND_IMM:
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
		case OP_ADDCC_IMM:
		case OP_ADC_IMM:
		case OP_SUBCC_IMM:
		case OP_SBB_IMM:
		case OP_OR_IMM:
		case OP_XOR_IMM:
		case OP_IADD_IMM:
		case OP_ISUB_IMM:
		case OP_IAND_IMM:
		case OP_IADC_IMM:
		case OP_ISBB_IMM:
		case OP_IOR_IMM:
		case OP_IXOR_IMM:
			/* immediate not encodable as rotated imm8: load it into
			 * a temp vreg and use the reg-reg form of the opcode */
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			}
			/* re-process: the borrow-using ops need the flag fixup below */
			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
				goto loop_start;
			else
				break;
		case OP_MUL_IMM:
		case OP_IMUL_IMM:
			/* strength-reduce trivial multiplications */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
				break;
			}
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
				ins->inst_c0 = 0;
				break;
			}
			imm8 = mono_is_power_of_two (ins->inst_imm);
			if (imm8 > 0) {
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm8;
				break;
			}
			/* general case: ARM has no mul-by-immediate */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = OP_IMUL;
			break;
		case OP_SBB:
		case OP_ISBB:
		case OP_SUBCC:
		case OP_ISUBCC:
			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
			break;
		case OP_LOCALLOC_IMM:
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
			break;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADU1_MEMBASE:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an ad_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			 */
			if (arm_is_imm12 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* halfword/signed-byte loads only get an 8 bit displacement */
			if (arm_is_imm8 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			/* split the offset into a rotated-imm8 base adjustment
			 * plus a small displacement that the fp load can encode */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_basereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = temp->dreg;
				ins->inst_offset = low_imm;
				break;
			}
			/* VFP/FPA doesn't have indexed load instructions */
			g_assert_not_reached ();
			break;
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
			if (arm_is_imm12 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STOREI2_MEMBASE_REG:
			if (arm_is_imm8 (ins->inst_offset))
				break;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			break;
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			if (arm_is_fpimm8 (ins->inst_offset))
				break;
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_destbasereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_destbasereg = temp->dreg;
				ins->inst_offset = low_imm;
				break;
			}
			/*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
			/* VFP/FPA doesn't have indexed store instructions */
			g_assert_not_reached ();
			break;
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			/* first turn the stored immediate into a register ... */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			last_ins = temp;
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
		case OP_FCOMPARE: {
			gboolean swap = FALSE;
			int reg;

			if (!ins->next) {
				/* Optimized away */
				NULLIFY_INS (ins);
				break;
			}

			/* Some fp compares require swapped operands */
			switch (ins->next->opcode) {
			case OP_FBGT:
				ins->next->opcode = OP_FBLT;
				swap = TRUE;
				break;
			case OP_FBGT_UN:
				ins->next->opcode = OP_FBLT_UN;
				swap = TRUE;
				break;
			case OP_FBLE:
				ins->next->opcode = OP_FBGE;
				swap = TRUE;
				break;
			case OP_FBLE_UN:
				ins->next->opcode = OP_FBGE_UN;
				swap = TRUE;
				break;
			default:
				break;
			}
			if (swap) {
				reg = ins->sreg1;
				ins->sreg1 = ins->sreg2;
				ins->sreg2 = reg;
			}
			break;
		}
		}

		last_ins = ins;
	}
	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
}
2533 void
2534 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2536 MonoInst *ins;
2538 if (long_ins->opcode == OP_LNEG) {
2539 ins = long_ins;
2540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2541 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
2542 NULLIFY_INS (ins);
2546 static guchar*
2547 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2549 /* sreg is a float, dreg is an integer reg */
2550 #ifdef ARM_FPU_FPA
2551 ARM_FIXZ (code, dreg, sreg);
2552 #elif defined(ARM_FPU_VFP)
2553 if (is_signed)
2554 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2555 else
2556 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2557 ARM_FMRS (code, dreg, ARM_VFP_F0);
2558 #endif
2559 if (!is_signed) {
2560 if (size == 1)
2561 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2562 else if (size == 2) {
2563 ARM_SHL_IMM (code, dreg, dreg, 16);
2564 ARM_SHR_IMM (code, dreg, dreg, 16);
2566 } else {
2567 if (size == 1) {
2568 ARM_SHL_IMM (code, dreg, dreg, 24);
2569 ARM_SAR_IMM (code, dreg, dreg, 24);
2570 } else if (size == 2) {
2571 ARM_SHL_IMM (code, dreg, dreg, 16);
2572 ARM_SAR_IMM (code, dreg, dreg, 16);
2575 return code;
/* State passed to search_thunk_slot () while scanning code chunks for a
 * thunk slot usable to redirect 'code' to 'target'. */
typedef struct {
	guchar *code;          /* call site being patched */
	const guchar *target;  /* final branch target */
	int absolute;
	int found;             /* 0 = keep looking, 1 = patched, 2 = take first free slot */
} PatchData;

/* TRUE if diff fits the signed +-32 MB displacement of an ARM B/BL */
#define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback: scan one code chunk (DATA, with
 * CSIZE bytes of code out of BSIZE total) for an existing thunk to
 * pdata->target, or (second pass, pdata->found == 2) for a free slot where
 * a new thunk can be emitted. On success patches pdata->code, sets
 * pdata->found = 1 and returns 1 to stop the iteration; returns 0 to
 * continue with the next chunk.
 */
static int
search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
	PatchData *pdata = (PatchData*)user_data;
	guchar *code = data;
	guint32 *thunks = data;
	guint32 *endthunks = (guint32*)(code + bsize);
	int count = 0;
	int difflow, diffhigh;

	/* always ensure a call from pdata->code can reach to the thunks without further thunks */
	difflow = (char*)pdata->code - (char*)thunks;
	diffhigh = (char*)pdata->code - (char*)endthunks;
	if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
		return 0;

	/*
	 * The thunk is composed of 3 words:
	 * load constant from thunks [2] into ARM_IP
	 * bx to ARM_IP
	 * address constant
	 * Note that the LR register is already setup
	 */
	//g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
	/* first pass only considers the chunk containing the call site itself */
	if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
		while (thunks < endthunks) {
			//g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
			if (thunks [2] == (guint32)pdata->target) {
				/* reuse an existing thunk for this target */
				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
				/* found a free slot instead: emit thunk */
				/* ARMREG_IP is fine to use since this can't be an IMT call
				 * which is indirect
				 */
				code = (guchar*)thunks;
				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
				if (thumb_supported)
					ARM_BX (code, ARMREG_IP);
				else
					ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
				thunks [2] = (guint32)pdata->target;
				mono_arch_flush_icache ((guchar*)thunks, 12);

				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
				pdata->found = 1;
				return 1;
			}
			/* skip 12 bytes, the size of the thunk */
			thunks += 3;
			count++;
		}
		//g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
	}
	return 0;
}
2646 static void
2647 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2649 PatchData pdata;
2651 if (!domain)
2652 domain = mono_domain_get ();
2654 pdata.code = code;
2655 pdata.target = target;
2656 pdata.absolute = absolute;
2657 pdata.found = 0;
2659 mono_domain_lock (domain);
2660 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2662 if (!pdata.found) {
2663 /* this uses the first available slot */
2664 pdata.found = 2;
2665 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2667 mono_domain_unlock (domain);
2669 if (pdata.found != 1)
2670 g_print ("thunk failed for %p from %p\n", target, code);
2671 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call instruction (or call sequence) at CODE to
 * transfer control to TARGET. Direct B/BL instructions get their 24 bit
 * displacement rewritten (routing through a thunk in DOMAIN when the
 * target is out of the +-32 MB range); the indirect call sequences
 * emitted by this backend get their embedded address constant replaced.
 */
static void
arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
{
	guint32 *code32 = (void*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7;
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b: a direct B/BL instruction */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tbits;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			if (diff & 2) {
				tbits |= 1 << 24;
			}
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		} else {
			tbits = 0;
		}
		if (diff >= 0) {
			if (diff <= 33554431) {
				diff >>= 2;
				ins = (ins & 0xff000000) | diff;
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		} else {
			/* diff between 0 and -33554432 */
			if (diff >= -33554432) {
				diff >>= 2;
				ins = (ins & 0xff000000) | (diff & ~0xff000000);
				ins &= tmask;
				*code32 = ins | tbits;
				return;
			}
		}
		/* target out of direct branch range: go through a thunk */
		handle_thunk (domain, TRUE, code, target);
		return;
	}

	/*
	 * The alternative call sequences looks like this:
	 *
	 * 	ldr ip, [pc] // loads the address constant
	 * 	b 1f         // jumps around the constant
	 * 	address constant embedded in the code
	 *   1f:
	 * 	mov lr, pc
	 * 	mov pc, ip
	 *
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 *    of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 *    to the mov pc, ip instruction
	 *
	 * We have to handle also the thunk jump code sequence:
	 *
	 * 	ldr ip, [pc]
	 * 	mov pc, ip
	 * 	address constant // execution never reaches here
	 */
	if ((ins & 0x0ffffff0) == 0x12fff10) {
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		 *  b       0xc
		 *  .word code_ptr
		 *  mov     lr, pc
		 *  bx      ips
		 * */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		/* re-emit the expected sequence into ccode so we can compare */
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)target;
			return;
		}
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
		/*
		 * ldr ip, [pc, #0]
		 * b 0xc
		 * .word code_ptr
		 * blx ip
		 */
		guint32 ccode [4];
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_B (emit, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		/* replace the embedded address constant */
		code32 [-1] = (guint32)target;
	} else {
		guint32 ccode [4];
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)target;
			return;
		}
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)target;
			return;
		}
		g_assert_not_reached ();
	}
//	g_print ("patched with 0x%08x\n", ins);
}
/*
 * arm_patch:
 *
 *   Patch the branch/call at CODE to transfer control to TARGET.
 * Passes a NULL domain, so thunk allocation (if needed) falls back to
 * mono_domain_get () inside handle_thunk ().
 */
void
arm_patch (guchar *code, const guchar *target)
{
	arm_patch_general (NULL, code, target);
}
2828 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2829 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2830 * to be used with the emit macros.
2831 * Return -1 otherwise.
2834 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2836 guint32 res, i;
2837 for (i = 0; i < 31; i+= 2) {
2838 res = (val << (32 - i)) | (val >> i);
2839 if (res & ~0xff)
2840 continue;
2841 *rot_amount = i? 32 - i: 0;
2842 return res;
2844 return -1;
/*
 * Emits in code a sequence of instructions that load the value 'val'
 * into the dreg register. Uses at most 4 instructions.
 * Strategy: try a single MOV (rotated imm8), then a single MVN of the
 * complement, then MOVW/MOVT on ARMv7, and finally build the value a
 * byte at a time with MOV + up to three ADDs.
 */
guint8*
mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
{
	int imm8, rot_amount;
#if 0
	/* disabled alternative: load from an inline constant pool */
	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
	/* skip the constant pool */
	ARM_B (code, 0);
	*(int*)code = val;
	code += 4;
	return code;
#endif
	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
		/* the complement is encodable: use move-negated */
		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	} else {
		if (v7_supported) {
			/* ARMv7: movw loads the low half, movt the high half */
			ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
			if (val >> 16)
				ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
			return code;
		}
		/* build the constant byte by byte, starting from the lowest
		 * non-zero byte (val == 0 was already handled by the MOV case) */
		if (val & 0xFF) {
			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
			if (val & 0xFF00) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
			}
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF00) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
			if (val & 0xFF0000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
			}
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		} else if (val & 0xFF0000) {
			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
			if (val & 0xFF000000) {
				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
			}
		}
		//g_assert_not_reached ();
	}
	return code;
}
/* Return whether the Thumb instruction set is available (exposes the
 * file-static thumb_supported flag to other compilation units). */
gboolean
mono_arm_thumb_supported (void)
{
	return thumb_supported;
}
/*
 * emit_load_volatile_arguments:
 *
 *  Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
static guint8*
emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
{
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	MonoInst *inst;
	CallInfo *cinfo;
	guint32 i, pos;

	/* FIXME: Generate intermediate code instead */

	sig = mono_method_signature (method);

	/* This is the opposite of the code in emit_prolog */

	pos = 0;

	cinfo = get_call_info (NULL, sig, sig->pinvoke);

	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* reload the hidden valuetype-return address argument */
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	}
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
		if (inst->opcode == OP_REGVAR) {
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				// FIXME:
				NOT_IMPLEMENTED;
				/* NOTE(review): disabled code below references
				 * prev_sp_offset, which is not defined in this
				 * function — confirm against emit_prolog before
				 * re-enabling.
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				}
				*/
			} else
				g_assert_not_reached ();
		} else {
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
				switch (ainfo->size) {
				case 1:
				case 2:
					// FIXME:
					NOT_IMPLEMENTED;
					break;
				case 8:
					/* 64 bit argument: reload both words of the pair */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					}
					break;
				}
			} else if (ainfo->storage == RegTypeBaseGen) {
				// FIXME:
				NOT_IMPLEMENTED;
			} else if (ainfo->storage == RegTypeBase) {
				/* Nothing to do */
			} else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeStructByVal) {
				/* reload the struct words that were passed in registers */
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				if (mono_class_from_mono_type (inst->inst_vtype))
					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					}
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				}
				if (ainfo->vtsize)
					// FIXME:
					NOT_IMPLEMENTED;
			} else if (ainfo->storage == RegTypeStructByAddr) {
			} else {
				// FIXME:
				NOT_IMPLEMENTED;
			}
		}
		pos ++;
	}

	g_free (cinfo);

	return code;
}
3029 #ifndef DISABLE_JIT
3031 void
3032 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3034 MonoInst *ins;
3035 MonoCallInst *call;
3036 guint offset;
3037 guint8 *code = cfg->native_code + cfg->code_len;
3038 MonoInst *last_ins = NULL;
3039 guint last_offset = 0;
3040 int max_len, cpos;
3041 int imm8, rot_amount;
3043 /* we don't align basic blocks of loops on arm */
3045 if (cfg->verbose_level > 2)
3046 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3048 cpos = bb->max_offset;
3050 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3051 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3052 //g_assert (!mono_compile_aot);
3053 //cpos += 6;
3054 //if (bb->cil_code)
3055 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3056 /* this is not thread save, but good enough */
3057 /* fixme: howto handle overflows? */
3058 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3061 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3062 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3063 (gpointer)"mono_break");
3064 code = emit_call_seq (cfg, code);
3067 MONO_BB_FOR_EACH_INS (bb, ins) {
3068 offset = code - cfg->native_code;
3070 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3072 if (offset > (cfg->code_size - max_len - 16)) {
3073 cfg->code_size *= 2;
3074 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3075 code = cfg->native_code + offset;
3077 // if (ins->cil_code)
3078 // g_print ("cil code\n");
3079 mono_debug_record_line_number (cfg, ins, offset);
3081 switch (ins->opcode) {
3082 case OP_MEMORY_BARRIER:
3083 break;
3084 case OP_TLS_GET:
3085 #ifdef HAVE_AEABI_READ_TP
3086 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3087 (gpointer)"__aeabi_read_tp");
3088 code = emit_call_seq (cfg, code);
3090 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3091 #else
3092 g_assert_not_reached ();
3093 #endif
3094 break;
3095 /*case OP_BIGMUL:
3096 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3097 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3098 break;
3099 case OP_BIGMUL_UN:
3100 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3101 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3102 break;*/
3103 case OP_STOREI1_MEMBASE_IMM:
3104 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3105 g_assert (arm_is_imm12 (ins->inst_offset));
3106 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3107 break;
3108 case OP_STOREI2_MEMBASE_IMM:
3109 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3110 g_assert (arm_is_imm8 (ins->inst_offset));
3111 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3112 break;
3113 case OP_STORE_MEMBASE_IMM:
3114 case OP_STOREI4_MEMBASE_IMM:
3115 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3116 g_assert (arm_is_imm12 (ins->inst_offset));
3117 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3118 break;
3119 case OP_STOREI1_MEMBASE_REG:
3120 g_assert (arm_is_imm12 (ins->inst_offset));
3121 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3122 break;
3123 case OP_STOREI2_MEMBASE_REG:
3124 g_assert (arm_is_imm8 (ins->inst_offset));
3125 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3126 break;
3127 case OP_STORE_MEMBASE_REG:
3128 case OP_STOREI4_MEMBASE_REG:
3129 /* this case is special, since it happens for spill code after lowering has been called */
3130 if (arm_is_imm12 (ins->inst_offset)) {
3131 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3132 } else {
3133 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3134 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3136 break;
3137 case OP_STOREI1_MEMINDEX:
3138 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3139 break;
3140 case OP_STOREI2_MEMINDEX:
3141 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3142 break;
3143 case OP_STORE_MEMINDEX:
3144 case OP_STOREI4_MEMINDEX:
3145 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3146 break;
3147 case OP_LOADU4_MEM:
3148 g_assert_not_reached ();
3149 break;
3150 case OP_LOAD_MEMINDEX:
3151 case OP_LOADI4_MEMINDEX:
3152 case OP_LOADU4_MEMINDEX:
3153 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3154 break;
3155 case OP_LOADI1_MEMINDEX:
3156 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3157 break;
3158 case OP_LOADU1_MEMINDEX:
3159 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3160 break;
3161 case OP_LOADI2_MEMINDEX:
3162 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3163 break;
3164 case OP_LOADU2_MEMINDEX:
3165 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3166 break;
3167 case OP_LOAD_MEMBASE:
3168 case OP_LOADI4_MEMBASE:
3169 case OP_LOADU4_MEMBASE:
3170 /* this case is special, since it happens for spill code after lowering has been called */
3171 if (arm_is_imm12 (ins->inst_offset)) {
3172 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3173 } else {
3174 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3175 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3177 break;
3178 case OP_LOADI1_MEMBASE:
3179 g_assert (arm_is_imm8 (ins->inst_offset));
3180 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3181 break;
3182 case OP_LOADU1_MEMBASE:
3183 g_assert (arm_is_imm12 (ins->inst_offset));
3184 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3185 break;
3186 case OP_LOADU2_MEMBASE:
3187 g_assert (arm_is_imm8 (ins->inst_offset));
3188 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3189 break;
3190 case OP_LOADI2_MEMBASE:
3191 g_assert (arm_is_imm8 (ins->inst_offset));
3192 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3193 break;
3194 case OP_ICONV_TO_I1:
3195 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3196 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3197 break;
3198 case OP_ICONV_TO_I2:
3199 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3200 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3201 break;
3202 case OP_ICONV_TO_U1:
3203 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3204 break;
3205 case OP_ICONV_TO_U2:
3206 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3207 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3208 break;
3209 case OP_COMPARE:
3210 case OP_ICOMPARE:
3211 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3212 break;
3213 case OP_COMPARE_IMM:
3214 case OP_ICOMPARE_IMM:
3215 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3216 g_assert (imm8 >= 0);
3217 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3218 break;
3219 case OP_BREAK:
3221 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3222 * So instead of emitting a trap, we emit a call a C function and place a
3223 * breakpoint there.
3225 //*(int*)code = 0xef9f0001;
3226 //code += 4;
3227 //ARM_DBRK (code);
3228 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3229 (gpointer)"mono_break");
3230 code = emit_call_seq (cfg, code);
3231 break;
/* NOTE(review): this whole chunk is the interior of the big opcode switch in
 * mono_arch_output_basic_block (); the extraction dropped some original lines
 * (comment delimiters, a few closing braces), so code below is kept verbatim. */
/* Nop-family opcodes: OP_RELAXED_NOP emits a real NOP; the rest emit nothing. */
3232 case OP_RELAXED_NOP:
3233 ARM_NOP (code);
3234 break;
3235 case OP_NOP:
3236 case OP_DUMMY_USE:
3237 case OP_DUMMY_STORE:
3238 case OP_NOT_REACHED:
3239 case OP_NOT_NULL:
3240 break;
/* Sequence point: emit the single-step trigger-page load and a 4-instruction
 * breakpoint placeholder (or, for AOT, a load through SeqPointInfo->bp_addrs). */
3241 case OP_SEQ_POINT: {
3242 int i;
3243 MonoInst *info_var = cfg->arch.seq_point_info_var;
3244 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3245 MonoInst *var;
3246 int dreg = ARMREG_LR;
3249 * For AOT, we use one got slot per method, which will point to a
3250 * SeqPointInfo structure, containing all the information required
3251 * by the code below.
3253 if (cfg->compile_aot) {
3254 g_assert (info_var);
3255 g_assert (info_var->opcode == OP_REGOFFSET);
3256 g_assert (arm_is_imm12 (info_var->inst_offset));
3260 * Read from the single stepping trigger page. This will cause a
3261 * SIGSEGV when single stepping is enabled.
3262 * We do this _before_ the breakpoint, so single stepping after
3263 * a breakpoint is hit will step to the next IL offset.
3265 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3267 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3268 if (cfg->compile_aot) {
3269 /* Load the trigger page addr from the variable initialized in the prolog */
3270 var = ss_trigger_page_var;
3271 g_assert (var);
3272 g_assert (var->opcode == OP_REGOFFSET);
3273 g_assert (arm_is_imm12 (var->inst_offset));
3274 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3275 } else {
/* Inline constant: ldr dreg, [pc] / b +0 skips over the embedded word. */
3276 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3277 ARM_B (code, 0);
3278 *(int*)code = (int)ss_trigger_page;
3279 code += 4;
3281 ARM_LDR_IMM (code, dreg, dreg, 0);
3284 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3286 if (cfg->compile_aot) {
3287 guint32 offset = code - cfg->native_code;
3288 guint32 val;
3290 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3291 /* Add the offset */
3292 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
/* 'val' is added one byte at a time (ARM rotated-imm8 encoding); the padding
 * NOPs keep the emitted length constant regardless of which bytes are set. */
3293 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3295 * Have to emit nops to keep the difference between the offset
3296 * stored in seq_points and breakpoint instruction constant,
3297 * mono_arch_get_ip_for_breakpoint () depends on this.
3299 if (val & 0xFF00)
3300 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3301 else
3302 ARM_NOP (code);
3303 if (val & 0xFF0000)
3304 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3305 else
3306 ARM_NOP (code);
3307 g_assert (!(val & 0xFF000000));
3308 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3309 ARM_LDR_IMM (code, dreg, dreg, 0);
3311 /* What is faster, a branch or a load ? */
3312 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3313 /* The breakpoint instruction */
3314 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3315 } else {
3317 * A placeholder for a possible breakpoint inserted by
3318 * mono_arch_set_breakpoint ().
3320 for (i = 0; i < 4; ++i)
3321 ARM_NOP (code);
3323 break;
/* Integer add/subtract family. ADDS/SUBS/ADCS/SBCS variants set the CPSR
 * flags; the *_IMM forms require the constant to fit ARM's rotated-imm8
 * encoding (checked via mono_arm_is_rotated_imm8 + g_assert). */
3325 case OP_ADDCC:
3326 case OP_IADDCC:
3327 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3328 break;
3329 case OP_IADD:
3330 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3331 break;
3332 case OP_ADC:
3333 case OP_IADC:
3334 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3335 break;
3336 case OP_ADDCC_IMM:
3337 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3338 g_assert (imm8 >= 0);
3339 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3340 break;
3341 case OP_ADD_IMM:
3342 case OP_IADD_IMM:
3343 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3344 g_assert (imm8 >= 0);
3345 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3346 break;
3347 case OP_ADC_IMM:
3348 case OP_IADC_IMM:
3349 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3350 g_assert (imm8 >= 0);
3351 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3352 break;
/* NOTE(review): the *_OVF cases emit the plain arithmetic only -- the
 * commented-out PPC-style overflow checks below were never ported to ARM. */
3353 case OP_IADD_OVF:
3354 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3355 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3356 break;
3357 case OP_IADD_OVF_UN:
3358 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3359 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3360 break;
3361 case OP_ISUB_OVF:
3362 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3363 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3364 break;
3365 case OP_ISUB_OVF_UN:
3366 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3367 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3368 break;
3369 case OP_ADD_OVF_CARRY:
3370 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3371 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3372 break;
3373 case OP_ADD_OVF_UN_CARRY:
3374 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3375 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3376 break;
3377 case OP_SUB_OVF_CARRY:
3378 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3379 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3380 break;
3381 case OP_SUB_OVF_UN_CARRY:
3382 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3383 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3384 break;
3385 case OP_SUBCC:
3386 case OP_ISUBCC:
3387 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3388 break;
3389 case OP_SUBCC_IMM:
3390 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3391 g_assert (imm8 >= 0);
3392 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3393 break;
3394 case OP_ISUB:
3395 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3396 break;
3397 case OP_SBB:
3398 case OP_ISBB:
3399 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3400 break;
3401 case OP_SUB_IMM:
3402 case OP_ISUB_IMM:
3403 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3404 g_assert (imm8 >= 0);
3405 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3406 break;
3407 case OP_SBB_IMM:
3408 case OP_ISBB_IMM:
3409 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3410 g_assert (imm8 >= 0);
3411 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3412 break;
/* Reverse-subtract forms (dreg = imm - sreg1 with flags / with carry). */
3413 case OP_ARM_RSBS_IMM:
3414 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3415 g_assert (imm8 >= 0);
3416 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3417 break;
3418 case OP_ARM_RSC_IMM:
3419 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3420 g_assert (imm8 >= 0);
3421 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3422 break;
3423 case OP_IAND:
3424 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3425 break;
3426 case OP_AND_IMM:
3427 case OP_IAND_IMM:
3428 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3429 g_assert (imm8 >= 0);
3430 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3431 break;
/* Division/remainder must be lowered to helper calls before reaching the
 * emitter; pre-ARMv7 has no hardware divide, hence the hard assert. */
3432 case OP_IDIV:
3433 case OP_IDIV_UN:
3434 case OP_DIV_IMM:
3435 case OP_IREM:
3436 case OP_IREM_UN:
3437 case OP_REM_IMM:
3438 /* crappy ARM arch doesn't have a DIV instruction */
3439 g_assert_not_reached ();
/* Bitwise or/xor, shifts, not, negate and multiply. Shift-by-immediate
 * masks the count to 0..31 and degenerates to a plain move for count 0. */
3440 case OP_IOR:
3441 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3442 break;
3443 case OP_OR_IMM:
3444 case OP_IOR_IMM:
3445 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3446 g_assert (imm8 >= 0);
3447 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3448 break;
3449 case OP_IXOR:
3450 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3451 break;
3452 case OP_XOR_IMM:
3453 case OP_IXOR_IMM:
3454 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3455 g_assert (imm8 >= 0);
3456 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3457 break;
3458 case OP_ISHL:
3459 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3460 break;
3461 case OP_SHL_IMM:
3462 case OP_ISHL_IMM:
3463 if (ins->inst_imm)
3464 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3465 else if (ins->dreg != ins->sreg1)
3466 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3467 break;
3468 case OP_ISHR:
3469 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3470 break;
3471 case OP_SHR_IMM:
3472 case OP_ISHR_IMM:
3473 if (ins->inst_imm)
3474 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3475 else if (ins->dreg != ins->sreg1)
3476 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3477 break;
3478 case OP_SHR_UN_IMM:
3479 case OP_ISHR_UN_IMM:
3480 if (ins->inst_imm)
3481 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3482 else if (ins->dreg != ins->sreg1)
3483 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3484 break;
3485 case OP_ISHR_UN:
3486 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3487 break;
3488 case OP_INOT:
3489 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3490 break;
/* Negation as reverse-subtract from zero: dreg = 0 - sreg1. */
3491 case OP_INEG:
3492 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3493 break;
/* Operands are swapped when dreg != sreg2; presumably to satisfy an ARM
 * MUL encoding constraint (Rd != Rm on older cores) -- TODO confirm. */
3494 case OP_IMUL:
3495 if (ins->dreg == ins->sreg2)
3496 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3497 else
3498 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3499 break;
3500 case OP_MUL_IMM:
3501 g_assert_not_reached ();
3502 break;
3503 case OP_IMUL_OVF:
3504 /* FIXME: handle ovf/ sreg2 != dreg */
3505 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3506 /* FIXME: MUL doesn't set the C/O flags on ARM */
3507 break;
3508 case OP_IMUL_OVF_UN:
3509 /* FIXME: handle ovf/ sreg2 != dreg */
3510 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3511 /* FIXME: MUL doesn't set the C/O flags on ARM */
3512 break;
/* Constant loads, register moves and 64-bit return-value shuffling. */
3513 case OP_ICONST:
3514 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3515 break;
/* AOT constant: records a patch, then emits ldr dreg, [pc] / b +0 over an
 * embedded NULL slot that the patcher fills with the GOT offset; the final
 * pc-relative ldr fetches the real value from the GOT. */
3516 case OP_AOTCONST:
3517 /* Load the GOT offset */
3518 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3519 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3520 ARM_B (code, 0);
3521 *(gpointer*)code = NULL;
3522 code += 4;
3523 /* Load the value from the GOT */
3524 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3525 break;
3526 case OP_ICONV_TO_I4:
3527 case OP_ICONV_TO_U4:
3528 case OP_MOVE:
3529 if (ins->dreg != ins->sreg1)
3530 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3531 break;
/* Place a 64-bit return value into the LSW/MSW register pair; sreg2 is
 * parked in LR first if it would be clobbered by the LSW move. */
3532 case OP_SETLRET: {
3533 int saved = ins->sreg2;
3534 if (ins->sreg2 == ARM_LSW_REG) {
3535 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3536 saved = ARMREG_LR;
3538 if (ins->sreg1 != ARM_LSW_REG)
3539 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3540 if (saved != ARM_MSW_REG)
3541 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3542 break;
/* Float moves/conversions, tail-call style OP_JMP, null check, arglist. */
3544 case OP_FMOVE:
3545 #ifdef ARM_FPU_FPA
3546 ARM_MVFD (code, ins->dreg, ins->sreg1);
3547 #elif defined(ARM_FPU_VFP)
3548 ARM_CPYD (code, ins->dreg, ins->sreg1);
3549 #endif
3550 break;
/* Double -> float: VFP round-trips through single precision to force the
 * narrowing, then widens back (registers hold doubles internally). */
3551 case OP_FCONV_TO_R4:
3552 #ifdef ARM_FPU_FPA
3553 ARM_MVFS (code, ins->dreg, ins->sreg1);
3554 #elif defined(ARM_FPU_VFP)
3555 ARM_CVTD (code, ins->dreg, ins->sreg1);
3556 ARM_CVTS (code, ins->dreg, ins->dreg);
3557 #endif
3558 break;
/* OP_JMP: tear down the frame (restore regs, pop) and branch straight to
 * the target method -- mirrors the epilog, hence the sync comment. */
3559 case OP_JMP:
3561 * Keep in sync with mono_arch_emit_epilog
3563 g_assert (!cfg->method->save_lmf);
3565 code = emit_load_volatile_arguments (cfg, code);
3567 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3568 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3569 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3570 if (cfg->compile_aot) {
3571 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3572 ARM_B (code, 0);
3573 *(gpointer*)code = NULL;
3574 code += 4;
3575 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3576 } else {
3577 ARM_B (code, 0);
3579 break;
/* A load through sreg1 into the scratch LR register faults on NULL. */
3580 case OP_CHECK_THIS:
3581 /* ensure ins->sreg1 is not NULL */
3582 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3583 break;
3584 case OP_ARGLIST: {
3585 g_assert (cfg->sig_cookie < 128);
3586 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3587 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3588 break;
/* Call family: direct (patched), through a register, and through memory.
 * All variants finish by moving the return value into place. */
3590 case OP_FCALL:
3591 case OP_LCALL:
3592 case OP_VCALL:
3593 case OP_VCALL2:
3594 case OP_VOIDCALL:
3595 case OP_CALL:
3596 call = (MonoCallInst*)ins;
3597 if (ins->flags & MONO_INST_HAS_METHOD)
3598 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3599 else
3600 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3601 code = emit_call_seq (cfg, code);
3602 code = emit_move_return_value (cfg, ins, code);
3603 break;
3604 case OP_FCALL_REG:
3605 case OP_LCALL_REG:
3606 case OP_VCALL_REG:
3607 case OP_VCALL2_REG:
3608 case OP_VOIDCALL_REG:
3609 case OP_CALL_REG:
3610 code = emit_call_reg (code, ins->sreg1);
3611 code = emit_move_return_value (cfg, ins, code);
3612 break;
/* Membase call: ldr pc, [sreg1 + offset]. For interface calls the method
 * pointer (or NULL, signalling "value is in V5") is embedded right after
 * the call site so the IMT thunk can read it via the return address. */
3613 case OP_FCALL_MEMBASE:
3614 case OP_LCALL_MEMBASE:
3615 case OP_VCALL_MEMBASE:
3616 case OP_VCALL2_MEMBASE:
3617 case OP_VOIDCALL_MEMBASE:
3618 case OP_CALL_MEMBASE:
3619 g_assert (arm_is_imm12 (ins->inst_offset));
3620 g_assert (ins->sreg1 != ARMREG_LR);
3621 call = (MonoCallInst*)ins;
3622 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3623 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3624 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3626 * We can't embed the method in the code stream in PIC code, or
3627 * in gshared code.
3628 * Instead, we put it in V5 in code emitted by
3629 * mono_arch_emit_imt_argument (), and embed NULL here to
3630 * signal the IMT thunk that the value is in V5.
3632 if (call->dynamic_imt_arg)
3633 *((gpointer*)code) = NULL;
3634 else
3635 *((gpointer*)code) = (gpointer)call->method;
3636 code += 4;
3637 } else {
3638 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3639 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3641 code = emit_move_return_value (cfg, ins, code);
3642 break;
/* Stack allocation: round the requested size up to 8 bytes, reserve the
 * param area below it, optionally zero the block with a store loop, and
 * return a pointer just above the param area. */
3643 case OP_LOCALLOC: {
3644 /* keep alignment */
3645 int alloca_waste = cfg->param_area;
3646 alloca_waste += 7;
3647 alloca_waste &= ~7;
3648 /* round the size to 8 bytes */
3649 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3650 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3651 if (alloca_waste)
3652 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3653 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3654 /* memzero the area: dreg holds the size, sp is the pointer */
3655 if (ins->flags & MONO_INST_INIT) {
3656 guint8 *start_loop, *branch_to_cond;
3657 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3658 branch_to_cond = code;
3659 ARM_B (code, 0);
3660 start_loop = code;
3661 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3662 arm_patch (branch_to_cond, code);
3663 /* decrement by 4 and set flags */
3664 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3665 ARM_B_COND (code, ARMCOND_GE, 0);
3666 arm_patch (code - 4, start_loop);
3668 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3669 break;
/* Dynamic call: args buffer (filled by mono_arch_get_dyn_call_args ())
 * arrives in sreg1, target in sreg2; stack slots and r0-r3 are loaded from
 * the buffer, then r0/r1 are written back into DynCallArgs.res/res2. */
3671 case OP_DYN_CALL: {
3672 int i;
3673 MonoInst *var = cfg->dyn_call_var;
3675 g_assert (var->opcode == OP_REGOFFSET);
3676 g_assert (arm_is_imm12 (var->inst_offset));
3678 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3679 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3680 /* ip = ftn */
3681 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3683 /* Save args buffer */
3684 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3686 /* Set stack slots using R0 as scratch reg */
3687 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3688 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3689 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3690 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3693 /* Set argument registers */
3694 for (i = 0; i < PARAM_REGS; ++i)
3695 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3697 /* Make the call */
3698 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3699 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3701 /* Save result */
3702 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3703 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3704 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3705 break;
/* Throw/rethrow: exception object goes in r0, then a patched call to the
 * corresponding runtime helper. */
3707 case OP_THROW: {
3708 if (ins->sreg1 != ARMREG_R0)
3709 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3710 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3711 (gpointer)"mono_arch_throw_exception");
3712 code = emit_call_seq (cfg, code);
3713 break;
3715 case OP_RETHROW: {
3716 if (ins->sreg1 != ARMREG_R0)
3717 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3718 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3719 (gpointer)"mono_arch_rethrow_exception");
3720 code = emit_call_seq (cfg, code);
3721 break;
/* Exception-handler entry/exit: the handler's return address (LR) is
 * spilled to the region's spvar on entry and reloaded into PC on exit.
 * IP is used as an address scratch when the offset exceeds imm12. */
3723 case OP_START_HANDLER: {
3724 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3726 if (arm_is_imm12 (spvar->inst_offset)) {
3727 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3728 } else {
3729 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3730 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3732 break;
/* ENDFILTER also moves the filter result into r0 before returning. */
3734 case OP_ENDFILTER: {
3735 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3737 if (ins->sreg1 != ARMREG_R0)
3738 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3739 if (arm_is_imm12 (spvar->inst_offset)) {
3740 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3741 } else {
3742 g_assert (ARMREG_IP != spvar->inst_basereg);
3743 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3744 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3746 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3747 break;
3749 case OP_ENDFINALLY: {
3750 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3752 if (arm_is_imm12 (spvar->inst_offset)) {
3753 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3754 } else {
3755 g_assert (ARMREG_IP != spvar->inst_basereg);
3756 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3757 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3759 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3760 break;
/* BL to the handler block; destination filled in by the BB patch. */
3762 case OP_CALL_HANDLER:
3763 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3764 ARM_BL (code, 0);
3765 break;
3766 case OP_LABEL:
3767 ins->inst_c0 = code - cfg->native_code;
3768 break;
3769 case OP_BR:
3770 /*if (ins->inst_target_bb->native_offset) {
3771 ARM_B (code, 0);
3772 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3773 } else*/ {
3774 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3775 ARM_B (code, 0);
3777 break;
3778 case OP_BR_REG:
3779 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3780 break;
/* Jump table: ldr pc, [pc, sreg1 << 2] followed by the inline table.
 * ins->klass carries the entry count; the code buffer is grown here if
 * the table would overflow it. */
3781 case OP_SWITCH:
3783 * In the normal case we have:
3784 * ldr pc, [pc, ins->sreg1 << 2]
3785 * nop
3786 * If aot, we have:
3787 * ldr lr, [pc, ins->sreg1 << 2]
3788 * add pc, pc, lr
3789 * After follows the data.
3790 * FIXME: add aot support.
3792 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3793 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3794 if (offset > (cfg->code_size - max_len - 16)) {
3795 cfg->code_size += max_len;
3796 cfg->code_size *= 2;
3797 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3798 code = cfg->native_code + offset;
3800 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3801 ARM_NOP (code);
3802 code += 4 * GPOINTER_TO_INT (ins->klass);
3803 break;
/* Set-on-condition: materialize 0/1 in dreg from the current flags using
 * conditionally-executed MOVs (LO/HI for the unsigned compares). */
3804 case OP_CEQ:
3805 case OP_ICEQ:
3806 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3807 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3808 break;
3809 case OP_CLT:
3810 case OP_ICLT:
3811 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3812 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3813 break;
3814 case OP_CLT_UN:
3815 case OP_ICLT_UN:
3816 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3817 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3818 break;
3819 case OP_CGT:
3820 case OP_ICGT:
3821 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3822 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3823 break;
3824 case OP_CGT_UN:
3825 case OP_ICGT_UN:
3826 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3827 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3828 break;
/* Conditional exceptions: the opcode offset from the group base selects
 * the condition; EMIT_COND_SYSTEM_EXCEPTION maps it to an ARM cond code. */
3829 case OP_COND_EXC_EQ:
3830 case OP_COND_EXC_NE_UN:
3831 case OP_COND_EXC_LT:
3832 case OP_COND_EXC_LT_UN:
3833 case OP_COND_EXC_GT:
3834 case OP_COND_EXC_GT_UN:
3835 case OP_COND_EXC_GE:
3836 case OP_COND_EXC_GE_UN:
3837 case OP_COND_EXC_LE:
3838 case OP_COND_EXC_LE_UN:
3839 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3840 break;
3841 case OP_COND_EXC_IEQ:
3842 case OP_COND_EXC_INE_UN:
3843 case OP_COND_EXC_ILT:
3844 case OP_COND_EXC_ILT_UN:
3845 case OP_COND_EXC_IGT:
3846 case OP_COND_EXC_IGT_UN:
3847 case OP_COND_EXC_IGE:
3848 case OP_COND_EXC_IGE_UN:
3849 case OP_COND_EXC_ILE:
3850 case OP_COND_EXC_ILE_UN:
3851 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3852 break;
/* Carry / overflow (and their negations) map to CS/VS/CC/VC. */
3853 case OP_COND_EXC_C:
3854 case OP_COND_EXC_IC:
3855 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3856 break;
3857 case OP_COND_EXC_OV:
3858 case OP_COND_EXC_IOV:
3859 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3860 break;
3861 case OP_COND_EXC_NC:
3862 case OP_COND_EXC_INC:
3863 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3864 break;
3865 case OP_COND_EXC_NO:
3866 case OP_COND_EXC_INO:
3867 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3868 break;
3869 case OP_IBEQ:
3870 case OP_IBNE_UN:
3871 case OP_IBLT:
3872 case OP_IBLT_UN:
3873 case OP_IBGT:
3874 case OP_IBGT_UN:
3875 case OP_IBGE:
3876 case OP_IBGE_UN:
3877 case OP_IBLE:
3878 case OP_IBLE_UN:
3879 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3880 break;
3882 /* floating point opcodes */
/* FPA backend: constants are either embedded inline after a skip-branch
 * (AOT) or loaded via an absolute address materialized in LR (JIT). */
3883 #ifdef ARM_FPU_FPA
3884 case OP_R8CONST:
3885 if (cfg->compile_aot) {
3886 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3887 ARM_B (code, 1);
3888 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3889 code += 4;
3890 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3891 code += 4;
3892 } else {
3893 /* FIXME: we can optimize the imm load by dealing with part of
3894 * the displacement in LDFD (aligning to 512).
3896 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3897 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3899 break;
3900 case OP_R4CONST:
3901 if (cfg->compile_aot) {
3902 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3903 ARM_B (code, 0);
3904 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3905 code += 4;
3906 } else {
3907 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3908 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3910 break;
/* Offsets beyond FPA's 8-bit immediate range go through an LR-computed
 * effective address. */
3911 case OP_STORER8_MEMBASE_REG:
3912 /* This is generated by the local regalloc pass which runs after the lowering pass */
3913 if (!arm_is_fpimm8 (ins->inst_offset)) {
3914 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3915 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3916 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3917 } else {
3918 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3920 break;
3921 case OP_LOADR8_MEMBASE:
3922 /* This is generated by the local regalloc pass which runs after the lowering pass */
3923 if (!arm_is_fpimm8 (ins->inst_offset)) {
3924 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3925 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3926 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3927 } else {
3928 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3930 break;
3931 case OP_STORER4_MEMBASE_REG:
3932 g_assert (arm_is_fpimm8 (ins->inst_offset));
3933 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3934 break;
3935 case OP_LOADR4_MEMBASE:
3936 g_assert (arm_is_fpimm8 (ins->inst_offset));
3937 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3938 break;
/* Unsigned int -> double: FLTD treats the input as signed, so when the
 * sign bit was set, 2^32 (0x41f00000:00000000, embedded below) is added
 * back using a scratch FP register that is spilled around the fixup. */
3939 case OP_ICONV_TO_R_UN: {
3940 int tmpreg;
3941 tmpreg = ins->dreg == 0? 1: 0;
3942 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3943 ARM_FLTD (code, ins->dreg, ins->sreg1);
3944 ARM_B_COND (code, ARMCOND_GE, 8);
3945 /* save the temp register */
3946 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3947 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3948 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3949 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3950 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3951 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3952 /* skip the constant pool */
3953 ARM_B (code, 8);
3954 code += 4;
3955 *(int*)code = 0x41f00000;
3956 code += 4;
3957 *(int*)code = 0;
3958 code += 4;
3959 /* FIXME: adjust:
3960 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3961 * adfltd fdest, fdest, ftemp
3963 break;
3965 case OP_ICONV_TO_R4:
3966 ARM_FLTS (code, ins->dreg, ins->sreg1);
3967 break;
3968 case OP_ICONV_TO_R8:
3969 ARM_FLTD (code, ins->dreg, ins->sreg1);
3970 break;
/* VFP backend: same opcode set as the FPA section above, using FLDD/FSTD
 * and the single<->double CVT pair for R4 handling. */
3972 #elif defined(ARM_FPU_VFP)
3974 case OP_R8CONST:
3975 if (cfg->compile_aot) {
3976 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3977 ARM_B (code, 1);
3978 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3979 code += 4;
3980 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3981 code += 4;
3982 } else {
3983 /* FIXME: we can optimize the imm load by dealing with part of
3984 * the displacement in LDFD (aligning to 512).
3986 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3987 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3989 break;
/* R4 constants are loaded as single precision then widened to double,
 * since FP registers hold doubles internally. */
3990 case OP_R4CONST:
3991 if (cfg->compile_aot) {
3992 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3993 ARM_B (code, 0);
3994 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3995 code += 4;
3996 ARM_CVTS (code, ins->dreg, ins->dreg);
3997 } else {
3998 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3999 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4000 ARM_CVTS (code, ins->dreg, ins->dreg);
4002 break;
4003 case OP_STORER8_MEMBASE_REG:
4004 /* This is generated by the local regalloc pass which runs after the lowering pass */
4005 if (!arm_is_fpimm8 (ins->inst_offset)) {
4006 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4007 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4008 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4009 } else {
4010 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4012 break;
4013 case OP_LOADR8_MEMBASE:
4014 /* This is generated by the local regalloc pass which runs after the lowering pass */
4015 if (!arm_is_fpimm8 (ins->inst_offset)) {
4016 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4017 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4018 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4019 } else {
4020 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4022 break;
/* R4 store/load: narrow (or widen) through the F0 scratch register. */
4023 case OP_STORER4_MEMBASE_REG:
4024 g_assert (arm_is_fpimm8 (ins->inst_offset));
4025 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4026 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4027 break;
4028 case OP_LOADR4_MEMBASE:
4029 g_assert (arm_is_fpimm8 (ins->inst_offset));
4030 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4031 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4032 break;
4033 case OP_ICONV_TO_R_UN: {
4034 g_assert_not_reached ();
4035 break;
4037 case OP_ICONV_TO_R4:
4038 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4039 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4040 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4041 break;
4042 case OP_ICONV_TO_R8:
4043 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4044 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4045 break;
/* Float return value: R4 goes back in r0 (narrowed first), R8 in r0:r1. */
4047 case OP_SETFRET:
4048 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4049 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4050 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4051 } else {
4052 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4054 break;
/* Float -> integer conversions, delegated to emit_float_to_int with the
 * target width and signedness. 64-bit variants are helper calls upstream. */
4058 case OP_FCONV_TO_I1:
4059 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4060 break;
4061 case OP_FCONV_TO_U1:
4062 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4063 break;
4064 case OP_FCONV_TO_I2:
4065 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4066 break;
4067 case OP_FCONV_TO_U2:
4068 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4069 break;
4070 case OP_FCONV_TO_I4:
4071 case OP_FCONV_TO_I:
4072 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4073 break;
4074 case OP_FCONV_TO_U4:
4075 case OP_FCONV_TO_U:
4076 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4077 break;
4078 case OP_FCONV_TO_I8:
4079 case OP_FCONV_TO_U8:
4080 g_assert_not_reached ();
4081 /* Implemented as helper calls */
4082 break;
4083 case OP_LCONV_TO_R_UN:
4084 g_assert_not_reached ();
4085 /* Implemented as helper calls */
4086 break;
/* Checked long -> int: the value (sreg1 = low, sreg2 = high) is in range
 * iff high is 0 with bit 31 of low clear, or high is 0xFFFFFFFF with
 * bit 31 of low set; otherwise an OverflowException is raised. */
4087 case OP_LCONV_TO_OVF_I4_2: {
4088 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4090 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4093 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4094 high_bit_not_set = code;
4095 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4097 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4098 valid_negative = code;
4099 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4100 invalid_negative = code;
4101 ARM_B_COND (code, ARMCOND_AL, 0);
4103 arm_patch (high_bit_not_set, code);
4105 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4106 valid_positive = code;
4107 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4109 arm_patch (invalid_negative, code);
4110 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4112 arm_patch (valid_negative, code);
4113 arm_patch (valid_positive, code);
4115 if (ins->dreg != ins->sreg1)
4116 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4117 break;
/* Double-precision arithmetic, FPA and VFP flavors. */
4119 #ifdef ARM_FPU_FPA
4120 case OP_FADD:
4121 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4122 break;
4123 case OP_FSUB:
4124 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4125 break;
4126 case OP_FMUL:
4127 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4128 break;
4129 case OP_FDIV:
4130 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4131 break;
4132 case OP_FNEG:
4133 ARM_MNFD (code, ins->dreg, ins->sreg1);
4134 break;
4135 #elif defined(ARM_FPU_VFP)
4136 case OP_FADD:
4137 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4138 break;
4139 case OP_FSUB:
4140 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4141 break;
4142 case OP_FMUL:
4143 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4144 break;
4145 case OP_FDIV:
4146 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4147 break;
4148 case OP_FNEG:
4149 ARM_NEGD (code, ins->dreg, ins->sreg1);
4150 break;
4151 #endif
4152 case OP_FREM:
4153 /* emulated */
4154 g_assert_not_reached ();
4155 break;
/* Float compares: VFP needs FMSTAT to copy FPSCR flags into the CPSR.
 * The set-on-condition variants then use MI (less), EQ (equal) and VS
 * (unordered) per the FPA flags table further below; *_UN variants also
 * return 1 on unordered (VS). FCGT/FCGT_UN swap the operands. */
4156 case OP_FCOMPARE:
4157 #ifdef ARM_FPU_FPA
4158 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4159 #elif defined(ARM_FPU_VFP)
4160 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4161 ARM_FMSTAT (code);
4162 #endif
4163 break;
4164 case OP_FCEQ:
4165 #ifdef ARM_FPU_FPA
4166 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4167 #elif defined(ARM_FPU_VFP)
4168 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4169 ARM_FMSTAT (code);
4170 #endif
4171 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4172 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4173 break;
4174 case OP_FCLT:
4175 #ifdef ARM_FPU_FPA
4176 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4177 #elif defined(ARM_FPU_VFP)
4178 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4179 ARM_FMSTAT (code);
4180 #endif
4181 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4183 break;
4184 case OP_FCLT_UN:
4185 #ifdef ARM_FPU_FPA
4186 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4187 #elif defined(ARM_FPU_VFP)
4188 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4189 ARM_FMSTAT (code);
4190 #endif
4191 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4192 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4193 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4194 break;
4195 case OP_FCGT:
4196 /* swapped */
4197 #ifdef ARM_FPU_FPA
4198 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4199 #elif defined(ARM_FPU_VFP)
4200 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4201 ARM_FMSTAT (code);
4202 #endif
4203 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4204 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4205 break;
4206 case OP_FCGT_UN:
4207 /* swapped */
4208 #ifdef ARM_FPU_FPA
4209 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4210 #elif defined(ARM_FPU_VFP)
4211 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4212 ARM_FMSTAT (code);
4213 #endif
4214 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4215 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4216 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4217 break;
4218 /* ARM FPA flags table:
4219 * N Less than ARMCOND_MI
4220 * Z Equal ARMCOND_EQ
4221 * C Greater Than or Equal ARMCOND_CS
4222 * V Unordered ARMCOND_VS
/* Float branches, using the flag mapping above. *_UN variants emit an
 * extra branch on VS so unordered also takes the branch. */
4224 case OP_FBEQ:
4225 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4226 break;
4227 case OP_FBNE_UN:
4228 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4229 break;
4230 case OP_FBLT:
4231 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4232 break;
4233 case OP_FBLT_UN:
4234 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4235 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4236 break;
/* NOTE(review): GT/LE float branches are asserted unreachable here --
 * presumably they are decomposed into the swapped-compare forms earlier
 * in the pipeline; verify against the lowering pass. */
4237 case OP_FBGT:
4238 case OP_FBGT_UN:
4239 case OP_FBLE:
4240 case OP_FBLE_UN:
4241 g_assert_not_reached ();
4242 break;
4243 case OP_FBGE:
4244 #ifdef ARM_FPU_VFP
4245 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4246 #else
4247 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4248 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4249 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4250 #endif
4251 break;
4252 case OP_FBGE_UN:
4253 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4254 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4255 break;
/* CKFINITE (VFP): throw ArithmeticException if |sreg1| > the largest
 * finite double (0x7fefffff:ffffffff, embedded inline) -- i.e. infinity --
 * or if sreg1 is NaN (compare against itself sets V/unordered). */
4257 case OP_CKFINITE: {
4258 #ifdef ARM_FPU_FPA
4259 if (ins->dreg != ins->sreg1)
4260 ARM_MVFD (code, ins->dreg, ins->sreg1);
4261 #elif defined(ARM_FPU_VFP)
4262 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4263 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4264 ARM_B (code, 1);
4265 *(guint32*)code = 0xffffffff;
4266 code += 4;
4267 *(guint32*)code = 0x7fefffff;
4268 code += 4;
4269 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4270 ARM_FMSTAT (code);
4271 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4272 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4273 ARM_FMSTAT (code);
4274 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4276 ARM_CPYD (code, ins->dreg, ins->sreg1);
4277 #endif
4278 break;
4280 default:
4281 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4282 g_assert_not_reached ();
/* Per-instruction sanity check: emitted bytes must not exceed the length
 * estimate from the machine description (max_len), or the buffer-growth
 * logic above this loop could have been fooled. */
4285 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4286 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4287 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4288 g_assert_not_reached ();
4291 cpos += max_len;
4293 last_ins = ins;
4294 last_offset = offset;
4297 cfg->code_len = code - cfg->native_code;
4300 #endif /* DISABLE_JIT */
4302 #ifdef HAVE_AEABI_READ_TP
4303 void __aeabi_read_tp (void);
4304 #endif
4306 void
4307 mono_arch_register_lowlevel_calls (void)
4309 /* The signature doesn't matter */
4310 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4311 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4313 #ifdef HAVE_AEABI_READ_TP
4314 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
4315 #endif
/*
 * patch_lis_ori:
 * Writes the high and low 16 bits of @val into the immediate fields of a
 * lis/ori-style instruction pair at @ip.
 * NOTE(review): this is a PowerPC idiom; in this file it is only referenced
 * from mono_arch_patch_code () branches that first execute
 * g_assert_not_reached (), so it appears to be dead leftover code from the
 * PPC port — confirm before relying on it.
 */
#define patch_lis_ori(ip,val) do {\
	guint16 *__lis_ori = (guint16*)(ip); \
	__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
	__lis_ori [3] = ((guint32)(val)) & 0xffff; \
} while (0)
/*
 * mono_arch_patch_code:
 * Applies all jump-info patches recorded for a freshly generated method:
 * fills in inlined switch jump tables and redirects call/branch sites to
 * their resolved targets via arm_patch_general ().
 * When called for AOT compilation (run_cctors == FALSE) only basic-block and
 * label patches are applied; everything else is handled by the AOT runtime.
 */
void
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
	MonoJumpInfo *patch_info;
	gboolean compile_aot = !run_cctors;

	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		const unsigned char *target;

		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
			gpointer *jt = (gpointer*)(ip + 8);
			int i;
			/* jt is the inlined jump table, 2 instructions after ip
			 * In the normal case we store the absolute addresses,
			 * otherwise the displacements.
			 */
			for (i = 0; i < patch_info->data.table->table_size; i++)
				jt [i] = code + (int)patch_info->data.table->table [i];
			continue;
		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);

		if (compile_aot) {
			switch (patch_info->type) {
			case MONO_PATCH_INFO_BB:
			case MONO_PATCH_INFO_LABEL:
				break;
			default:
				/* No need to patch these */
				continue;

		switch (patch_info->type) {
		/* NOTE(review): the cases below up to MONO_PATCH_INFO_EXC_NAME all hit
		 * g_assert_not_reached () first — the patch_lis_ori/raw-store code after
		 * them is PPC-style leftover and never executes on ARM. */
		case MONO_PATCH_INFO_IP:
			g_assert_not_reached ();
			patch_lis_ori (ip, ip);
			continue;
		case MONO_PATCH_INFO_METHOD_REL:
			g_assert_not_reached ();
			*((gpointer *)(ip)) = code + patch_info->data.offset;
			continue;
		case MONO_PATCH_INFO_METHODCONST:
		case MONO_PATCH_INFO_CLASS:
		case MONO_PATCH_INFO_IMAGE:
		case MONO_PATCH_INFO_FIELD:
		case MONO_PATCH_INFO_VTABLE:
		case MONO_PATCH_INFO_IID:
		case MONO_PATCH_INFO_SFLDA:
		case MONO_PATCH_INFO_LDSTR:
		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		case MONO_PATCH_INFO_LDTOKEN:
			g_assert_not_reached ();
			/* from OP_AOTCONST : lis + ori */
			patch_lis_ori (ip, target);
			continue;
		case MONO_PATCH_INFO_R4:
		case MONO_PATCH_INFO_R8:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
			continue;
		case MONO_PATCH_INFO_EXC_NAME:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
			continue;
		case MONO_PATCH_INFO_NONE:
		case MONO_PATCH_INFO_BB_OVF:
		case MONO_PATCH_INFO_EXC_OVF:
			/* everything is dealt with at epilog output time */
			continue;
		default:
			break;
		arm_patch_general (domain, ip, target);
/*
 * Stack frame layout:
 *
 * ------------------- fp
 * 	MonoLMF structure or saved registers
 * -------------------
 * 	locals
 * -------------------
 * 	spilled regs
 * -------------------
 * 	optional 8 bytes for tracing
 * -------------------
 * 	param area             size is cfg->param_area
 * ------------------- sp
 */
/*
 * mono_arch_emit_prolog:
 * Emits the method entry sequence: saves callee-saved registers (or the
 * register part of a full MonoLMF when method->save_lmf is set), allocates
 * and aligns the stack frame, emits unwind info, stores incoming arguments
 * into their home slots, and optionally sets up the LMF, call tracing and
 * sequence-point (soft-debugger) state.
 * Allocates cfg->native_code and returns the updated code pointer.
 */
guint8 *
mono_arch_emit_prolog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	MonoBasicBlock *bb;
	MonoMethodSignature *sig;
	MonoInst *inst;
	int alloc_size, pos, max_offset, i, rot_amount;
	guint8 *code;
	CallInfo *cinfo;
	int tracing = 0;
	int lmf_offset = 0;
	int prev_sp_offset, reg_offset;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
		tracing = 1;

	sig = mono_method_signature (method);
	/* Initial guess for the code buffer size; callers grow it later if needed */
	cfg->code_size = 256 + sig->param_count * 20;
	code = cfg->native_code = g_malloc (cfg->code_size);

	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);

	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);

	alloc_size = cfg->stack_offset;
	pos = 0;

	if (!method->save_lmf) {
		/* We save SP by storing it into IP and saving IP */
		ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
		prev_sp_offset = 8; /* ip and lr */
		for (i = 0; i < 16; ++i) {
			if (cfg->used_int_regs & (1 << i))
				prev_sp_offset += 4;
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		/* Emit unwind info for every pushed register */
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
	} else {
		/* Push the register part of the MonoLMF (mask 0x5ff0 = r4-r12, lr) */
		ARM_PUSH (code, 0x5ff0);
		prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		reg_offset = 0;
		for (i = 0; i < 16; ++i) {
			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				reg_offset += 4;
		/* Reserve space for the remainder of the MonoLMF below the pushed regs */
		pos += sizeof (MonoLMF) - prev_sp_offset;
		lmf_offset = pos;
	alloc_size += pos;
	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
	/* the stack used in the pushed regs */
	if (prev_sp_offset & 4)
		alloc_size += 4;
	cfg->stack_usage = alloc_size;
	if (alloc_size) {
		/* Prefer a single SUB with a rotated imm8 when the size encodes */
		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
	if (cfg->frame_reg != ARMREG_SP) {
		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
	prev_sp_offset += alloc_size;

	/* compute max_offset in order to use short forward jumps
	 * we could skip do it on arm because the immediate displacement
	 * for jumps is large enough, it may be useful later for constant pools
	 */
	max_offset = 0;
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;
		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
			max_offset += 6;
		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];

	/* store runtime generic context */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;
		g_assert (ins->opcode == OP_REGOFFSET);
		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);

	/* load arguments allocated to register from the stack */
	pos = 0;

	cinfo = get_call_info (NULL, sig, sig->pinvoke);

	/* Valuetype return: save the hidden return-buffer pointer */
	if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);

	if (sig->call_convention == MONO_CALL_VARARG) {
		ArgInfo *cookie = &cinfo->sig_cookie;

		/* Save the sig cookie address */
		g_assert (cookie->storage == RegTypeBase);

		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
		g_assert (arm_is_imm12 (cfg->sig_cookie));
		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);

	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
		if (inst->opcode == OP_REGVAR) {
			/* The argument lives in a register for its whole lifetime */
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					/* NOTE(review): the guard above tests prev_sp_offset + ainfo->offset,
					 * but this path loads inst->inst_offset into IP — looks like it
					 * should be prev_sp_offset + ainfo->offset; confirm upstream. */
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
			} else
				g_assert_not_reached ();

			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
		} else {
			/* the argument should be put on the stack: FIXME handle size != word */
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
				switch (ainfo->size) {
				case 1:
					if (arm_is_imm12 (inst->inst_offset))
						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					break;
				case 8:
					/* 64-bit argument arriving in a register pair */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					break;
			} else if (ainfo->storage == RegTypeBaseGen) {
				/* Argument split between r3 and the caller's stack */
				g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
				g_assert (arm_is_imm12 (inst->inst_offset));
				ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
				ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
			} else if (ainfo->storage == RegTypeBase) {
				/* Argument arrived on the caller's stack; copy it to the local slot */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				} else {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);

				switch (ainfo->size) {
				case 1:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					break;
				case 2:
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					break;
				case 8:
					/* Copy both 32-bit halves via LR */
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					break;
				default:
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					break;
			} else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeStructByVal) {
				/* Struct passed (partially) in registers: spill the register part,
				 * then memcpy any remaining part from the caller's stack */
				int doffset = inst->inst_offset;
				int soffset = 0;
				int cur_reg;
				int size = 0;
				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					} else {
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				if (ainfo->vtsize) {
					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
			} else if (ainfo->storage == RegTypeStructByAddr) {
				g_assert_not_reached ();
				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
			} else
				g_assert_not_reached ();
		pos++;

	/* Native-to-managed wrappers must attach the thread to the runtime */
	if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
		if (cfg->compile_aot)
			/* AOT code is only used in the root domain */
			code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
		else
			code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
			(gpointer)"mono_jit_thread_attach");
		code = emit_call_seq (cfg, code);

	if (method->save_lmf) {
		gboolean get_lmf_fast = FALSE;

#ifdef HAVE_AEABI_READ_TP
		gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();

		if (lmf_addr_tls_offset != -1) {
			get_lmf_fast = TRUE;

			/* Fast path: read the LMF address straight out of TLS */
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
				(gpointer)"__aeabi_read_tp");
			code = emit_call_seq (cfg, code);

			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
			get_lmf_fast = TRUE;
#endif
		if (!get_lmf_fast) {
			mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
				(gpointer)"mono_get_lmf_addr");
			code = emit_call_seq (cfg, code);

		/* we build the MonoLMF structure on the stack - see mini-arm.h */
		/* lmf_offset is the offset from the previous stack pointer,
		 * alloc_size is the total stack space allocated, so the offset
		 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
		 * The pointer to the struct is put in r1 (new_lmf).
		 * r2 is used as scratch
		 * The callee-saved registers are already in the MonoLMF structure
		 */
		code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
		/* r0 is the result from mono_get_lmf_addr () */
		ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* new_lmf->previous_lmf = *lmf_addr */
		ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* *(lmf_addr) = r1 */
		ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* Skip method (only needed for trampoline LMF frames) */
		ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
		/* save the current IP */
		ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
		ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));

	if (tracing)
		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		ARM_B (code, 0);
		/* Placeholder word, filled in when the patch above is applied */
		*(gpointer*)code = NULL;
		code += 4;
		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);

	/* Initialize ss_trigger_page_var */
	MonoInst *info_var = cfg->arch.seq_point_info_var;
	MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
	int dreg = ARMREG_LR;

	if (info_var) {
		g_assert (info_var->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (info_var->inst_offset));

		ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
		/* Load the trigger page addr */
		ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
		ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);

	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);
	g_free (cinfo);

	return code;
/*
 * mono_arch_emit_epilog:
 * Emits the method exit sequence: optional leave-tracing, loading of a
 * struct return value into r0, LMF unlinking (when method->save_lmf), stack
 * deallocation and restoration of the saved registers (popping PC returns
 * to the caller). Grows cfg->native_code if the epilog might not fit.
 */
void
mono_arch_emit_epilog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	int max_epilog_size = 16 + 20*4;
	guint8 *code;
	CallInfo *cinfo;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;

	/*
	 * Keep in sync with OP_JMP
	 */
	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
	pos = 0;

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == RegTypeStructByVal) {
		MonoInst *ins = cfg->ret;

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		} else {
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);

	if (method->save_lmf) {
		int lmf_offset;
		/* all but r0-r3, sp and pc */
		pos += sizeof (MonoLMF) - (4 * 10);
		lmf_offset = pos;
		/* r2 contains the pointer to the current LMF */
		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
		/* ip = previous_lmf */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* lr = lmf_addr */
		ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
		/* *(lmf_addr) = previous_lmf */
		ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
		/* FIXME: speedup: there is no actual need to restore the registers if
		 * we didn't actually change them (idea from Zoltan).
		 */
		/* restore iregs */
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
		ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
	} else {
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
		} else {
			/* NOTE(review): the imm8 path above bases the ADD on cfg->frame_reg,
			 * while this path adds to ARMREG_SP — these differ if frame_reg != sp
			 * and stack_usage does not encode as a rotated imm8; confirm. */
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
		/* FIXME: add v4 thumb interworking support */
		ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
4895 /* remove once throw_exception_by_name is eliminated */
4896 static int
4897 exception_id_by_name (const char *name)
4899 if (strcmp (name, "IndexOutOfRangeException") == 0)
4900 return MONO_EXC_INDEX_OUT_OF_RANGE;
4901 if (strcmp (name, "OverflowException") == 0)
4902 return MONO_EXC_OVERFLOW;
4903 if (strcmp (name, "ArithmeticException") == 0)
4904 return MONO_EXC_ARITHMETIC;
4905 if (strcmp (name, "DivideByZeroException") == 0)
4906 return MONO_EXC_DIVIDE_BY_ZERO;
4907 if (strcmp (name, "InvalidCastException") == 0)
4908 return MONO_EXC_INVALID_CAST;
4909 if (strcmp (name, "NullReferenceException") == 0)
4910 return MONO_EXC_NULL_REF;
4911 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4912 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4913 g_error ("Unknown intrinsic exception %s\n", name);
4914 return -1;
/*
 * mono_arch_emit_exceptions:
 * Appends the out-of-line exception-raising stubs after the method body.
 * Each distinct intrinsic exception gets one stub (subsequent throw sites
 * branch to the already-emitted stub); the stub loads the corlib type token,
 * puts the throwing IP in r1 and calls mono_arch_throw_corlib_exception.
 */
void
mono_arch_emit_exceptions (MonoCompile *cfg)
	MonoJumpInfo *patch_info;
	int i;
	guint8 *code;
	const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
	int max_epilog_size = 50;

	/* count the number of exception infos */

	/*
	 * make sure we have enough space for exceptions
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;

	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		mono_jit_stats.code_reallocs++;

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* A stub for this exception already exists: reuse it */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
				break;
			} else {
				exc_throw_pos [i] = code;
			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* r1 = throwing IP (saved in lr by the branch to this stub) */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
			/* r0 = type token, inlined right after the BL below */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			/* Retarget this patch at the corlib-throw helper */
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			ARM_BL (code, 0);
			*(guint32*)(gpointer)code = exc_class->type_token;
			code += 4;
			break;
		default:
			/* do nothing */
			break;

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
/* Guards the one-time TLS offset initialization in mono_arch_setup_jit_tls_data ().
 * NOTE(review): read and written without a lock — presumably the first call
 * happens before any thread races; confirm callers serialize this. */
static gboolean tls_offset_inited = FALSE;
4994 void
4995 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4997 if (!tls_offset_inited) {
4998 tls_offset_inited = TRUE;
5000 lmf_tls_offset = mono_get_lmf_tls_offset ();
5001 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
5005 void
5006 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5010 MonoInst*
5011 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5013 /* FIXME: */
5014 return NULL;
5017 gboolean
5018 mono_arch_print_tree (MonoInst *tree, int arity)
5020 return 0;
5023 MonoInst*
5024 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5026 return mono_get_domain_intrinsic (cfg);
5029 guint32
5030 mono_arch_get_patch_offset (guint8 *code)
5032 /* OP_AOTCONST */
5033 return 8;
void
mono_arch_flush_register_windows (void)
{
	/* Nothing to do: ARM has no register windows. */
}
5041 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arranges for the IMT/method argument to be passed in V5 at an interface
 * or generic call site. Under AOT an OP_AOTCONST loads the method; with a
 * generic context an OP_PCONST is used. In both cases the call is marked
 * dynamic_imt_arg so the thunk/lookup code knows V5 is live.
 */
void
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	if (cfg->compile_aot) {
		/* NOTE(review): this vreg is allocated even when imt_arg != NULL,
		 * in which case it goes unused */
		int method_reg = mono_alloc_ireg (cfg);
		MonoInst *ins;

		call->dynamic_imt_arg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MONO_INST_NEW (cfg, ins, OP_AOTCONST);
			ins->dreg = method_reg;
			ins->inst_p0 = call->method;
			ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
	} else if (cfg->generic_context) {

		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		if (imt_arg) {
			mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
		} else {
			MonoInst *ins;
			int method_reg = mono_alloc_preg (cfg);

			MONO_INST_NEW (cfg, ins, OP_PCONST);
			ins->inst_p0 = call->method;
			ins->dreg = method_reg;
			MONO_ADD_INS (cfg->cbb, ins);

			mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5086 MonoMethod*
5087 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5089 guint32 *code_ptr = (guint32*)code;
5090 code_ptr -= 2;
5091 /* The IMT value is stored in the code stream right after the LDC instruction. */
5092 if (!IS_LDR_PC (code_ptr [0])) {
5093 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5094 g_assert (IS_LDR_PC (code_ptr [0]));
5096 if (code_ptr [1] == 0)
5097 /* This is AOTed code, the IMT method is in V5 */
5098 return (MonoMethod*)regs [ARMREG_V5];
5099 else
5100 return (MonoMethod*) code_ptr [1];
5103 MonoVTable*
5104 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5106 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Set to 1 to emit an assertion when an IMT thunk is entered with a wrong method */
#define ENABLE_WRONG_METHOD_CHECK 0
/* Size estimates, in bytes (instruction counts * 4), used to reserve
 * code space when building IMT thunks in mono_arch_build_imt_thunk () */
#define BASE_SIZE (6 * 4)
#define BSEARCH_ENTRY_SIZE (4 * 4)
#define CMP_SIZE (3 * 4)
#define BRANCH_SIZE (1 * 4)
#define CALL_SIZE (2 * 4)
#define WMC_SIZE (5 * 4)
/* Byte distance from A to B (casts assume 32-bit pointers) */
#define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5118 static arminstr_t *
5119 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5121 guint32 delta = DISTANCE (target, code);
5122 delta -= 8;
5123 g_assert (delta >= 0 && delta <= 0xFFF);
5124 *target = *target | delta;
5125 *code = value;
5126 return code + 1;
5129 gpointer
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
{
	/*
	 * Build the native dispatch thunk for an IMT slot of VTABLE holding the
	 * COUNT colliding entries described by IMT_ENTRIES.  The generated code
	 * compares the IMT key in R0 against each entry's key and jumps through
	 * the corresponding vtable slot; non-equals entries form bsearch-style
	 * range checks.  IP is loaded with the vtable address from an inline
	 * constant pool slot patched below.
	 *
	 * NOTE(review): fail_tramp is accepted but never used by this
	 * implementation — see the commented-out assert below.
	 */
	int size, i, extra_space = 0;
	arminstr_t *code, *start, *vtable_target = NULL;
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;

	size = BASE_SIZE;
	constant_pool_starts = g_new0 (guint32*, count);

	/*
	 * We might be called with a fail_tramp from the IMT builder code even if
	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
	 */
	//g_assert (!fail_tramp);

	/* First pass: compute an upper bound on the code size of each entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			/* Vtable offsets that don't fit in a 12 bit LDR immediate need a
			 * longer sequence (constant-pool load + stack shuffle). */
			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;
			}

			if (item->check_target_idx) {
				if (!item->compare_done)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
			} else {
#if ENABLE_WRONG_METHOD_CHECK
				item->chunk_size += WMC_SIZE;
#endif
			}
			item->chunk_size += CALL_SIZE;
		} else {
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		}
		size += item->chunk_size;
	}

	if (large_offsets)
		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	start = code = mono_domain_code_reserve (domain, size);

#if DEBUG_IMT
	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	}
#endif

	/* Thunk prologue: save the registers we clobber.  With large offsets we
	 * also push IP/PC so the slow path below can branch via LDM. */
	if (large_offsets)
		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	else
		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	/* Load the word at [LR, -4] into R0 */
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	/* Load the vtable address into IP; the LDR is patched later to read the
	 * constant-pool slot holding VTABLE. */
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);

	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* Second pass: emit the compare/branch/call sequence for each entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			if (item->check_target_idx) {
				if (!item->compare_done) {
					imt_method = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				}
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			} else {
				/*Enable the commented code to assert on wrong method*/
#if ENABLE_WRONG_METHOD_CHECK
				imt_method = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_NE, 1);

				ARM_DBRK (code);
#endif
			}

			vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
			} else {
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				if (large_offsets)
					/* Discard the extra IP/PC words pushed by the prologue */
					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
			}

			if (imt_method)
				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			}

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			if (extra_space) {
				code += extra_space;
				extra_space = 0;
			}
		} else {
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_GE, 0);
			++extra_space;
		}
	}

	/* Third pass: patch the forward branches and fill in the constant-pool
	 * key values reserved after each equals entry. */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		}
		if (i > 0 && item->is_equals) {
			int j;
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
			}
		}
	}

#if DEBUG_IMT
	{
		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
		mono_disassemble_code (NULL, (guint8*)start, size, buff);
		g_free (buff);
	}
#endif

	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
	return start;
}
5308 #endif
5310 gpointer
5311 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5313 if (reg == ARMREG_SP)
5314 return (gpointer)ctx->esp;
5315 else
5316 return (gpointer)ctx->regs [reg];
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	guint32 native_offset = ip - (guint8*)ji->code_start;

	if (ji->from_aot) {
		/* AOT code is read-only: instead of patching it, record the trigger
		 * page in the per-method table which the sequence point code
		 * indirects through. */
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
	} else {
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
		/* Patch in: load the trigger page address from an inline literal
		 * (skipped over by the branch), then read from it so the access
		 * faults and raises the breakpoint. 4 instructions/words total. */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		ARM_B (code, 0);
		*(int*)code = (int)bp_trigger_page;
		code += 4;
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

#if 0
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		code += 4;
		mono_arch_flush_icache (code - 4, 4);
#endif
	}
}
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
	guint8 *code = ip;
	int i;

	if (ji->from_aot) {
		/* Undo mono_arch_set_breakpoint: reset the per-method table slot */
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
		info->bp_addrs [native_offset / 4] = 0;
	} else {
		/* Overwrite the 4 instruction words emitted by
		 * mono_arch_set_breakpoint with NOPs. */
		for (i = 0; i < 4; ++i)
			ARM_NOP (code);

		mono_arch_flush_icache (ip, code - ip);
	}
}
5387 * mono_arch_start_single_stepping:
5389 * Start single stepping.
5391 void
5392 mono_arch_start_single_stepping (void)
5394 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5398 * mono_arch_stop_single_stepping:
5400 * Stop single stepping.
5402 void
5403 mono_arch_stop_single_stepping (void)
5405 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
5408 #if __APPLE__
5409 #define DBG_SIGNAL SIGBUS
5410 #else
5411 #define DBG_SIGNAL SIGSEGV
5412 #endif
5415 * mono_arch_is_single_step_event:
5417 * Return whenever the machine state in SIGCTX corresponds to a single
5418 * step event.
5420 gboolean
5421 mono_arch_is_single_step_event (void *info, void *sigctx)
5423 siginfo_t *sinfo = info;
5425 /* Sometimes the address is off by 4 */
5426 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5427 return TRUE;
5428 else
5429 return FALSE;
5433 * mono_arch_is_breakpoint_event:
5435 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5437 gboolean
5438 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5440 siginfo_t *sinfo = info;
5442 if (sinfo->si_signo == DBG_SIGNAL) {
5443 /* Sometimes the address is off by 4 */
5444 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
5445 return TRUE;
5446 else
5447 return FALSE;
5448 } else {
5449 return FALSE;
5453 guint8*
5454 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5456 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5458 if (ji->from_aot)
5459 ip -= 6 * 4;
5460 else
5461 ip -= 12;
5463 return ip;
5466 guint8*
5467 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5469 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5471 ip += 4;
5473 return ip;
5477 * mono_arch_skip_breakpoint:
5479 * See mini-amd64.c for docs.
5481 void
5482 mono_arch_skip_breakpoint (MonoContext *ctx)
5484 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5488 * mono_arch_skip_single_step:
5490 * See mini-amd64.c for docs.
5492 void
5493 mono_arch_skip_single_step (MonoContext *ctx)
5495 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5499 * mono_arch_get_seq_point_info:
5501 * See mini-amd64.c for docs.
5503 gpointer
5504 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5506 SeqPointInfo *info;
5507 MonoJitInfo *ji;
5509 // FIXME: Add a free function
5511 mono_domain_lock (domain);
5512 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5513 code);
5514 mono_domain_unlock (domain);
5516 if (!info) {
5517 ji = mono_jit_info_table_find (domain, (char*)code);
5518 g_assert (ji);
5520 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5522 info->ss_trigger_page = ss_trigger_page;
5523 info->bp_trigger_page = bp_trigger_page;
5525 mono_domain_lock (domain);
5526 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5527 code, info);
5528 mono_domain_unlock (domain);
5531 return info;